about summary refs log tree commit diff stats
path: root/arch/sparc/mm
diff options
context:
space:
mode:
authorSam Ravnborg <sam@ravnborg.org>2012-05-13 04:21:25 -0400
committerDavid S. Miller <davem@davemloft.net>2012-05-13 15:51:53 -0400
commit9701b264d3267b55ace45bc579eabefc7decfa57 (patch)
tree399be67cba77e5da48b3fe9e3fdaa3927df044c7 /arch/sparc/mm
parent642ea3ed9c652bb9e105e55afcb9ad15b040f71f (diff)
sparc32: drop btfixup in pgtable_32.h
Only one function left using btfixup.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--arch/sparc/mm/srmmu.c77
1 file changed, 17 insertions(+), 60 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index d9487d851843..32cec268c2c1 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -109,10 +109,6 @@ void *srmmu_nocache_pool;
109void *srmmu_nocache_bitmap; 109void *srmmu_nocache_bitmap;
110static struct bit_map srmmu_nocache_map; 110static struct bit_map srmmu_nocache_map;
111 111
112static inline unsigned long srmmu_pgd_page(pgd_t pgd)
113{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
114
115
116static inline int srmmu_pte_none(pte_t pte) 112static inline int srmmu_pte_none(pte_t pte)
117{ return !(pte_val(pte) & 0xFFFFFFF); } 113{ return !(pte_val(pte) & 0xFFFFFFF); }
118 114
@@ -163,15 +159,8 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
163static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) 159static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
164{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } 160{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
165 161
166/* Find an entry in the second-level page table.. */
167static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
168{
169 return (pmd_t *) srmmu_pgd_page(*dir) +
170 ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
171}
172
173/* Find an entry in the third-level page table.. */ 162/* Find an entry in the third-level page table.. */
174static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) 163pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
175{ 164{
176 void *pte; 165 void *pte;
177 166
@@ -180,23 +169,6 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
180 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); 169 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
181} 170}
182 171
183static unsigned long srmmu_swp_type(swp_entry_t entry)
184{
185 return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
186}
187
188static unsigned long srmmu_swp_offset(swp_entry_t entry)
189{
190 return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
191}
192
193static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
194{
195 return (swp_entry_t) {
196 (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
197 | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
198}
199
200/* 172/*
201 * size: bytes to allocate in the nocache area. 173 * size: bytes to allocate in the nocache area.
202 * align: bytes, number to align at. 174 * align: bytes, number to align at.
@@ -333,8 +305,8 @@ static void __init srmmu_nocache_init(void)
333 305
334 while (vaddr < srmmu_nocache_end) { 306 while (vaddr < srmmu_nocache_end) {
335 pgd = pgd_offset_k(vaddr); 307 pgd = pgd_offset_k(vaddr);
336 pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); 308 pmd = pmd_offset(__nocache_fix(pgd), vaddr);
337 pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); 309 pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
338 310
339 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); 311 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
340 312
@@ -467,8 +439,8 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
467 439
468 physaddr &= PAGE_MASK; 440 physaddr &= PAGE_MASK;
469 pgdp = pgd_offset_k(virt_addr); 441 pgdp = pgd_offset_k(virt_addr);
470 pmdp = srmmu_pmd_offset(pgdp, virt_addr); 442 pmdp = pmd_offset(pgdp, virt_addr);
471 ptep = srmmu_pte_offset(pmdp, virt_addr); 443 ptep = pte_offset_kernel(pmdp, virt_addr);
472 tmp = (physaddr >> 4) | SRMMU_ET_PTE; 444 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
473 445
474 /* 446 /*
@@ -482,8 +454,8 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
482 set_pte(ptep, __pte(tmp)); 454 set_pte(ptep, __pte(tmp));
483} 455}
484 456
485static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, 457void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
486 unsigned long xva, unsigned int len) 458 unsigned long xva, unsigned int len)
487{ 459{
488 while (len != 0) { 460 while (len != 0) {
489 len -= PAGE_SIZE; 461 len -= PAGE_SIZE;
@@ -501,14 +473,14 @@ static inline void srmmu_unmapioaddr(unsigned long virt_addr)
501 pte_t *ptep; 473 pte_t *ptep;
502 474
503 pgdp = pgd_offset_k(virt_addr); 475 pgdp = pgd_offset_k(virt_addr);
504 pmdp = srmmu_pmd_offset(pgdp, virt_addr); 476 pmdp = pmd_offset(pgdp, virt_addr);
505 ptep = srmmu_pte_offset(pmdp, virt_addr); 477 ptep = pte_offset_kernel(pmdp, virt_addr);
506 478
507 /* No need to flush uncacheable page. */ 479 /* No need to flush uncacheable page. */
508 __pte_clear(ptep); 480 __pte_clear(ptep);
509} 481}
510 482
511static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) 483void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
512{ 484{
513 while (len != 0) { 485 while (len != 0) {
514 len -= PAGE_SIZE; 486 len -= PAGE_SIZE;
@@ -949,7 +921,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
949 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); 921 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
950 pgd_set(__nocache_fix(pgdp), pmdp); 922 pgd_set(__nocache_fix(pgdp), pmdp);
951 } 923 }
952 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); 924 pmdp = pmd_offset(__nocache_fix(pgdp), start);
953 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { 925 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
954 ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); 926 ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
955 if (ptep == NULL) 927 if (ptep == NULL)
@@ -979,7 +951,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
979 memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); 951 memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
980 pgd_set(pgdp, pmdp); 952 pgd_set(pgdp, pmdp);
981 } 953 }
982 pmdp = srmmu_pmd_offset(pgdp, start); 954 pmdp = pmd_offset(pgdp, start);
983 if(srmmu_pmd_none(*pmdp)) { 955 if(srmmu_pmd_none(*pmdp)) {
984 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, 956 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
985 PTE_SIZE); 957 PTE_SIZE);
@@ -1045,7 +1017,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
1045 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); 1017 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
1046 pgd_set(__nocache_fix(pgdp), pmdp); 1018 pgd_set(__nocache_fix(pgdp), pmdp);
1047 } 1019 }
1048 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); 1020 pmdp = pmd_offset(__nocache_fix(pgdp), start);
1049 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { 1021 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
1050 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, 1022 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
1051 PTE_SIZE); 1023 PTE_SIZE);
@@ -1066,7 +1038,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
1066 start += SRMMU_REAL_PMD_SIZE; 1038 start += SRMMU_REAL_PMD_SIZE;
1067 continue; 1039 continue;
1068 } 1040 }
1069 ptep = srmmu_pte_offset(__nocache_fix(pmdp), start); 1041 ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
1070 *(pte_t *)__nocache_fix(ptep) = __pte(prompte); 1042 *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
1071 start += PAGE_SIZE; 1043 start += PAGE_SIZE;
1072 } 1044 }
@@ -1200,8 +1172,8 @@ void __init srmmu_paging_init(void)
1200 srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); 1172 srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
1201 1173
1202 pgd = pgd_offset_k(PKMAP_BASE); 1174 pgd = pgd_offset_k(PKMAP_BASE);
1203 pmd = srmmu_pmd_offset(pgd, PKMAP_BASE); 1175 pmd = pmd_offset(pgd, PKMAP_BASE);
1204 pte = srmmu_pte_offset(pmd, PKMAP_BASE); 1176 pte = pte_offset_kernel(pmd, PKMAP_BASE);
1205 pkmap_page_table = pte; 1177 pkmap_page_table = pte;
1206 1178
1207 flush_cache_all(); 1179 flush_cache_all();
@@ -1233,7 +1205,7 @@ void __init srmmu_paging_init(void)
1233 } 1205 }
1234} 1206}
1235 1207
1236static void srmmu_mmu_info(struct seq_file *m) 1208void mmu_info(struct seq_file *m)
1237{ 1209{
1238 seq_printf(m, 1210 seq_printf(m,
1239 "MMU type\t: %s\n" 1211 "MMU type\t: %s\n"
@@ -2015,24 +1987,9 @@ void __init load_mmu(void)
2015 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); 1987 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
2016#endif 1988#endif
2017 1989
2018 BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
2019
2020 BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
2021 BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
2022 BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
2023
2024 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); 1990 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
2025 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); 1991 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
2026 1992
2027 BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
2028 BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
2029
2030 BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
2031 BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
2032 BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
2033
2034 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
2035
2036 get_srmmu_type(); 1993 get_srmmu_type();
2037 1994
2038#ifdef CONFIG_SMP 1995#ifdef CONFIG_SMP