author    | Sam Ravnborg <sam@ravnborg.org>        | 2012-05-13 02:40:27 -0400
committer | David S. Miller <davem@davemloft.net>  | 2012-05-13 15:51:52 -0400
commit    | 642ea3ed9c652bb9e105e55afcb9ad15b040f71f (patch)
tree      | f4cf27f0f16277a03f997be6dc2b61e7e8980f82 /arch/sparc/mm/srmmu.c
parent    | 5471fa6265a1facadcd593d995a76804fc641106 (diff)
sparc32: drop btfixup in pgalloc_32.h
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
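Background note (not part of the commit): the helpers touched below were previously reached through sparc32's btfixup indirection, with load_mmu() registering the srmmu implementation behind each generic name at boot. With srmmu the only remaining sparc32 MMU, that indirection can go and the helpers become ordinary global functions. The following sketch is a simplified illustration of the shape of the change for pmd_set(); the macro and helper names suffixed _SKETCH/_impl/_sketch are hypothetical, and the real BTFIXUP machinery patches branch instructions in the kernel image rather than using function pointers.

```c
/*
 * Simplified illustration only (not the real BTFIXUP implementation):
 * the actual sparc32 macros patch call sites at boot; a function
 * pointer is used here just to show the indirection being removed.
 */
typedef struct { unsigned long pmdv[16]; } pmd_t;
typedef struct { unsigned long pte; } pte_t;

/* --- before: the generic name dispatches through a boot-time hook --- */
static void (*pmd_set_impl)(pmd_t *pmdp, pte_t *ptep);
#define BTFIXUPSET_CALL_SKETCH(name, fn)	((name##_impl) = (fn))

static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	/* srmmu-specific page-table wiring would live here */
}

static void load_mmu_sketch(void)
{
	/* load_mmu() selected the srmmu implementation at boot */
	BTFIXUPSET_CALL_SKETCH(pmd_set, srmmu_pmd_set);
}

/* --- after: pmd_set() is defined directly in srmmu.c --- */
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	/* same srmmu-specific wiring, now called directly */
}
```

The other helpers in this diff follow the same pattern: their BTFIXUPSET_CALL registrations in load_mmu() are dropped and the srmmu versions take over the generic names (pmd_populate, pte_alloc_one, pte_free, get_pgd_fast), or are made non-static, presumably so the reworked pgalloc_32.h inlines can call them directly (srmmu_get_nocache, srmmu_free_nocache).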
Diffstat (limited to 'arch/sparc/mm/srmmu.c')
-rw-r--r-- | arch/sparc/mm/srmmu.c | 71
1 files changed, 14 insertions, 57 deletions
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 54efa9263a44..d9487d851843 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -132,10 +132,7 @@ static inline pte_t srmmu_pte_mkold(pte_t pte)
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 { set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
 
-static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
-
-static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
+void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
@@ -147,7 +144,7 @@ static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
 	}
 }
 
-static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
+void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
@@ -232,7 +229,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
 	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
 
-static unsigned long srmmu_get_nocache(int size, int align)
+unsigned long srmmu_get_nocache(int size, int align)
 {
 	unsigned long tmp;
 
@@ -244,7 +241,7 @@ static unsigned long srmmu_get_nocache(int size, int align)
 	return tmp;
 }
 
-static void srmmu_free_nocache(unsigned long vaddr, int size)
+void srmmu_free_nocache(unsigned long vaddr, int size)
 {
 	int offset;
 
@@ -354,7 +351,7 @@ static void __init srmmu_nocache_init(void)
 	flush_tlb_all();
 }
 
-static inline pgd_t *srmmu_get_pgd_fast(void)
+pgd_t *get_pgd_fast(void)
 {
 	pgd_t *pgd = NULL;
 
@@ -369,21 +366,6 @@ static inline pgd_t *srmmu_get_pgd_fast(void)
 	return pgd;
 }
 
-static void srmmu_free_pgd_fast(pgd_t *pgd)
-{
-	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
-}
-
-static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
-}
-
-static void srmmu_pmd_free(pmd_t * pmd)
-{
-	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
-}
-
 /*
  * Hardware needs alignment to 256 only, but we align to whole page size
  * to reduce fragmentation problems due to the buddy principle.
@@ -392,31 +374,19 @@ static void srmmu_pmd_free(pmd_t * pmd)
  * Alignments up to the page size are the same for physical and virtual
  * addresses of the nocache area.
  */
-static pte_t *
-srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
-}
-
-static pgtable_t
-srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long pte;
 	struct page *page;
 
-	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
+	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
 		return NULL;
 	page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
 	pgtable_page_ctor(page);
 	return page;
 }
 
-static void srmmu_free_pte_fast(pte_t *pte)
-{
-	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
-}
-
-static void srmmu_pte_free(pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	unsigned long p;
 
@@ -977,7 +947,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+			pgd_set(__nocache_fix(pgdp), pmdp);
 		}
 		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
 		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
@@ -985,7 +955,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(__nocache_fix(ptep), 0, PTE_SIZE);
-			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+			pmd_set(__nocache_fix(pmdp), ptep);
 		}
 		if (start > (0xffffffffUL - PMD_SIZE))
 			break;
@@ -1007,7 +977,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
-			srmmu_pgd_set(pgdp, pmdp);
+			pgd_set(pgdp, pmdp);
 		}
 		pmdp = srmmu_pmd_offset(pgdp, start);
 		if(srmmu_pmd_none(*pmdp)) {
@@ -1016,7 +986,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(ptep, 0, PTE_SIZE);
-			srmmu_pmd_set(pmdp, ptep);
+			pmd_set(pmdp, ptep);
 		}
 		if (start > (0xffffffffUL - PMD_SIZE))
 			break;
@@ -1073,7 +1043,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+			pgd_set(__nocache_fix(pgdp), pmdp);
 		}
 		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
 		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
@@ -1082,7 +1052,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(__nocache_fix(ptep), 0, PTE_SIZE);
-			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+			pmd_set(__nocache_fix(pmdp), ptep);
 		}
 		if(what == 1) {
 			/*
@@ -2047,23 +2017,10 @@ void __init load_mmu(void)
 
 	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
-
 	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
 	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
-
 	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
 	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
 