author	Sam Ravnborg <sam@ravnborg.org>	2012-07-26 07:02:14 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-26 19:46:15 -0400
commit	f71a2aacc63e3185e27ee45e8ebc2bebad9bec28 (patch)
tree	3a76b0a10dc0747c1064e1b4218d08e74f7c20d1
parent	605ae96240a165baaceeff0eeec35e41d68dc978 (diff)
sparc32: use void * in nocache get/free
This allows us to kill a lot of casts, with no loss of readability anywhere.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	arch/sparc/include/asm/pgalloc_32.h	16
-rw-r--r--	arch/sparc/mm/srmmu.c	61
2 files changed, 42 insertions(+), 35 deletions(-)
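For readers outside the kernel tree, here is a minimal stand-alone sketch (not part of the patch) of the C rule the change relies on: a void * return value converts implicitly to any object pointer type, so every (pgd_t *), (pmd_t *), and (pte_t *) cast at the call sites below becomes unnecessary. srmmu_get_nocache_demo and the pmd_t stand-in are illustrations only; the real allocator carves zeroed memory from the SRMMU nocache region, not the heap.

/*
 * Hypothetical demo: why a void * return type kills casts in C.
 */
#include <stdlib.h>
#include <string.h>

typedef struct { unsigned long pmdv[16]; } pmd_t;	/* stand-in type */

static void *srmmu_get_nocache_demo(int size, int align)
{
	void *p = malloc(size);		/* stand-in for the nocache pool */

	(void)align;			/* alignment handling elided here */
	if (p)
		memset(p, 0, size);	/* the real helper also zeroes */
	return p;
}

int main(void)
{
	/* old API returned unsigned long, forcing a cast:      */
	/*   pmd = (pmd_t *)srmmu_get_nocache(size, align);     */
	/* new API returns void *, which converts implicitly:   */
	pmd_t *pmd = srmmu_get_nocache_demo(sizeof(pmd_t), sizeof(pmd_t));

	free(pmd);
	return 0;
}

Note that NULL checks keep working unchanged: the old code returned 0 as an unsigned long on failure, the new code returns a null pointer, and callers such as "if (pmdp == NULL)" are unaffected.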
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index e5b169b46d21..bf20809f6665 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -18,8 +18,8 @@ extern struct pgtable_cache_struct {
 	unsigned long pgd_cache_sz;
 } pgt_quicklists;
 
-unsigned long srmmu_get_nocache(int size, int align);
-void srmmu_free_nocache(unsigned long vaddr, int size);
+void *srmmu_get_nocache(int size, int align);
+void srmmu_free_nocache(void *addr, int size);
 
 #define pgd_quicklist (pgt_quicklists.pgd_cache)
 #define pmd_quicklist ((unsigned long *)0)
@@ -32,7 +32,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size);
 pgd_t *get_pgd_fast(void);
 static inline void free_pgd_fast(pgd_t *pgd)
 {
-	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
+	srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
 }
 
 #define pgd_free(mm, pgd)	free_pgd_fast(pgd)
@@ -50,13 +50,13 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
 				   unsigned long address)
 {
-	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
-					  SRMMU_PMD_TABLE_SIZE);
+	return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
+				 SRMMU_PMD_TABLE_SIZE);
 }
 
 static inline void free_pmd_fast(pmd_t * pmd)
 {
-	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
+	srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
 }
 
 #define pmd_free(mm, pmd)	free_pmd_fast(pmd)
@@ -73,13 +73,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+	return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 }
 
 
 static inline void free_pte_fast(pte_t *pte)
 {
-	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
+	srmmu_free_nocache(pte, PTE_SIZE);
 }
 
 #define pte_free_kernel(mm, pte)	free_pte_fast(pte)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 4b00f6982a97..146742bee39a 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -151,49 +151,55 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
  * align: bytes, number to align at.
  * Returns the virtual address of the allocated area.
  */
-static unsigned long __srmmu_get_nocache(int size, int align)
+static void *__srmmu_get_nocache(int size, int align)
 {
 	int offset;
+	unsigned long addr;
 
 	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
-		printk("Size 0x%x too small for nocache request\n", size);
+		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
+		       size);
 		size = SRMMU_NOCACHE_BITMAP_SHIFT;
 	}
-	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
-		printk("Size 0x%x unaligned int nocache request\n", size);
-		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
+	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
+		printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",
+		       size);
+		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
 	}
 	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
 
 	offset = bit_map_string_get(&srmmu_nocache_map,
 				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
 				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
 	if (offset == -1) {
-		printk("srmmu: out of nocache %d: %d/%d\n",
+		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
 		       size, (int) srmmu_nocache_size,
 		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
 		return 0;
 	}
 
-	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
+	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
+	return (void *)addr;
 }
 
-unsigned long srmmu_get_nocache(int size, int align)
+void *srmmu_get_nocache(int size, int align)
 {
-	unsigned long tmp;
+	void *tmp;
 
 	tmp = __srmmu_get_nocache(size, align);
 
 	if (tmp)
-		memset((void *)tmp, 0, size);
+		memset(tmp, 0, size);
 
 	return tmp;
 }
 
-void srmmu_free_nocache(unsigned long vaddr, int size)
+void srmmu_free_nocache(void *addr, int size)
 {
+	unsigned long vaddr;
 	int offset;
 
+	vaddr = (unsigned long)addr;
 	if (vaddr < SRMMU_NOCACHE_VADDR) {
 		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
 		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
@@ -271,7 +277,7 @@ static void __init srmmu_nocache_init(void)
 	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
-	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
 	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
 	init_mm.pgd = srmmu_swapper_pg_dir;
 
@@ -304,7 +310,7 @@ pgd_t *get_pgd_fast(void)
 {
 	pgd_t *pgd = NULL;
 
-	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
 	if (pgd) {
 		pgd_t *init = pgd_offset_k(0);
 		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
@@ -344,8 +350,9 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
 	if (p == 0)
 		BUG();
 	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
-	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
-	srmmu_free_nocache(p, PTE_SIZE);
+
+	/* free non cached virtual address*/
+	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
 }
 
 /*
@@ -593,7 +600,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 	while (start < end) {
 		pgdp = pgd_offset_k(start);
 		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
-			pmdp = (pmd_t *) __srmmu_get_nocache(
+			pmdp = __srmmu_get_nocache(
 			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
@@ -602,7 +609,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 		}
 		pmdp = pmd_offset(__nocache_fix(pgdp), start);
 		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
-			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(__nocache_fix(ptep), 0, PTE_SIZE);
@@ -624,7 +631,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
 	while (start < end) {
 		pgdp = pgd_offset_k(start);
 		if (pgd_none(*pgdp)) {
-			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
@@ -632,7 +639,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
 		}
 		pmdp = pmd_offset(pgdp, start);
 		if (srmmu_pmd_none(*pmdp)) {
-			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
+			ptep = __srmmu_get_nocache(PTE_SIZE,
 							     PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
@@ -707,7 +714,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 			continue;
 		}
 		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
-			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
@@ -715,7 +722,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 		}
 		pmdp = pmd_offset(__nocache_fix(pgdp), start);
 		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
-			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(__nocache_fix(ptep), 0, PTE_SIZE);
@@ -831,11 +838,11 @@ void __init srmmu_paging_init(void)
 
 	srmmu_nocache_calcsize();
 	srmmu_nocache_init();
-	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM-PAGE_SIZE));
+	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
 	map_kernel();
 
 	/* ctx table has to be physically aligned to its size */
-	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
+	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
 	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
 
 	for (i = 0; i < num_contexts; i++)