author     Adrian Bunk <bunk@kernel.org>  2008-06-05 14:41:51 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-18 00:38:01 -0400
commit     50215d6511265d46ba14038640b16c5dd7731ff4
tree       e8c70718dcb89aee9fc7ee4f42e22f3a382a85c6  /arch/sparc/mm
parent     c61c65cdcd1021cfbd7be8685ff1cf4f86c68c44
sparc/mm/: possible cleanups
This patch contains the following possible cleanups:
- make the following needlessly global code static (see the sketch after this list):
- fault.c: force_user_fault()
- init.c: calc_max_low_pfn()
- init.c: pgt_cache_water[]
- init.c: map_high_region()
- srmmu.c: hwbug_bitmask
- srmmu.c: srmmu_swapper_pg_dir
- srmmu.c: srmmu_context_table
- srmmu.c: is_hypersparc
- srmmu.c: srmmu_cache_pagetables
- srmmu.c: srmmu_nocache_size
- srmmu.c: srmmu_nocache_end
- srmmu.c: srmmu_get_nocache()
- srmmu.c: srmmu_free_nocache()
- srmmu.c: srmmu_early_allocate_ptable_skeleton()
- srmmu.c: srmmu_nocache_calcsize()
- srmmu.c: srmmu_nocache_init()
- srmmu.c: srmmu_alloc_thread_info()
- srmmu.c: early_pgtable_allocfail()
- srmmu.c: srmmu_allocate_ptable_skeleton()
- srmmu.c: srmmu_inherit_prom_mappings()
  - tsunami.S: tsunami_copy_1page
- remove the following unused code:
- init.c: struct sparc_aliases
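
As a rough illustration of the first group of changes, here is a minimal, self-contained sketch (file and symbol names are invented, not taken from this patch) of the "needlessly global -> static" pattern: a variable or helper referenced only inside one .c file is given internal linkage, so it no longer shows up in the kernel's global symbol table and the compiler can warn if it ever becomes unused.

/* example.c - hypothetical, illustrative only */

/* file-local state: static keeps it out of the global namespace */
static int example_water_marks[2] = { 25, 50 };

/* file-local helper: static lets the compiler inline or discard it */
static void example_check_one(int mark)
{
	/* ... work that only this file needs ... */
	(void)mark;
}

/* the only symbol this file still exports to the rest of the kernel */
void example_check_marks(void)
{
	example_check_one(example_water_marks[0]);
	example_check_one(example_water_marks[1]);
}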
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/fault.c    |  2
-rw-r--r--  arch/sparc/mm/init.c     |  8
-rw-r--r--  arch/sparc/mm/srmmu.c    | 38
-rw-r--r--  arch/sparc/mm/tsunami.S  |  1
4 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 0a3cd8f6cfe4..3604c2e86709 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -451,7 +451,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
 }
 
 /* This always deals with user addresses. */
-inline void force_user_fault(unsigned long address, int write)
+static void force_user_fault(unsigned long address, int write)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk = current;
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index 7794ecb896e3..8f94a2d62f13 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -128,7 +128,7 @@ unsigned long calc_highpages(void)
 	return nr;
 }
 
-unsigned long calc_max_low_pfn(void)
+static unsigned long calc_max_low_pfn(void)
 {
 	int i;
 	unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
@@ -292,7 +292,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
  *
  * We simply copy the 2.4 implementation for now.
  */
-int pgt_cache_water[2] = { 25, 50 };
+static int pgt_cache_water[2] = { 25, 50 };
 
 void check_pgt_cache(void)
 {
@@ -356,8 +356,6 @@ void __init paging_init(void)
 	device_scan();
 }
 
-struct cache_palias *sparc_aliases;
-
 static void __init taint_real_pages(void)
 {
 	int i;
@@ -375,7 +373,7 @@ static void __init taint_real_pages(void)
 	}
 }
 
-void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
+static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long tmp;
 
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 23d3291a3e81..c624e04ff03e 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -50,7 +50,7 @@
 #include <asm/btfixup.h>
 
 enum mbus_module srmmu_modtype;
-unsigned int hwbug_bitmask;
+static unsigned int hwbug_bitmask;
 int vac_cache_size;
 int vac_line_size;
 
@@ -60,7 +60,7 @@ extern unsigned long last_valid_pfn;
 
 extern unsigned long page_kernel;
 
-pgd_t *srmmu_swapper_pg_dir;
+static pgd_t *srmmu_swapper_pg_dir;
 
 #ifdef CONFIG_SMP
 #define FLUSH_BEGIN(mm)
@@ -83,12 +83,12 @@ BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
 char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
-ctxd_t *srmmu_context_table;
+static ctxd_t *srmmu_context_table;
 
 int viking_mxcc_present;
 static DEFINE_SPINLOCK(srmmu_context_spinlock);
 
-int is_hypersparc;
+static int is_hypersparc;
 
 /*
  * In general all page table modifications should use the V8 atomic
@@ -112,11 +112,11 @@ static inline int srmmu_device_memory(unsigned long x)
 	return ((x & 0xF0000000) != 0);
 }
 
-int srmmu_cache_pagetables;
+static int srmmu_cache_pagetables;
 
 /* these will be initialized in srmmu_nocache_calcsize() */
-unsigned long srmmu_nocache_size;
-unsigned long srmmu_nocache_end;
+static unsigned long srmmu_nocache_size;
+static unsigned long srmmu_nocache_end;
 
 /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
@@ -324,7 +324,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
 	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
 
-unsigned inline long srmmu_get_nocache(int size, int align)
+static unsigned long srmmu_get_nocache(int size, int align)
 {
 	unsigned long tmp;
 
@@ -336,7 +336,7 @@ unsigned inline long srmmu_get_nocache(int size, int align)
 	return tmp;
 }
 
-void srmmu_free_nocache(unsigned long vaddr, int size)
+static void srmmu_free_nocache(unsigned long vaddr, int size)
 {
 	int offset;
 
@@ -369,7 +369,8 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
 	bit_map_clear(&srmmu_nocache_map, offset, size);
 }
 
-void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
+static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
+						 unsigned long end);
 
 extern unsigned long probe_memory(void);	/* in fault.c */
 
@@ -377,7 +378,7 @@ extern unsigned long probe_memory(void);	/* in fault.c */
  * Reserve nocache dynamically proportionally to the amount of
  * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
  */
-void srmmu_nocache_calcsize(void)
+static void srmmu_nocache_calcsize(void)
 {
 	unsigned long sysmemavail = probe_memory() / 1024;
 	int srmmu_nocache_npages;
@@ -398,7 +399,7 @@ void srmmu_nocache_calcsize(void)
 	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
 }
 
-void __init srmmu_nocache_init(void)
+static void __init srmmu_nocache_init(void)
 {
 	unsigned int bitmap_bits;
 	pgd_t *pgd;
@@ -645,7 +646,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
 * mappings on the kernel stack without any special code as we did
 * need on the sun4c.
 */
-struct thread_info *srmmu_alloc_thread_info(void)
+static struct thread_info *srmmu_alloc_thread_info(void)
 {
 	struct thread_info *ret;
 
@@ -1045,13 +1046,14 @@ extern void hypersparc_setup_blockops(void);
 * around 8mb mapped for us.
 */
 
-void __init early_pgtable_allocfail(char *type)
+static void __init early_pgtable_allocfail(char *type)
 {
 	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
 	prom_halt();
 }
 
-void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
+							 unsigned long end)
 {
 	pgd_t *pgdp;
 	pmd_t *pmdp;
@@ -1081,7 +1083,8 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
 	}
 }
 
-void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
+						  unsigned long end)
 {
 	pgd_t *pgdp;
 	pmd_t *pmdp;
@@ -1116,7 +1119,8 @@ void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long en
 * looking at the prom's page table directly which is what most
 * other OS's do. Yuck... this is much better.
 */
-void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
+static void __init srmmu_inherit_prom_mappings(unsigned long start,
+					       unsigned long end)
 {
 	pgd_t *pgdp;
 	pmd_t *pmdp;
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
index db0d6de33a87..4e55e8f76648 100644
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -93,7 +93,6 @@ tsunami_flush_tlb_page_out:
 	ldd	[src + offset + 0x00], t2;	\
 	std	t2, [dst + offset + 0x00];
 
-	.globl	tsunami_copy_1page
 tsunami_copy_1page:
 /* NOTE: This routine has to be shorter than 70insns --jj */
 	or	%g0, (PAGE_SIZE >> 8), %g1
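
One C detail worth noting from the srmmu.c hunks above: when a function definition becomes static, any earlier forward declaration of it in the same file has to become static as well (as the srmmu_early_allocate_ptable_skeleton prototype does here), otherwise the two declarations disagree about linkage and gcc rejects the file with a "static declaration follows non-static declaration" error. A minimal sketch with invented names:

/* hypothetical sketch, not from this patch */
static void setup_tables(unsigned long start, unsigned long end);	/* must also be static */

void init_all(void)
{
	setup_tables(0, 4096);
}

static void setup_tables(unsigned long start, unsigned long end)
{
	/* ... */
	(void)start;
	(void)end;
}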