aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64/kernel/smp.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2006-01-31 21:29:18 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:11:13 -0500
commit74bf4312fff083ab25c3f357cc653ada7995e5f6 (patch)
treec23dea461e32485f4cd7ca4b8c33c632655eb906 /arch/sparc64/kernel/smp.c
parent30d4d1ffed7098afe2641536d67eef150499da02 (diff)
[SPARC64]: Move away from virtual page tables, part 1.
We now use the TSB hardware assist features of the UltraSPARC MMUs. SMP is currently knowingly broken, we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size. Also noticed that flush_tlb_all() is not referenced anywhere, only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all(). The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8K TSB. Later we can add code to dynamically increase the size of per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB. Another area for refinement is large page size support. We could use a secondary address space TSB to handle those. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--arch/sparc64/kernel/smp.c12
1 files changed, 1 insertions, 11 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 1f7ad8a69052..d2d3369e7b5d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,6 +123,7 @@ extern void inherit_locked_prom_mappings(int save_p);
123 123
124static inline void cpu_setup_percpu_base(unsigned long cpu_id) 124static inline void cpu_setup_percpu_base(unsigned long cpu_id)
125{ 125{
126#error IMMU TSB usage must be fixed
126 __asm__ __volatile__("mov %0, %%g5\n\t" 127 __asm__ __volatile__("mov %0, %%g5\n\t"
127 "stxa %0, [%1] %2\n\t" 128 "stxa %0, [%1] %2\n\t"
128 "membar #Sync" 129 "membar #Sync"
@@ -662,8 +663,6 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
662extern unsigned long xcall_flush_tlb_mm; 663extern unsigned long xcall_flush_tlb_mm;
663extern unsigned long xcall_flush_tlb_pending; 664extern unsigned long xcall_flush_tlb_pending;
664extern unsigned long xcall_flush_tlb_kernel_range; 665extern unsigned long xcall_flush_tlb_kernel_range;
665extern unsigned long xcall_flush_tlb_all_spitfire;
666extern unsigned long xcall_flush_tlb_all_cheetah;
667extern unsigned long xcall_report_regs; 666extern unsigned long xcall_report_regs;
668extern unsigned long xcall_receive_signal; 667extern unsigned long xcall_receive_signal;
669 668
@@ -794,15 +793,6 @@ void smp_report_regs(void)
794 smp_cross_call(&xcall_report_regs, 0, 0, 0); 793 smp_cross_call(&xcall_report_regs, 0, 0, 0);
795} 794}
796 795
797void smp_flush_tlb_all(void)
798{
799 if (tlb_type == spitfire)
800 smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
801 else
802 smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
803 __flush_tlb_all();
804}
805
806/* We know that the window frames of the user have been flushed 796/* We know that the window frames of the user have been flushed
807 * to the stack before we get here because all callers of us 797 * to the stack before we get here because all callers of us
808 * are flush_tlb_*() routines, and these run after flush_cache_*() 798 * are flush_tlb_*() routines, and these run after flush_cache_*()