author    David S. Miller <davem@sunset.davemloft.net>  2006-02-04 06:10:53 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:11:40 -0500
commit    a43fe0e789f5445f5224511034f410adf11f153b (patch)
tree      cface7b6e616be616899da8c0762f904263c5985
parent    1633a53c79498455b16d051451f4e3f83ab4e7dd (diff)
[SPARC64]: Add some hypervisor tlb_type checks.

And more consistently check cheetah{,_plus} instead of assuming
anything not spitfire is cheetah{,_plus}.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/kernel/smp.c  32
-rw-r--r--  arch/sparc64/mm/init.c      6
2 files changed, 30 insertions, 8 deletions
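For readers skimming the diff below, the common pattern the patch introduces is a dispatch on the global tlb_type: cheetah{,_plus} is now matched explicitly and the hypervisor (sun4v) case gets its own branch instead of falling through the old else. The following standalone C sketch illustrates the before/after shape of that dispatch; the enum values and the *_xcall_deliver naming mirror the kernel code, but everything else (the stub bodies, main, stdio) is illustrative only and not the in-tree implementation.

/* Standalone sketch of the tlb_type dispatch added by this patch.
 * The enum values mirror the sparc64 tlb_type variants; the stub
 * delivery routines and main() are for illustration only.
 */
#include <stdio.h>

enum tlb_type_enum { spitfire, cheetah, cheetah_plus, hypervisor };
static enum tlb_type_enum tlb_type = hypervisor;	/* e.g. a sun4v guest */

static void spitfire_xcall_deliver(void)   { puts("spitfire xcall"); }
static void cheetah_xcall_deliver(void)    { puts("cheetah xcall"); }
static void hypervisor_xcall_deliver(void) { puts("hypervisor xcall (stub: XXX implement me)"); }

static void xcall_deliver(void)
{
	/* Before the patch: anything that was not spitfire fell through to
	 * the cheetah path.  After it: cheetah{,_plus} is checked explicitly
	 * and hypervisor platforms get their own branch. */
	if (tlb_type == spitfire)
		spitfire_xcall_deliver();
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver();
	else
		hypervisor_xcall_deliver();
}

int main(void)
{
	xcall_deliver();	/* prints the hypervisor line for the value above */
	return 0;
}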
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 16b8eca9754e..aba0f886b05b 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -528,6 +528,11 @@ retry:
 	}
 }
 
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+	/* XXX implement me */
+}
+
 /* Send cross call to all processors mentioned in MASK
  * except self.
  */
@@ -541,8 +546,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 
 	if (tlb_type == spitfire)
 		spitfire_xcall_deliver(data0, data1, data2, mask);
-	else
+	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
 		cheetah_xcall_deliver(data0, data1, data2, mask);
+	else
+		hypervisor_xcall_deliver(data0, data1, data2, mask);
 	/* NOTE: Caller runs local copy on master. */
 
 	put_cpu();
@@ -695,11 +702,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
 	cpumask_t mask = cpumask_of_cpu(cpu);
-	int this_cpu = get_cpu();
+	int this_cpu;
+
+	if (tlb_type == hypervisor)
+		return;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 	atomic_inc(&dcpage_flushes);
 #endif
+
+	this_cpu = get_cpu();
+
 	if (cpu == this_cpu) {
 		__local_flush_dcache_page(page);
 	} else if (cpu_online(cpu)) {
@@ -715,7 +728,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 					       __pa(pg_addr),
 					       (u64) pg_addr,
 					       mask);
-		} else {
+		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
 			data0 =
 				((u64)&xcall_flush_dcache_page_cheetah);
@@ -737,7 +750,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	void *pg_addr = page_address(page);
 	cpumask_t mask = cpu_online_map;
 	u64 data0;
-	int this_cpu = get_cpu();
+	int this_cpu;
+
+	if (tlb_type == hypervisor)
+		return;
+
+	this_cpu = get_cpu();
 
 	cpu_clear(this_cpu, mask);
 
@@ -754,7 +772,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 				       __pa(pg_addr),
 				       (u64) pg_addr,
 				       mask);
-	} else {
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
 		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 		cheetah_xcall_deliver(data0,
@@ -780,8 +798,10 @@ void smp_receive_signal(int cpu)
 
 		if (tlb_type == spitfire)
 			spitfire_xcall_deliver(data0, 0, 0, mask);
-		else
+		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
 			cheetah_xcall_deliver(data0, 0, 0, mask);
+		else if (tlb_type == hypervisor)
+			hypervisor_xcall_deliver(data0, 0, 0, mask);
 	}
 }
 
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 1af63307b24f..ab50cd9618f3 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -335,7 +335,7 @@ out:
 
 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 {
-	/* Cheetah has coherent I-cache. */
+	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
 	if (tlb_type == spitfire) {
 		unsigned long kaddr;
 
@@ -372,6 +372,8 @@ void mmu_info(struct seq_file *m)
 		seq_printf(m, "MMU Type\t: Cheetah+\n");
 	else if (tlb_type == spitfire)
 		seq_printf(m, "MMU Type\t: Spitfire\n");
+	else if (tlb_type == hypervisor)
+		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
 	else
 		seq_printf(m, "MMU Type\t: ???\n");
 
@@ -581,7 +583,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
 			if (++n >= 512)
 				break;
 		}
-	} else {
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 		start = __pa(start);
 		end = __pa(end);
 		for (va = start; va < end; va += 32)