diff options
author | David S. Miller <davem@sunset.davemloft.net> | 2006-02-04 06:10:53 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:11:40 -0500 |
commit | a43fe0e789f5445f5224511034f410adf11f153b (patch) | |
tree | cface7b6e616be616899da8c0762f904263c5985 /arch/sparc64/kernel/smp.c | |
parent | 1633a53c79498455b16d051451f4e3f83ab4e7dd (diff) |
[SPARC64]: Add some hypervisor tlb_type checks.
And more consistently check cheetah{,_plus} instead
of assuming anything not spitfire is cheetah{,_plus}.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r-- | arch/sparc64/kernel/smp.c | 32 |
1 file changed, 26 insertions, 6 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 16b8eca9754e..aba0f886b05b 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -528,6 +528,11 @@ retry: | |||
528 | } | 528 | } |
529 | } | 529 | } |
530 | 530 | ||
531 | static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | ||
532 | { | ||
533 | /* XXX implement me */ | ||
534 | } | ||
535 | |||
531 | /* Send cross call to all processors mentioned in MASK | 536 | /* Send cross call to all processors mentioned in MASK |
532 | * except self. | 537 | * except self. |
533 | */ | 538 | */ |
@@ -541,8 +546,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d | |||
541 | 546 | ||
542 | if (tlb_type == spitfire) | 547 | if (tlb_type == spitfire) |
543 | spitfire_xcall_deliver(data0, data1, data2, mask); | 548 | spitfire_xcall_deliver(data0, data1, data2, mask); |
544 | else | 549 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) |
545 | cheetah_xcall_deliver(data0, data1, data2, mask); | 550 | cheetah_xcall_deliver(data0, data1, data2, mask); |
551 | else | ||
552 | hypervisor_xcall_deliver(data0, data1, data2, mask); | ||
546 | /* NOTE: Caller runs local copy on master. */ | 553 | /* NOTE: Caller runs local copy on master. */ |
547 | 554 | ||
548 | put_cpu(); | 555 | put_cpu(); |
@@ -695,11 +702,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page) | |||
695 | void smp_flush_dcache_page_impl(struct page *page, int cpu) | 702 | void smp_flush_dcache_page_impl(struct page *page, int cpu) |
696 | { | 703 | { |
697 | cpumask_t mask = cpumask_of_cpu(cpu); | 704 | cpumask_t mask = cpumask_of_cpu(cpu); |
698 | int this_cpu = get_cpu(); | 705 | int this_cpu; |
706 | |||
707 | if (tlb_type == hypervisor) | ||
708 | return; | ||
699 | 709 | ||
700 | #ifdef CONFIG_DEBUG_DCFLUSH | 710 | #ifdef CONFIG_DEBUG_DCFLUSH |
701 | atomic_inc(&dcpage_flushes); | 711 | atomic_inc(&dcpage_flushes); |
702 | #endif | 712 | #endif |
713 | |||
714 | this_cpu = get_cpu(); | ||
715 | |||
703 | if (cpu == this_cpu) { | 716 | if (cpu == this_cpu) { |
704 | __local_flush_dcache_page(page); | 717 | __local_flush_dcache_page(page); |
705 | } else if (cpu_online(cpu)) { | 718 | } else if (cpu_online(cpu)) { |
@@ -715,7 +728,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) | |||
715 | __pa(pg_addr), | 728 | __pa(pg_addr), |
716 | (u64) pg_addr, | 729 | (u64) pg_addr, |
717 | mask); | 730 | mask); |
718 | } else { | 731 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
719 | #ifdef DCACHE_ALIASING_POSSIBLE | 732 | #ifdef DCACHE_ALIASING_POSSIBLE |
720 | data0 = | 733 | data0 = |
721 | ((u64)&xcall_flush_dcache_page_cheetah); | 734 | ((u64)&xcall_flush_dcache_page_cheetah); |
@@ -737,7 +750,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
737 | void *pg_addr = page_address(page); | 750 | void *pg_addr = page_address(page); |
738 | cpumask_t mask = cpu_online_map; | 751 | cpumask_t mask = cpu_online_map; |
739 | u64 data0; | 752 | u64 data0; |
740 | int this_cpu = get_cpu(); | 753 | int this_cpu; |
754 | |||
755 | if (tlb_type == hypervisor) | ||
756 | return; | ||
757 | |||
758 | this_cpu = get_cpu(); | ||
741 | 759 | ||
742 | cpu_clear(this_cpu, mask); | 760 | cpu_clear(this_cpu, mask); |
743 | 761 | ||
@@ -754,7 +772,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
754 | __pa(pg_addr), | 772 | __pa(pg_addr), |
755 | (u64) pg_addr, | 773 | (u64) pg_addr, |
756 | mask); | 774 | mask); |
757 | } else { | 775 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
758 | #ifdef DCACHE_ALIASING_POSSIBLE | 776 | #ifdef DCACHE_ALIASING_POSSIBLE |
759 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); | 777 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); |
760 | cheetah_xcall_deliver(data0, | 778 | cheetah_xcall_deliver(data0, |
@@ -780,8 +798,10 @@ void smp_receive_signal(int cpu) | |||
780 | 798 | ||
781 | if (tlb_type == spitfire) | 799 | if (tlb_type == spitfire) |
782 | spitfire_xcall_deliver(data0, 0, 0, mask); | 800 | spitfire_xcall_deliver(data0, 0, 0, mask); |
783 | else | 801 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) |
784 | cheetah_xcall_deliver(data0, 0, 0, mask); | 802 | cheetah_xcall_deliver(data0, 0, 0, mask); |
803 | else if (tlb_type == hypervisor) | ||
804 | hypervisor_xcall_deliver(data0, 0, 0, mask); | ||
785 | } | 805 | } |
786 | } | 806 | } |
787 | 807 | ||