author		Ingo Molnar <mingo@elte.hu>	2008-07-18 15:13:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-18 15:13:20 -0400
commit		f6dc8ccaab6d8f63cbae1e6c73fe972b26f5376c (patch)
tree		c5643fcdc884a8d0bfc3f1bc28039cab7394e5bc /arch/x86/mach-voyager
parent		323ec001c6bb98eeabb5abbdbb8c8055d9496554 (diff)
parent		5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into core/generic-dma-coherent

Conflicts:

	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mach-voyager')
-rw-r--r--	arch/x86/mach-voyager/setup.c		 37
-rw-r--r--	arch/x86/mach-voyager/voyager_smp.c	112
2 files changed, 25 insertions, 124 deletions
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 5ae5466b9eb9..6bbdd633864c 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -62,6 +62,7 @@ void __init time_init_hook(void)
 char *__init machine_specific_memory_setup(void)
 {
 	char *who;
+	int new_nr;
 
 	who = "NOT VOYAGER";
 
@@ -73,7 +74,7 @@ char *__init machine_specific_memory_setup(void)
 
 		e820.nr_map = 0;
 		for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
-			add_memory_region(addr, length, E820_RAM);
+			e820_add_region(addr, length, E820_RAM);
 		}
 		return who;
 	} else if (voyager_level == 4) {
@@ -91,43 +92,17 @@ char *__init machine_specific_memory_setup(void)
 			tom = (boot_params.screen_info.ext_mem_k) << 10;
 		}
 		who = "Voyager-TOM";
-		add_memory_region(0, 0x9f000, E820_RAM);
+		e820_add_region(0, 0x9f000, E820_RAM);
 		/* map from 1M to top of memory */
-		add_memory_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
+		e820_add_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
 				  E820_RAM);
 		/* FIXME: Should check the ASICs to see if I need to
 		 * take out the 8M window.  Just do it at the moment
 		 * */
-		add_memory_region(8 * 1024 * 1024, 8 * 1024 * 1024,
+		e820_add_region(8 * 1024 * 1024, 8 * 1024 * 1024,
 				  E820_RESERVED);
 		return who;
 	}
 
-	who = "BIOS-e820";
-
-	/*
-	 * Try to copy the BIOS-supplied E820-map.
-	 *
-	 * Otherwise fake a memory map; one section from 0k->640k,
-	 * the next section from 1mb->appropriate_mem_k
-	 */
-	sanitize_e820_map(boot_params.e820_map, &boot_params.e820_entries);
-	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries)
-	    < 0) {
-		unsigned long mem_size;
-
-		/* compare results from other methods and take the greater */
-		if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
-			mem_size = boot_params.screen_info.ext_mem_k;
-			who = "BIOS-88";
-		} else {
-			mem_size = boot_params.alt_mem_k;
-			who = "BIOS-e801";
-		}
-
-		e820.nr_map = 0;
-		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
-		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
-	}
-	return who;
+	return default_machine_specific_memory_setup();
 }
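A note for readers following the e820 rework that this merge pulls in: the setup.c hunks above rename add_memory_region() to e820_add_region() and drop the open-coded BIOS-e820/e801/88 fallback in favour of default_machine_specific_memory_setup(). The sketch below (not part of the patch) shows the resulting shape of a sub-architecture memory-setup hook; platform_detect() and platform_memory_range() are hypothetical stand-ins for the Voyager-specific probing shown above, not real kernel APIs.

/* Sketch only -- not from this patch.  Illustrates a sub-arch
 * machine_specific_memory_setup() after the e820 API rework.
 * platform_detect() and platform_memory_range() are hypothetical
 * helpers standing in for voyager_memory_detect() and friends. */
char *__init machine_specific_memory_setup(void)
{
	__u32 addr, length;
	int i;

	if (!platform_detect())
		/* no platform-specific map: fall back to the BIOS e820 path */
		return default_machine_specific_memory_setup();

	e820.nr_map = 0;
	for (i = 0; platform_memory_range(i, &addr, &length); i++)
		e820_add_region(addr, length, E820_RAM);	/* was add_memory_region() */

	return "PLATFORM";
}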
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 8acbf0cdf1a5..ee0fba092157 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -59,11 +59,6 @@ __u32 voyager_quad_processors = 0;
  * activity count.  Finally exported by i386_ksyms.c */
 static int voyager_extended_cpus = 1;
 
-/* Have we found an SMP box - used by time.c to do the profiling
-   interrupt for timeslicing; do not set to 1 until the per CPU timer
-   interrupt is active */
-int smp_found_config = 0;
-
 /* Used for the invalidate map that's also checked in the spinlock */
 static volatile unsigned long smp_invalidate_needed;
 
@@ -955,94 +950,24 @@ static void smp_stop_cpu_function(void *dummy)
 	halt();
 }
 
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	volatile unsigned long started;
-	volatile unsigned long finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
 /* execute a thread on a new CPU.  The function to be called must be
  * previously set up.  This is used to schedule a function for
  * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	/* must take copy of wait because call_data may be replaced
-	 * unless the function is waiting for us to finish */
-	int wait = call_data->wait;
-	__u8 cpu = smp_processor_id();
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	if (!test_and_clear_bit(cpu, &call_data->started)) {
-		/* If the bit wasn't set, this could be a replay */
-		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-		       " with no call pending\n", cpu);
-		return;
-	}
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func) (info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
-	if (wait) {
-		mb();
-		clear_bit(cpu, &call_data->finished);
-	}
 }
 
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
-			       void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
 {
-	struct call_data_struct data;
-	u32 mask = cpus_addr(cpumask)[0];
-
-	mask &= ~(1 << smp_processor_id());
-
-	if (!mask)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.started = mask;
-	data.wait = wait;
-	if (wait)
-		data.finished = mask;
-
-	spin_lock(&call_lock);
-	call_data = &data;
-	wmb();
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
-	/* Wait for response */
-	while (data.started)
-		barrier();
-
-	if (wait)
-		while (data.finished)
-			barrier();
-
-	spin_unlock(&call_lock);
-
-	return 0;
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 }
 
 /* Sorry about the name.  In an APIC based system, the APICs
@@ -1099,6 +1024,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
 	smp_call_function_interrupt();
 }
 
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+	ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+	smp_call_function_single_interrupt();
+}
+
 void smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1119,6 +1050,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
 		smp_enable_irq_interrupt();
 	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
 		smp_call_function_interrupt();
+	if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+		smp_call_function_single_interrupt();
 	set_irq_regs(old_regs);
 }
 
1124 1057
@@ -1134,16 +1067,7 @@ static void do_flush_tlb_all(void *info)
 /* flush the TLB of every active CPU in the system */
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
-}
-
-/* used to set up the trampoline for other CPUs when the memory manager
- * is sorted out */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	if (__pa(trampoline_base) >= 0x93000)
-		BUG();
+	on_each_cpu(do_flush_tlb_all, 0, 1);
 }
 
 /* send a reschedule CPI to one CPU by physical CPU number*/
@@ -1175,7 +1099,7 @@ int safe_smp_processor_id(void)
 /* broadcast a halt to all other CPUs */
 static void voyager_smp_send_stop(void)
 {
-	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+	smp_call_function(smp_stop_cpu_function, NULL, 1);
 }
 
 /* this function is triggered in time.c when a clock tick fires
@@ -1862,5 +1786,7 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
-	.smp_call_function_mask = voyager_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
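The voyager_smp.c hunks above delete the private call_data_struct/call_lock machinery and route the call-function CPIs through the generic helpers in kernel/smp.c (generic_smp_call_function_interrupt() and its _single variant), while smp_ops picks up native_send_call_func_ipi() and native_send_call_func_single_ipi(). The caller-visible effect, already applied in the flush_tlb_all() and voyager_smp_send_stop() hunks, is that smp_call_function() and on_each_cpu() lose their duplicated "nonatomic"/retry argument. A minimal caller-side sketch follows; do_something() and example() are illustrative placeholders, not code from this patch.

/* Sketch only -- shows the caller-visible API change the hunks above
 * adapt to.  do_something() and example() are placeholders. */
#include <linux/smp.h>

static void do_something(void *info)
{
	/* runs on each CPU that takes the call-function IPI */
}

static void example(void)
{
	/* old API: smp_call_function(do_something, NULL, 1, 1); */
	smp_call_function(do_something, NULL, 1);	/* last arg: wait */

	/* old API: on_each_cpu(do_something, NULL, 1, 1); */
	on_each_cpu(do_something, NULL, 1);
}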