diff options
Diffstat (limited to 'arch/i386/mach-voyager/voyager_smp.c')
-rw-r--r-- | arch/i386/mach-voyager/voyager_smp.c | 106 |
1 file changed, 41 insertions, 65 deletions
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index 50d9c52070b1..b87f8548e75a 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/pgalloc.h> | 27 | #include <asm/pgalloc.h> |
28 | #include <asm/tlbflush.h> | 28 | #include <asm/tlbflush.h> |
29 | #include <asm/arch_hooks.h> | 29 | #include <asm/arch_hooks.h> |
30 | #include <asm/pda.h> | ||
31 | 30 | ||
32 | /* TLB state -- visible externally, indexed physically */ | 31 | /* TLB state -- visible externally, indexed physically */ |
33 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; | 32 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; |
@@ -422,7 +421,7 @@ find_smp_config(void) | |||
422 | VOYAGER_SUS_IN_CONTROL_PORT); | 421 | VOYAGER_SUS_IN_CONTROL_PORT); |
423 | 422 | ||
424 | current_thread_info()->cpu = boot_cpu_id; | 423 | current_thread_info()->cpu = boot_cpu_id; |
425 | write_pda(cpu_number, boot_cpu_id); | 424 | x86_write_percpu(cpu_number, boot_cpu_id); |
426 | } | 425 | } |
427 | 426 | ||
428 | /* | 427 | /* |
@@ -435,7 +434,7 @@ smp_store_cpu_info(int id) | |||
435 | 434 | ||
436 | *c = boot_cpu_data; | 435 | *c = boot_cpu_data; |
437 | 436 | ||
438 | identify_cpu(c); | 437 | identify_secondary_cpu(c); |
439 | } | 438 | } |
440 | 439 | ||
441 | /* set up the trampoline and return the physical address of the code */ | 440 | /* set up the trampoline and return the physical address of the code */ |
@@ -459,7 +458,7 @@ start_secondary(void *unused) | |||
459 | /* external functions not defined in the headers */ | 458 | /* external functions not defined in the headers */ |
460 | extern void calibrate_delay(void); | 459 | extern void calibrate_delay(void); |
461 | 460 | ||
462 | secondary_cpu_init(); | 461 | cpu_init(); |
463 | 462 | ||
464 | /* OK, we're in the routine */ | 463 | /* OK, we're in the routine */ |
465 | ack_CPI(VIC_CPU_BOOT_CPI); | 464 | ack_CPI(VIC_CPU_BOOT_CPI); |
@@ -572,7 +571,9 @@ do_boot_cpu(__u8 cpu) | |||
572 | /* init_tasks (in sched.c) is indexed logically */ | 571 | /* init_tasks (in sched.c) is indexed logically */ |
573 | stack_start.esp = (void *) idle->thread.esp; | 572 | stack_start.esp = (void *) idle->thread.esp; |
574 | 573 | ||
575 | init_gdt(cpu, idle); | 574 | init_gdt(cpu); |
575 | per_cpu(current_task, cpu) = idle; | ||
576 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); | ||
576 | irq_ctx_init(cpu); | 577 | irq_ctx_init(cpu); |
577 | 578 | ||
578 | /* Note: Don't modify initial ss override */ | 579 | /* Note: Don't modify initial ss override */ |
@@ -859,8 +860,8 @@ smp_invalidate_interrupt(void) | |||
859 | 860 | ||
860 | /* This routine is called with a physical cpu mask */ | 861 | /* This routine is called with a physical cpu mask */ |
861 | static void | 862 | static void |
862 | flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, | 863 | voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, |
863 | unsigned long va) | 864 | unsigned long va) |
864 | { | 865 | { |
865 | int stuck = 50000; | 866 | int stuck = 50000; |
866 | 867 | ||
@@ -912,7 +913,7 @@ flush_tlb_current_task(void) | |||
912 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | 913 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); |
913 | local_flush_tlb(); | 914 | local_flush_tlb(); |
914 | if (cpu_mask) | 915 | if (cpu_mask) |
915 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | 916 | voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL); |
916 | 917 | ||
917 | preempt_enable(); | 918 | preempt_enable(); |
918 | } | 919 | } |
@@ -934,7 +935,7 @@ flush_tlb_mm (struct mm_struct * mm) | |||
934 | leave_mm(smp_processor_id()); | 935 | leave_mm(smp_processor_id()); |
935 | } | 936 | } |
936 | if (cpu_mask) | 937 | if (cpu_mask) |
937 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | 938 | voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL); |
938 | 939 | ||
939 | preempt_enable(); | 940 | preempt_enable(); |
940 | } | 941 | } |
@@ -955,7 +956,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |||
955 | } | 956 | } |
956 | 957 | ||
957 | if (cpu_mask) | 958 | if (cpu_mask) |
958 | flush_tlb_others(cpu_mask, mm, va); | 959 | voyager_flush_tlb_others(cpu_mask, mm, va); |
959 | 960 | ||
960 | preempt_enable(); | 961 | preempt_enable(); |
961 | } | 962 | } |
@@ -1044,10 +1045,12 @@ smp_call_function_interrupt(void) | |||
1044 | } | 1045 | } |
1045 | 1046 | ||
1046 | static int | 1047 | static int |
1047 | __smp_call_function_mask (void (*func) (void *info), void *info, int retry, | 1048 | voyager_smp_call_function_mask (cpumask_t cpumask, |
1048 | int wait, __u32 mask) | 1049 | void (*func) (void *info), void *info, |
1050 | int wait) | ||
1049 | { | 1051 | { |
1050 | struct call_data_struct data; | 1052 | struct call_data_struct data; |
1053 | u32 mask = cpus_addr(cpumask)[0]; | ||
1051 | 1054 | ||
1052 | mask &= ~(1<<smp_processor_id()); | 1055 | mask &= ~(1<<smp_processor_id()); |
1053 | 1056 | ||
@@ -1083,47 +1086,6 @@ __smp_call_function_mask (void (*func) (void *info), void *info, int retry, | |||
1083 | return 0; | 1086 | return 0; |
1084 | } | 1087 | } |
1085 | 1088 | ||
1086 | /* Call this function on all CPUs using the function_interrupt above | ||
1087 | <func> The function to run. This must be fast and non-blocking. | ||
1088 | <info> An arbitrary pointer to pass to the function. | ||
1089 | <retry> If true, keep retrying until ready. | ||
1090 | <wait> If true, wait until function has completed on other CPUs. | ||
1091 | [RETURNS] 0 on success, else a negative status code. Does not return until | ||
1092 | remote CPUs are nearly ready to execute <<func>> or are or have executed. | ||
1093 | */ | ||
1094 | int | ||
1095 | smp_call_function(void (*func) (void *info), void *info, int retry, | ||
1096 | int wait) | ||
1097 | { | ||
1098 | __u32 mask = cpus_addr(cpu_online_map)[0]; | ||
1099 | |||
1100 | return __smp_call_function_mask(func, info, retry, wait, mask); | ||
1101 | } | ||
1102 | EXPORT_SYMBOL(smp_call_function); | ||
1103 | |||
1104 | /* | ||
1105 | * smp_call_function_single - Run a function on another CPU | ||
1106 | * @func: The function to run. This must be fast and non-blocking. | ||
1107 | * @info: An arbitrary pointer to pass to the function. | ||
1108 | * @nonatomic: Currently unused. | ||
1109 | * @wait: If true, wait until function has completed on other CPUs. | ||
1110 | * | ||
1111 | * Retrurns 0 on success, else a negative status code. | ||
1112 | * | ||
1113 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
1114 | * or is or has executed. | ||
1115 | */ | ||
1116 | |||
1117 | int | ||
1118 | smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
1119 | int nonatomic, int wait) | ||
1120 | { | ||
1121 | __u32 mask = 1 << cpu; | ||
1122 | |||
1123 | return __smp_call_function_mask(func, info, nonatomic, wait, mask); | ||
1124 | } | ||
1125 | EXPORT_SYMBOL(smp_call_function_single); | ||
1126 | |||
1127 | /* Sorry about the name. In an APIC based system, the APICs | 1089 | /* Sorry about the name. In an APIC based system, the APICs |
1128 | * themselves are programmed to send a timer interrupt. This is used | 1090 | * themselves are programmed to send a timer interrupt. This is used |
1129 | * by linux to reschedule the processor. Voyager doesn't have this, | 1091 | * by linux to reschedule the processor. Voyager doesn't have this, |
@@ -1237,8 +1199,8 @@ smp_alloc_memory(void) | |||
1237 | } | 1199 | } |
1238 | 1200 | ||
1239 | /* send a reschedule CPI to one CPU by physical CPU number*/ | 1201 | /* send a reschedule CPI to one CPU by physical CPU number*/ |
1240 | void | 1202 | static void |
1241 | smp_send_reschedule(int cpu) | 1203 | voyager_smp_send_reschedule(int cpu) |
1242 | { | 1204 | { |
1243 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); | 1205 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); |
1244 | } | 1206 | } |
@@ -1267,8 +1229,8 @@ safe_smp_processor_id(void) | |||
1267 | } | 1229 | } |
1268 | 1230 | ||
1269 | /* broadcast a halt to all other CPUs */ | 1231 | /* broadcast a halt to all other CPUs */ |
1270 | void | 1232 | static void |
1271 | smp_send_stop(void) | 1233 | voyager_smp_send_stop(void) |
1272 | { | 1234 | { |
1273 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | 1235 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); |
1274 | } | 1236 | } |
@@ -1930,23 +1892,26 @@ smp_voyager_power_off(void *dummy) | |||
1930 | smp_stop_cpu_function(NULL); | 1892 | smp_stop_cpu_function(NULL); |
1931 | } | 1893 | } |
1932 | 1894 | ||
1933 | void __init | 1895 | static void __init |
1934 | smp_prepare_cpus(unsigned int max_cpus) | 1896 | voyager_smp_prepare_cpus(unsigned int max_cpus) |
1935 | { | 1897 | { |
1936 | /* FIXME: ignore max_cpus for now */ | 1898 | /* FIXME: ignore max_cpus for now */ |
1937 | smp_boot_cpus(); | 1899 | smp_boot_cpus(); |
1938 | } | 1900 | } |
1939 | 1901 | ||
1940 | void __devinit smp_prepare_boot_cpu(void) | 1902 | static void __devinit voyager_smp_prepare_boot_cpu(void) |
1941 | { | 1903 | { |
1904 | init_gdt(smp_processor_id()); | ||
1905 | switch_to_new_gdt(); | ||
1906 | |||
1942 | cpu_set(smp_processor_id(), cpu_online_map); | 1907 | cpu_set(smp_processor_id(), cpu_online_map); |
1943 | cpu_set(smp_processor_id(), cpu_callout_map); | 1908 | cpu_set(smp_processor_id(), cpu_callout_map); |
1944 | cpu_set(smp_processor_id(), cpu_possible_map); | 1909 | cpu_set(smp_processor_id(), cpu_possible_map); |
1945 | cpu_set(smp_processor_id(), cpu_present_map); | 1910 | cpu_set(smp_processor_id(), cpu_present_map); |
1946 | } | 1911 | } |
1947 | 1912 | ||
1948 | int __devinit | 1913 | static int __devinit |
1949 | __cpu_up(unsigned int cpu) | 1914 | voyager_cpu_up(unsigned int cpu) |
1950 | { | 1915 | { |
1951 | /* This only works at boot for x86. See "rewrite" above. */ | 1916 | /* This only works at boot for x86. See "rewrite" above. */ |
1952 | if (cpu_isset(cpu, smp_commenced_mask)) | 1917 | if (cpu_isset(cpu, smp_commenced_mask)) |
@@ -1962,8 +1927,8 @@ __cpu_up(unsigned int cpu) | |||
1962 | return 0; | 1927 | return 0; |
1963 | } | 1928 | } |
1964 | 1929 | ||
1965 | void __init | 1930 | static void __init |
1966 | smp_cpus_done(unsigned int max_cpus) | 1931 | voyager_smp_cpus_done(unsigned int max_cpus) |
1967 | { | 1932 | { |
1968 | zap_low_mappings(); | 1933 | zap_low_mappings(); |
1969 | } | 1934 | } |
@@ -1972,5 +1937,16 @@ void __init | |||
1972 | smp_setup_processor_id(void) | 1937 | smp_setup_processor_id(void) |
1973 | { | 1938 | { |
1974 | current_thread_info()->cpu = hard_smp_processor_id(); | 1939 | current_thread_info()->cpu = hard_smp_processor_id(); |
1975 | write_pda(cpu_number, hard_smp_processor_id()); | 1940 | x86_write_percpu(cpu_number, hard_smp_processor_id()); |
1976 | } | 1941 | } |
1942 | |||
1943 | struct smp_ops smp_ops = { | ||
1944 | .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, | ||
1945 | .smp_prepare_cpus = voyager_smp_prepare_cpus, | ||
1946 | .cpu_up = voyager_cpu_up, | ||
1947 | .smp_cpus_done = voyager_smp_cpus_done, | ||
1948 | |||
1949 | .smp_send_stop = voyager_smp_send_stop, | ||
1950 | .smp_send_reschedule = voyager_smp_send_reschedule, | ||
1951 | .smp_call_function_mask = voyager_smp_call_function_mask, | ||
1952 | }; | ||