diff options
Diffstat (limited to 'arch/x86/kernel/apic/ipi.c')
| -rw-r--r-- | arch/x86/kernel/apic/ipi.c | 164 |
1 file changed, 164 insertions, 0 deletions
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c new file mode 100644 index 000000000000..dbf5445727a9 --- /dev/null +++ b/arch/x86/kernel/apic/ipi.c | |||
| @@ -0,0 +1,164 @@ | |||
| 1 | #include <linux/cpumask.h> | ||
| 2 | #include <linux/interrupt.h> | ||
| 3 | #include <linux/init.h> | ||
| 4 | |||
| 5 | #include <linux/mm.h> | ||
| 6 | #include <linux/delay.h> | ||
| 7 | #include <linux/spinlock.h> | ||
| 8 | #include <linux/kernel_stat.h> | ||
| 9 | #include <linux/mc146818rtc.h> | ||
| 10 | #include <linux/cache.h> | ||
| 11 | #include <linux/cpu.h> | ||
| 12 | #include <linux/module.h> | ||
| 13 | |||
| 14 | #include <asm/smp.h> | ||
| 15 | #include <asm/mtrr.h> | ||
| 16 | #include <asm/tlbflush.h> | ||
| 17 | #include <asm/mmu_context.h> | ||
| 18 | #include <asm/apic.h> | ||
| 19 | #include <asm/proto.h> | ||
| 20 | #include <asm/ipi.h> | ||
| 21 | |||
| 22 | void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) | ||
| 23 | { | ||
| 24 | unsigned long query_cpu; | ||
| 25 | unsigned long flags; | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Hack. The clustered APIC addressing mode doesn't allow us to send | ||
| 29 | * to an arbitrary mask, so I do a unicast to each CPU instead. | ||
| 30 | * - mbligh | ||
| 31 | */ | ||
| 32 | local_irq_save(flags); | ||
| 33 | for_each_cpu(query_cpu, mask) { | ||
| 34 | __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, | ||
| 35 | query_cpu), vector, APIC_DEST_PHYSICAL); | ||
| 36 | } | ||
| 37 | local_irq_restore(flags); | ||
| 38 | } | ||
| 39 | |||
| 40 | void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, | ||
| 41 | int vector) | ||
| 42 | { | ||
| 43 | unsigned int this_cpu = smp_processor_id(); | ||
| 44 | unsigned int query_cpu; | ||
| 45 | unsigned long flags; | ||
| 46 | |||
| 47 | /* See Hack comment above */ | ||
| 48 | |||
| 49 | local_irq_save(flags); | ||
| 50 | for_each_cpu(query_cpu, mask) { | ||
| 51 | if (query_cpu == this_cpu) | ||
| 52 | continue; | ||
| 53 | __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, | ||
| 54 | query_cpu), vector, APIC_DEST_PHYSICAL); | ||
| 55 | } | ||
| 56 | local_irq_restore(flags); | ||
| 57 | } | ||
| 58 | |||
| 59 | void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | ||
| 60 | int vector) | ||
| 61 | { | ||
| 62 | unsigned long flags; | ||
| 63 | unsigned int query_cpu; | ||
| 64 | |||
| 65 | /* | ||
| 66 | * Hack. The clustered APIC addressing mode doesn't allow us to send | ||
| 67 | * to an arbitrary mask, so I do a unicasts to each CPU instead. This | ||
| 68 | * should be modified to do 1 message per cluster ID - mbligh | ||
| 69 | */ | ||
| 70 | |||
| 71 | local_irq_save(flags); | ||
| 72 | for_each_cpu(query_cpu, mask) | ||
| 73 | __default_send_IPI_dest_field( | ||
| 74 | apic->cpu_to_logical_apicid(query_cpu), vector, | ||
| 75 | apic->dest_logical); | ||
| 76 | local_irq_restore(flags); | ||
| 77 | } | ||
| 78 | |||
| 79 | void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, | ||
| 80 | int vector) | ||
| 81 | { | ||
| 82 | unsigned long flags; | ||
| 83 | unsigned int query_cpu; | ||
| 84 | unsigned int this_cpu = smp_processor_id(); | ||
| 85 | |||
| 86 | /* See Hack comment above */ | ||
| 87 | |||
| 88 | local_irq_save(flags); | ||
| 89 | for_each_cpu(query_cpu, mask) { | ||
| 90 | if (query_cpu == this_cpu) | ||
| 91 | continue; | ||
| 92 | __default_send_IPI_dest_field( | ||
| 93 | apic->cpu_to_logical_apicid(query_cpu), vector, | ||
| 94 | apic->dest_logical); | ||
| 95 | } | ||
| 96 | local_irq_restore(flags); | ||
| 97 | } | ||
| 98 | |||
| 99 | #ifdef CONFIG_X86_32 | ||
| 100 | |||
| 101 | /* | ||
| 102 | * This is only used on smaller machines. | ||
| 103 | */ | ||
| 104 | void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) | ||
| 105 | { | ||
| 106 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
| 107 | unsigned long flags; | ||
| 108 | |||
| 109 | local_irq_save(flags); | ||
| 110 | WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); | ||
| 111 | __default_send_IPI_dest_field(mask, vector, apic->dest_logical); | ||
| 112 | local_irq_restore(flags); | ||
| 113 | } | ||
| 114 | |||
/*
 * Send @vector to all CPUs except the current one, via the APIC
 * all-but-self shorthand.
 *
 * If there are no other CPUs in the system we would get an APIC send
 * error on a broadcast, so avoid sending the IPI entirely in that case.
 */
void default_send_IPI_allbutself(int vector)
{
	/* Clearer than the original "!(num_online_cpus() > 1)" double negative */
	if (num_online_cpus() < 2)
		return;

	__default_local_send_IPI_allbutself(vector);
}
| 126 | |||
/* Send @vector to all CPUs (including the sender) via the local APIC helper. */
void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}
| 131 | |||
/* Send @vector to the current CPU only, using the SELF destination shorthand. */
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}
| 136 | |||
| 137 | /* must come after the send_IPI functions above for inlining */ | ||
| 138 | static int convert_apicid_to_cpu(int apic_id) | ||
| 139 | { | ||
| 140 | int i; | ||
| 141 | |||
| 142 | for_each_possible_cpu(i) { | ||
| 143 | if (per_cpu(x86_cpu_to_apicid, i) == apic_id) | ||
| 144 | return i; | ||
| 145 | } | ||
| 146 | return -1; | ||
| 147 | } | ||
| 148 | |||
| 149 | int safe_smp_processor_id(void) | ||
| 150 | { | ||
| 151 | int apicid, cpuid; | ||
| 152 | |||
| 153 | if (!boot_cpu_has(X86_FEATURE_APIC)) | ||
| 154 | return 0; | ||
| 155 | |||
| 156 | apicid = hard_smp_processor_id(); | ||
| 157 | if (apicid == BAD_APICID) | ||
| 158 | return 0; | ||
| 159 | |||
| 160 | cpuid = convert_apicid_to_cpu(apicid); | ||
| 161 | |||
| 162 | return cpuid >= 0 ? cpuid : 0; | ||
| 163 | } | ||
| 164 | #endif | ||
