author      Milton Miller <miltonm@bga.com>      2007-12-13 23:52:09 -0500
committer   Paul Mackerras <paulus@samba.org>    2007-12-20 00:15:34 -0500
commit      d7cf0edb8f7d53bfc8b5e40893415d48656c0399
tree        b868deafc979c1e93b0aa17ec22c0be268e66e7d /arch/powerpc/platforms
parent      7e1961ff4954d6ab627d9b606785afdcd0dda84b
[POWERPC] Push down or eliminate smp_processor_id calls in xics code
The per-processor interrupt request register and current processor
priority register are only accessed on the current cpu. In fact the
hypervisor doesn't even let us choose which cpu's registers to access.
The only function that uses the cpu id more than once is
xics_migrate_irqs_away, which is not a fast path. There we can cache the
result of hard_smp_processor_id() instead of calling
get_hard_smp_processor_id(cpu) in a loop around the call to rtas.
Years ago the irq code passed smp_processor_id into get_irq. I thought we
might initialize another cpu's CPPR at boot as an extra measure of safety,
and it made the code symmetric with the qirr (queued interrupt request,
used for software generated interrupts), but now it is just extra and
sometimes unneeded work to pass it down.
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--   arch/powerpc/platforms/pseries/xics.c   59
-rw-r--r--   arch/powerpc/platforms/pseries/xics.h    3
2 files changed, 29 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 66e7d68ffeb1..8f8dd9c3ca6b 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -87,19 +87,25 @@ static int ibm_int_off;
 /* Direct HW low level accessors */
 
 
-static inline unsigned int direct_xirr_info_get(int n_cpu)
+static inline unsigned int direct_xirr_info_get(void)
 {
-	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
+	int cpu = smp_processor_id();
+
+	return in_be32(&xics_per_cpu[cpu]->xirr.word);
 }
 
-static inline void direct_xirr_info_set(int n_cpu, int value)
+static inline void direct_xirr_info_set(int value)
 {
-	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
+	int cpu = smp_processor_id();
+
+	out_be32(&xics_per_cpu[cpu]->xirr.word, value);
 }
 
-static inline void direct_cppr_info(int n_cpu, u8 value)
+static inline void direct_cppr_info(u8 value)
 {
-	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
+	int cpu = smp_processor_id();
+
+	out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
 }
 
 static inline void direct_qirr_info(int n_cpu, u8 value)
@@ -111,7 +117,7 @@ static inline void direct_qirr_info(int n_cpu, u8 value)
 /* LPAR low level accessors */
 
 
-static inline unsigned int lpar_xirr_info_get(int n_cpu)
+static inline unsigned int lpar_xirr_info_get(void)
 {
 	unsigned long lpar_rc;
 	unsigned long return_value;
@@ -122,7 +128,7 @@ static inline unsigned int lpar_xirr_info_get(int n_cpu)
 	return (unsigned int)return_value;
 }
 
-static inline void lpar_xirr_info_set(int n_cpu, int value)
+static inline void lpar_xirr_info_set(int value)
 {
 	unsigned long lpar_rc;
 	unsigned long val64 = value & 0xffffffff;
@@ -133,7 +139,7 @@ static inline void lpar_xirr_info_set(int n_cpu, int value)
 			val64);
 }
 
-static inline void lpar_cppr_info(int n_cpu, u8 value)
+static inline void lpar_cppr_info(u8 value)
 {
 	unsigned long lpar_rc;
 
@@ -275,21 +281,19 @@ static unsigned int xics_startup(unsigned int virq)
 
 static void xics_eoi_direct(unsigned int virq)
 {
-	int cpu = smp_processor_id();
 	unsigned int irq = (unsigned int)irq_map[virq].hwirq;
 
 	iosync();
-	direct_xirr_info_set(cpu, (0xff << 24) | irq);
+	direct_xirr_info_set((0xff << 24) | irq);
 }
 
 
 static void xics_eoi_lpar(unsigned int virq)
 {
-	int cpu = smp_processor_id();
 	unsigned int irq = (unsigned int)irq_map[virq].hwirq;
 
 	iosync();
-	lpar_xirr_info_set(cpu, (0xff << 24) | irq);
+	lpar_xirr_info_set((0xff << 24) | irq);
 }
 
 static inline unsigned int xics_remap_irq(unsigned int vec)
@@ -312,16 +316,12 @@ static inline unsigned int xics_remap_irq(unsigned int vec)
 
 static unsigned int xics_get_irq_direct(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	return xics_remap_irq(direct_xirr_info_get(cpu));
+	return xics_remap_irq(direct_xirr_info_get());
 }
 
 static unsigned int xics_get_irq_lpar(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	return xics_remap_irq(lpar_xirr_info_get(cpu));
+	return xics_remap_irq(lpar_xirr_info_get());
 }
 
 #ifdef CONFIG_SMP
@@ -387,12 +387,12 @@ void xics_cause_IPI(int cpu)
 
 #endif /* CONFIG_SMP */
 
-static void xics_set_cpu_priority(int cpu, unsigned char cppr)
+static void xics_set_cpu_priority(unsigned char cppr)
 {
 	if (firmware_has_feature(FW_FEATURE_LPAR))
-		lpar_cppr_info(cpu, cppr);
+		lpar_cppr_info(cppr);
 	else
-		direct_cppr_info(cpu, cppr);
+		direct_cppr_info(cppr);
 	iosync();
 }
 
@@ -440,9 +440,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
 
 void xics_setup_cpu(void)
 {
-	int cpu = smp_processor_id();
-
-	xics_set_cpu_priority(cpu, 0xff);
+	xics_set_cpu_priority(0xff);
 
 	/*
 	 * Put the calling processor into the GIQ. This is really only
@@ -783,7 +781,7 @@ void xics_teardown_cpu(int secondary)
 	unsigned int ipi;
 	struct irq_desc *desc;
 
-	xics_set_cpu_priority(cpu, 0);
+	xics_set_cpu_priority(0);
 
 	/*
 	 * Clear IPI
@@ -824,10 +822,11 @@ void xics_teardown_cpu(int secondary)
 void xics_migrate_irqs_away(void)
 {
 	int status;
-	unsigned int irq, virq, cpu = smp_processor_id();
+	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
+	unsigned int irq, virq;
 
 	/* Reject any interrupt that was queued to us... */
-	xics_set_cpu_priority(cpu, 0);
+	xics_set_cpu_priority(0);
 
 	/* remove ourselves from the global interrupt queue */
 	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
@@ -835,7 +834,7 @@ void xics_migrate_irqs_away(void)
 	WARN_ON(status < 0);
 
 	/* Allow IPIs again... */
-	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);
+	xics_set_cpu_priority(DEFAULT_PRIORITY);
 
 	for_each_irq(virq) {
 		struct irq_desc *desc;
@@ -874,7 +873,7 @@ void xics_migrate_irqs_away(void)
 		 * The irq has to be migrated only in the single cpu
 		 * case.
 		 */
-		if (xics_status[0] != get_hard_smp_processor_id(cpu))
+		if (xics_status[0] != hw_cpu)
 			goto unlock;
 
 		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
index db0ec3ba3ae2..9ffd809d29e2 100644
--- a/arch/powerpc/platforms/pseries/xics.h
+++ b/arch/powerpc/platforms/pseries/xics.h
@@ -21,9 +21,6 @@ extern void xics_cause_IPI(int cpu);
 extern void xics_request_IPIs(void);
 extern void xics_migrate_irqs_away(void);
 
-/* first argument is ignored for now*/
-void pSeriesLP_cppr_info(int n_cpu, u8 value);
-
 struct xics_ipi_struct {
 	volatile unsigned long value;
 } ____cacheline_aligned;