diff options
Diffstat (limited to 'arch/powerpc/sysdev/mpic.c')
| -rw-r--r-- | arch/powerpc/sysdev/mpic.c | 927 |
1 files changed, 927 insertions, 0 deletions
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c new file mode 100644 index 000000000000..105f05341a41 --- /dev/null +++ b/arch/powerpc/sysdev/mpic.c | |||
| @@ -0,0 +1,927 @@ | |||
| 1 | /* | ||
| 2 | * arch/powerpc/kernel/mpic.c | ||
| 3 | * | ||
| 4 | * Driver for interrupt controllers following the OpenPIC standard, the | ||
| 5 | * common implementation being IBM's MPIC. This driver also can deal | ||
| 6 | * with various broken implementations of this HW. | ||
| 7 | * | ||
| 8 | * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. | ||
| 9 | * | ||
| 10 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 11 | * License. See the file COPYING in the main directory of this archive | ||
| 12 | * for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #undef DEBUG | ||
| 16 | |||
| 17 | #include <linux/config.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | #include <linux/kernel.h> | ||
| 20 | #include <linux/init.h> | ||
| 21 | #include <linux/irq.h> | ||
| 22 | #include <linux/smp.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/bootmem.h> | ||
| 25 | #include <linux/spinlock.h> | ||
| 26 | #include <linux/pci.h> | ||
| 27 | |||
| 28 | #include <asm/ptrace.h> | ||
| 29 | #include <asm/signal.h> | ||
| 30 | #include <asm/io.h> | ||
| 31 | #include <asm/pgtable.h> | ||
| 32 | #include <asm/irq.h> | ||
| 33 | #include <asm/machdep.h> | ||
| 34 | #include <asm/mpic.h> | ||
| 35 | #include <asm/smp.h> | ||
| 36 | |||
| 37 | #ifdef DEBUG | ||
| 38 | #define DBG(fmt...) printk(fmt) | ||
| 39 | #else | ||
| 40 | #define DBG(fmt...) | ||
| 41 | #endif | ||
| 42 | |||
/* Head of the singly-linked list (via ->next) of all registered MPICs */
static struct mpic *mpics;
/* The MPIC flagged MPIC_PRIMARY at mpic_alloc() time; used by the
 * exported per-CPU and IPI entry points below. */
static struct mpic *mpic_primary;
/* Serializes cascade setup and the priority get/set paths */
static DEFINE_SPINLOCK(mpic_lock);

#ifdef CONFIG_PPC32	/* XXX for now */
#define distribute_irqs	CONFIG_IRQ_ALL_CPUS
#endif
| 50 | |||
| 51 | /* | ||
| 52 | * Register accessor functions | ||
| 53 | */ | ||
| 54 | |||
| 55 | |||
| 56 | static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base, | ||
| 57 | unsigned int reg) | ||
| 58 | { | ||
| 59 | if (be) | ||
| 60 | return in_be32(base + (reg >> 2)); | ||
| 61 | else | ||
| 62 | return in_le32(base + (reg >> 2)); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base, | ||
| 66 | unsigned int reg, u32 value) | ||
| 67 | { | ||
| 68 | if (be) | ||
| 69 | out_be32(base + (reg >> 2), value); | ||
| 70 | else | ||
| 71 | out_le32(base + (reg >> 2), value); | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) | ||
| 75 | { | ||
| 76 | unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0; | ||
| 77 | unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); | ||
| 78 | |||
| 79 | if (mpic->flags & MPIC_BROKEN_IPI) | ||
| 80 | be = !be; | ||
| 81 | return _mpic_read(be, mpic->gregs, offset); | ||
| 82 | } | ||
| 83 | |||
| 84 | static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) | ||
| 85 | { | ||
| 86 | unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); | ||
| 87 | |||
| 88 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value); | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) | ||
| 92 | { | ||
| 93 | unsigned int cpu = 0; | ||
| 94 | |||
| 95 | if (mpic->flags & MPIC_PRIMARY) | ||
| 96 | cpu = hard_smp_processor_id(); | ||
| 97 | |||
| 98 | return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg); | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) | ||
| 102 | { | ||
| 103 | unsigned int cpu = 0; | ||
| 104 | |||
| 105 | if (mpic->flags & MPIC_PRIMARY) | ||
| 106 | cpu = hard_smp_processor_id(); | ||
| 107 | |||
| 108 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) | ||
| 112 | { | ||
| 113 | unsigned int isu = src_no >> mpic->isu_shift; | ||
| 114 | unsigned int idx = src_no & mpic->isu_mask; | ||
| 115 | |||
| 116 | return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], | ||
| 117 | reg + (idx * MPIC_IRQ_STRIDE)); | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, | ||
| 121 | unsigned int reg, u32 value) | ||
| 122 | { | ||
| 123 | unsigned int isu = src_no >> mpic->isu_shift; | ||
| 124 | unsigned int idx = src_no & mpic->isu_mask; | ||
| 125 | |||
| 126 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], | ||
| 127 | reg + (idx * MPIC_IRQ_STRIDE), value); | ||
| 128 | } | ||
| 129 | |||
/* Shorthand wrappers: each expects a local variable named `mpic` to be in
 * scope and derives the access endianness from mpic->flags.
 */
#define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
| 138 | |||
| 139 | |||
| 140 | /* | ||
| 141 | * Low level utility functions | ||
| 142 | */ | ||
| 143 | |||
| 144 | |||
| 145 | |||
| 146 | /* Check if we have one of those nice broken MPICs with a flipped endian on | ||
| 147 | * reads from IPI registers | ||
| 148 | */ | ||
| 149 | static void __init mpic_test_broken_ipi(struct mpic *mpic) | ||
| 150 | { | ||
| 151 | u32 r; | ||
| 152 | |||
| 153 | mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK); | ||
| 154 | r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0); | ||
| 155 | |||
| 156 | if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { | ||
| 157 | printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); | ||
| 158 | mpic->flags |= MPIC_BROKEN_IPI; | ||
| 159 | } | ||
| 160 | } | ||
| 161 | |||
| 162 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
| 163 | |||
| 164 | /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) | ||
| 165 | * to force the edge setting on the MPIC and do the ack workaround. | ||
| 166 | */ | ||
| 167 | static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no) | ||
| 168 | { | ||
| 169 | if (source_no >= 128 || !mpic->fixups) | ||
| 170 | return 0; | ||
| 171 | return mpic->fixups[source_no].base != NULL; | ||
| 172 | } | ||
| 173 | |||
/* HT/IO-APIC ack workaround for a broken-U3 source: re-select the entry in
 * the bridge's IO-APIC index register, then set the top bit in its data
 * register. The exact write/read sequence under fixup_lock matters.
 */
static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
	u32 tmp;

	spin_lock(&mpic->fixup_lock);
	/* select the IO-APIC entry for this source (index register) */
	writeb(0x11 + 2 * fixup->irq, fixup->base);
	/* read-modify-write the data register, setting the high bit
	 * (presumably the EOI/ack bit of this entry -- TODO confirm
	 * against the bridge documentation) */
	tmp = readl(fixup->base + 2);
	writel(tmp | 0x80000000ul, fixup->base + 2);
	/* config writes shouldn't be posted but let's be safe ... */
	(void)readl(fixup->base + 2);
	spin_unlock(&mpic->fixup_lock);
}
| 187 | |||
| 188 | |||
| 189 | static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase) | ||
| 190 | { | ||
| 191 | int i, irq; | ||
| 192 | u32 tmp; | ||
| 193 | |||
| 194 | printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase); | ||
| 195 | |||
| 196 | for (i=0; i < 24; i++) { | ||
| 197 | writeb(0x10 + 2*i, devbase + 0xf2); | ||
| 198 | tmp = readl(devbase + 0xf4); | ||
| 199 | if ((tmp & 0x1) || !(tmp & 0x20)) | ||
| 200 | continue; | ||
| 201 | irq = (tmp >> 16) & 0xff; | ||
| 202 | mpic->fixups[irq].irq = i; | ||
| 203 | mpic->fixups[irq].base = devbase + 0xf2; | ||
| 204 | } | ||
| 205 | } | ||
| 206 | |||
| 207 | static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase) | ||
| 208 | { | ||
| 209 | int i, irq; | ||
| 210 | u32 tmp; | ||
| 211 | |||
| 212 | printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase); | ||
| 213 | |||
| 214 | for (i=0; i < 4; i++) { | ||
| 215 | writeb(0x10 + 2*i, devbase + 0xba); | ||
| 216 | tmp = readl(devbase + 0xbc); | ||
| 217 | if ((tmp & 0x1) || !(tmp & 0x20)) | ||
| 218 | continue; | ||
| 219 | irq = (tmp >> 16) & 0xff; | ||
| 220 | mpic->fixups[irq].irq = i; | ||
| 221 | mpic->fixups[irq].base = devbase + 0xba; | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
/* Scan PCI config space for known AMD bridges and build the HT fixup
 * table used by the broken-U3 workarounds. Boot-time only.
 */
static void __init mpic_scan_ioapics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");

	/* Allocate fixups array (128 entries, indexed by MPIC source) */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map u3 config space. We assume all IO-APICs are on the primary bus
	 * and slot will never be above "0xf" so we only need to map 32k
	 */
	cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header type,
	 * vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) {
		/* each devfn owns a 256-byte config window */
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 vendor_id, device_id;
		int multifunc = 0;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;

		/* Check if it's a multifunction device (only really used
		 * to function 0 though
		 */
		multifunc = !!(hdr_type & 0x80);
		vendor_id = l & 0xffff;
		device_id = (l >> 16) & 0xffff;

		/* If a known device, go to fixup setup code */
		if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
			mpic_amd8111_read_irq(mpic, devbase);
		if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
			mpic_amd8131_read_irq(mpic, devbase);
	next:
		/* next device, if function 0 of a single-function device:
		 * skip the remaining 7 function slots (loop ++ adds the 8th) */
		if ((PCI_FUNC(devfn) == 0) && !multifunc)
			devfn += 7;
	}
}
| 281 | |||
| 282 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
| 283 | |||
| 284 | |||
| 285 | /* Find an mpic associated with a given linux interrupt */ | ||
| 286 | static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) | ||
| 287 | { | ||
| 288 | struct mpic *mpic = mpics; | ||
| 289 | |||
| 290 | while(mpic) { | ||
| 291 | /* search IPIs first since they may override the main interrupts */ | ||
| 292 | if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { | ||
| 293 | if (is_ipi) | ||
| 294 | *is_ipi = 1; | ||
| 295 | return mpic; | ||
| 296 | } | ||
| 297 | if (irq >= mpic->irq_offset && | ||
| 298 | irq < (mpic->irq_offset + mpic->irq_count)) { | ||
| 299 | if (is_ipi) | ||
| 300 | *is_ipi = 0; | ||
| 301 | return mpic; | ||
| 302 | } | ||
| 303 | mpic = mpic -> next; | ||
| 304 | } | ||
| 305 | return NULL; | ||
| 306 | } | ||
| 307 | |||
| 308 | /* Convert a cpu mask from logical to physical cpu numbers. */ | ||
| 309 | static inline u32 mpic_physmask(u32 cpumask) | ||
| 310 | { | ||
| 311 | int i; | ||
| 312 | u32 mask = 0; | ||
| 313 | |||
| 314 | for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) | ||
| 315 | mask |= (cpumask & 1) << get_hard_smp_processor_id(i); | ||
| 316 | return mask; | ||
| 317 | } | ||
| 318 | |||
| 319 | #ifdef CONFIG_SMP | ||
/* Get the mpic structure from the IPI number: the irq_desc handler for an
 * IPI points at the mpic's embedded hc_ipi, so container_of recovers the
 * enclosing struct mpic. */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
	return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
}
| 325 | #endif | ||
| 326 | |||
/* Get the mpic structure from the irq number: the irq_desc handler points
 * at the mpic's embedded hc_irq, so container_of recovers the enclosing
 * struct mpic. */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
}
| 332 | |||
/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_CPU_EOI, 0);
	/* dummy read to push the EOI write out to the controller before
	 * we return (WHOAMI is a harmless register to read) */
	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}
| 339 | |||
| 340 | #ifdef CONFIG_SMP | ||
| 341 | static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | ||
| 342 | { | ||
| 343 | struct mpic *mpic = dev_id; | ||
| 344 | |||
| 345 | smp_message_recv(irq - mpic->ipi_offset, regs); | ||
| 346 | return IRQ_HANDLED; | ||
| 347 | } | ||
| 348 | #endif /* CONFIG_SMP */ | ||
| 349 | |||
| 350 | /* | ||
| 351 | * Linux descriptor level callbacks | ||
| 352 | */ | ||
| 353 | |||
| 354 | |||
| 355 | static void mpic_enable_irq(unsigned int irq) | ||
| 356 | { | ||
| 357 | unsigned int loops = 100000; | ||
| 358 | struct mpic *mpic = mpic_from_irq(irq); | ||
| 359 | unsigned int src = irq - mpic->irq_offset; | ||
| 360 | |||
| 361 | DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); | ||
| 362 | |||
| 363 | mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, | ||
| 364 | mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK); | ||
| 365 | |||
| 366 | /* make sure mask gets to controller before we return to user */ | ||
| 367 | do { | ||
| 368 | if (!loops--) { | ||
| 369 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | ||
| 370 | break; | ||
| 371 | } | ||
| 372 | } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); | ||
| 373 | } | ||
| 374 | |||
| 375 | static void mpic_disable_irq(unsigned int irq) | ||
| 376 | { | ||
| 377 | unsigned int loops = 100000; | ||
| 378 | struct mpic *mpic = mpic_from_irq(irq); | ||
| 379 | unsigned int src = irq - mpic->irq_offset; | ||
| 380 | |||
| 381 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); | ||
| 382 | |||
| 383 | mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, | ||
| 384 | mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK); | ||
| 385 | |||
| 386 | /* make sure mask gets to controller before we return to user */ | ||
| 387 | do { | ||
| 388 | if (!loops--) { | ||
| 389 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | ||
| 390 | break; | ||
| 391 | } | ||
| 392 | } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); | ||
| 393 | } | ||
| 394 | |||
/* End-of-interrupt handling for normal sources. */
static void mpic_end_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);

	DBG("%s: end_irq: %d\n", mpic->name, irq);

	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

#ifdef CONFIG_MPIC_BROKEN_U3
	/* HT sources on broken U3 need the extra IO-APIC ack first */
	if (mpic->flags & MPIC_BROKEN_U3) {
		unsigned int src = irq - mpic->irq_offset;
		if (mpic_is_ht_interrupt(mpic, src))
			mpic_apic_end_irq(mpic, src);
	}
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_eoi(mpic);
}
| 416 | |||
| 417 | #ifdef CONFIG_SMP | ||
| 418 | |||
| 419 | static void mpic_enable_ipi(unsigned int irq) | ||
| 420 | { | ||
| 421 | struct mpic *mpic = mpic_from_ipi(irq); | ||
| 422 | unsigned int src = irq - mpic->ipi_offset; | ||
| 423 | |||
| 424 | DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); | ||
| 425 | mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); | ||
| 426 | } | ||
| 427 | |||
/* Intentional no-op: masking IPIs would break cross-CPU signalling. */
static void mpic_disable_ipi(unsigned int irq)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}
| 432 | |||
/* End-of-interrupt handling for IPIs: just EOI, no mask/unmask dance. */
static void mpic_end_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with SA_INTERRUPT as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}
| 446 | |||
| 447 | #endif /* CONFIG_SMP */ | ||
| 448 | |||
| 449 | static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | ||
| 450 | { | ||
| 451 | struct mpic *mpic = mpic_from_irq(irq); | ||
| 452 | |||
| 453 | cpumask_t tmp; | ||
| 454 | |||
| 455 | cpus_and(tmp, cpumask, cpu_online_map); | ||
| 456 | |||
| 457 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, | ||
| 458 | mpic_physmask(cpus_addr(tmp)[0])); | ||
| 459 | } | ||
| 460 | |||
| 461 | |||
| 462 | /* | ||
| 463 | * Exported functions | ||
| 464 | */ | ||
| 465 | |||
| 466 | |||
/* Allocate and partially initialize an MPIC instance.
 *
 * phys_addr:    physical base of the controller's register block
 * flags:        MPIC_* feature/quirk flags (MPIC_PRIMARY, endian, ...)
 * isu_size:     sources per external ISU, or 0 when the controller has
 *               its own internal source registers
 * irq_offset:   first linux irq number for normal sources
 * irq_count:    number of linux irqs (0 = fill in later from HW count)
 * ipi_offset:   first linux irq number for the 4 IPIs
 * senses:       optional per-source sense/polarity table
 * senses_count: number of entries in `senses`
 * name:         controller name used in messages and irq descriptors
 *
 * Returns the new mpic (boot-memory allocated, never freed) or NULL on
 * allocation failure. Hardware programming of sources happens later in
 * mpic_init().
 */
struct mpic * __init mpic_alloc(unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_offset,
				unsigned int irq_count,
				unsigned int ipi_offset,
				unsigned char *senses,
				unsigned int senses_count,
				const char *name)
{
	struct mpic	*mpic;
	u32		reg;
	const char	*vers;
	int		i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;


	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;

	/* Hook up the linux irq_chip callbacks */
	mpic->hc_irq.typename = name;
	mpic->hc_irq.enable = mpic_enable_irq;
	mpic->hc_irq.disable = mpic_disable_irq;
	mpic->hc_irq.end = mpic_end_irq;
	/* only the primary controller handles affinity */
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
	mpic->hc_ipi.typename = name;
	mpic->hc_ipi.enable = mpic_enable_ipi;
	mpic->hc_ipi.disable = mpic_disable_ipi;
	mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_offset = irq_offset;
	mpic->irq_count = irq_count;
	mpic->ipi_offset = ipi_offset;
	mpic->num_sources = 0; /* so far */
	mpic->senses = senses;
	mpic->senses_count = senses_count;

	/* Map the global registers; timers live in the same page */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset: set the RESET bit, then spin until HW clears it */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided: the controller's internal
	 * source registers become ISU bank 0 */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	/* shift/mask split source numbers into (bank, index); the shift
	 * rounds isu_size up to the next power of two */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
	       mpic->isu_shift, mpic->isu_mask);

	/* link into the global list */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY)
		mpic_primary = mpic;

	return mpic;
}
| 583 | |||
| 584 | void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | ||
| 585 | unsigned long phys_addr) | ||
| 586 | { | ||
| 587 | unsigned int isu_first = isu_num * mpic->isu_size; | ||
| 588 | |||
| 589 | BUG_ON(isu_num >= MPIC_MAX_ISU); | ||
| 590 | |||
| 591 | mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size); | ||
| 592 | if ((isu_first + mpic->isu_size) > mpic->num_sources) | ||
| 593 | mpic->num_sources = isu_first + mpic->isu_size; | ||
| 594 | } | ||
| 595 | |||
/* Install `handler` as the cascade handler for linux irq `irq` on the
 * mpic that owns it. The handler is consulted in mpic_get_one_irq().
 * NOTE(review): mpic_find() can return NULL for an unknown irq and is
 * dereferenced unchecked below -- callers must pass a valid irq.
 */
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
			       void *data)
{
	struct mpic *mpic = mpic_find(irq, NULL);
	unsigned long flags;

	/* Synchronization here is a bit dodgy, so don't try to replace cascade
	 * interrupts on the fly too often ... but normally it's set up at boot.
	 */
	spin_lock_irqsave(&mpic_lock, flags);
	if (mpic->cascade)
		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
	/* clear ->cascade first, then publish vec/data, then the handler:
	 * the wmb()s order these stores for mpic_get_one_irq() readers */
	mpic->cascade = NULL;
	wmb();
	mpic->cascade_vec = irq - mpic->irq_offset;
	mpic->cascade_data = data;
	wmb();
	mpic->cascade = handler;
	mpic_enable_irq(irq);
	spin_unlock_irqrestore(&mpic_lock, flags);
}
| 617 | |||
| 618 | void __init mpic_init(struct mpic *mpic) | ||
| 619 | { | ||
| 620 | int i; | ||
| 621 | |||
| 622 | BUG_ON(mpic->num_sources == 0); | ||
| 623 | |||
| 624 | printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); | ||
| 625 | |||
| 626 | /* Set current processor priority to max */ | ||
| 627 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); | ||
| 628 | |||
| 629 | /* Initialize timers: just disable them all */ | ||
| 630 | for (i = 0; i < 4; i++) { | ||
| 631 | mpic_write(mpic->tmregs, | ||
| 632 | i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0); | ||
| 633 | mpic_write(mpic->tmregs, | ||
| 634 | i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI, | ||
| 635 | MPIC_VECPRI_MASK | | ||
| 636 | (MPIC_VEC_TIMER_0 + i)); | ||
| 637 | } | ||
| 638 | |||
| 639 | /* Initialize IPIs to our reserved vectors and mark them disabled for now */ | ||
| 640 | mpic_test_broken_ipi(mpic); | ||
| 641 | for (i = 0; i < 4; i++) { | ||
| 642 | mpic_ipi_write(i, | ||
| 643 | MPIC_VECPRI_MASK | | ||
| 644 | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | | ||
| 645 | (MPIC_VEC_IPI_0 + i)); | ||
| 646 | #ifdef CONFIG_SMP | ||
| 647 | if (!(mpic->flags & MPIC_PRIMARY)) | ||
| 648 | continue; | ||
| 649 | irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU; | ||
| 650 | irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi; | ||
| 651 | #endif /* CONFIG_SMP */ | ||
| 652 | } | ||
| 653 | |||
| 654 | /* Initialize interrupt sources */ | ||
| 655 | if (mpic->irq_count == 0) | ||
| 656 | mpic->irq_count = mpic->num_sources; | ||
| 657 | |||
| 658 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
| 659 | /* Do the ioapic fixups on U3 broken mpic */ | ||
| 660 | DBG("MPIC flags: %x\n", mpic->flags); | ||
| 661 | if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) | ||
| 662 | mpic_scan_ioapics(mpic); | ||
| 663 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
| 664 | |||
| 665 | for (i = 0; i < mpic->num_sources; i++) { | ||
| 666 | /* start with vector = source number, and masked */ | ||
| 667 | u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | ||
| 668 | int level = 0; | ||
| 669 | |||
| 670 | /* if it's an IPI, we skip it */ | ||
| 671 | if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) && | ||
| 672 | (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4)) | ||
| 673 | continue; | ||
| 674 | |||
| 675 | /* do senses munging */ | ||
| 676 | if (mpic->senses && i < mpic->senses_count) { | ||
| 677 | if (mpic->senses[i] & IRQ_SENSE_LEVEL) | ||
| 678 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | ||
| 679 | if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) | ||
| 680 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | ||
| 681 | } else | ||
| 682 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | ||
| 683 | |||
| 684 | /* remember if it was a level interrupts */ | ||
| 685 | level = (vecpri & MPIC_VECPRI_SENSE_LEVEL); | ||
| 686 | |||
| 687 | /* deal with broken U3 */ | ||
| 688 | if (mpic->flags & MPIC_BROKEN_U3) { | ||
| 689 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
| 690 | if (mpic_is_ht_interrupt(mpic, i)) { | ||
| 691 | vecpri &= ~(MPIC_VECPRI_SENSE_MASK | | ||
| 692 | MPIC_VECPRI_POLARITY_MASK); | ||
| 693 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | ||
| 694 | } | ||
| 695 | #else | ||
| 696 | printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n"); | ||
| 697 | #endif | ||
| 698 | } | ||
| 699 | |||
| 700 | DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri, | ||
| 701 | (level != 0)); | ||
| 702 | |||
| 703 | /* init hw */ | ||
| 704 | mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); | ||
| 705 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | ||
| 706 | 1 << hard_smp_processor_id()); | ||
| 707 | |||
| 708 | /* init linux descriptors */ | ||
| 709 | if (i < mpic->irq_count) { | ||
| 710 | irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0; | ||
| 711 | irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq; | ||
| 712 | } | ||
| 713 | } | ||
| 714 | |||
| 715 | /* Init spurrious vector */ | ||
| 716 | mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS); | ||
| 717 | |||
| 718 | /* Disable 8259 passthrough */ | ||
| 719 | mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, | ||
| 720 | mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) | ||
| 721 | | MPIC_GREG_GCONF_8259_PTHROU_DIS); | ||
| 722 | |||
| 723 | /* Set current processor priority to 0 */ | ||
| 724 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); | ||
| 725 | } | ||
| 726 | |||
| 727 | |||
| 728 | |||
| 729 | void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | ||
| 730 | { | ||
| 731 | int is_ipi; | ||
| 732 | struct mpic *mpic = mpic_find(irq, &is_ipi); | ||
| 733 | unsigned long flags; | ||
| 734 | u32 reg; | ||
| 735 | |||
| 736 | spin_lock_irqsave(&mpic_lock, flags); | ||
| 737 | if (is_ipi) { | ||
| 738 | reg = mpic_ipi_read(irq - mpic->ipi_offset) & MPIC_VECPRI_PRIORITY_MASK; | ||
| 739 | mpic_ipi_write(irq - mpic->ipi_offset, | ||
| 740 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | ||
| 741 | } else { | ||
| 742 | reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI) | ||
| 743 | & MPIC_VECPRI_PRIORITY_MASK; | ||
| 744 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, | ||
| 745 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | ||
| 746 | } | ||
| 747 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
| 748 | } | ||
| 749 | |||
| 750 | unsigned int mpic_irq_get_priority(unsigned int irq) | ||
| 751 | { | ||
| 752 | int is_ipi; | ||
| 753 | struct mpic *mpic = mpic_find(irq, &is_ipi); | ||
| 754 | unsigned long flags; | ||
| 755 | u32 reg; | ||
| 756 | |||
| 757 | spin_lock_irqsave(&mpic_lock, flags); | ||
| 758 | if (is_ipi) | ||
| 759 | reg = mpic_ipi_read(irq - mpic->ipi_offset); | ||
| 760 | else | ||
| 761 | reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); | ||
| 762 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
| 763 | return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; | ||
| 764 | } | ||
| 765 | |||
/* Per-CPU bring-up: add this CPU to every source's destination mask (when
 * irqs are distributed) and open this CPU's task priority. No-op on UP.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 (accept all priorities) */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
| 797 | |||
/* Return this CPU's current task priority on the primary MPIC. */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}
| 804 | |||
| 805 | void mpic_cpu_set_priority(int prio) | ||
| 806 | { | ||
| 807 | struct mpic *mpic = mpic_primary; | ||
| 808 | |||
| 809 | prio &= MPIC_CPU_TASKPRI_MASK; | ||
| 810 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio); | ||
| 811 | } | ||
| 812 | |||
/*
 * XXX: someone who knows mpic should check this.
 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
 * or can we reset the mpic in the new kernel?
 */
/* Per-CPU teardown (e.g. for kexec): remove this CPU from every source's
 * destination mask and raise its task priority to mask everything. */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs. */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}
| 840 | |||
| 841 | |||
| 842 | void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) | ||
| 843 | { | ||
| 844 | struct mpic *mpic = mpic_primary; | ||
| 845 | |||
| 846 | BUG_ON(mpic == NULL); | ||
| 847 | |||
| 848 | DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); | ||
| 849 | |||
| 850 | mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10, | ||
| 851 | mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); | ||
| 852 | } | ||
| 853 | |||
/* Acknowledge the highest-priority pending interrupt on `mpic` and return
 * its linux irq number, or -1 for a spurious interrupt. Cascaded
 * interrupts are delegated to the registered cascade handler.
 */
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 irq;

	/* the INTACK read both returns and acks the vector */
	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);

	if (mpic->cascade && irq == mpic->cascade_vec) {
		DBG("%s: cascading ...\n", mpic->name);
		irq = mpic->cascade(regs, mpic->cascade_data);
		/* EOI the cascade source here; the cascaded irq is
		 * returned for normal processing */
		mpic_eoi(mpic);
		return irq;
	}
	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
		return -1;
	/* vectors below the IPI block are normal sources */
	if (irq < MPIC_VEC_IPI_0)
		return irq + mpic->irq_offset;
	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}
| 874 | |||
/* Platform entry point: fetch the next pending irq from the primary MPIC. */
int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}
| 883 | |||
| 884 | |||
| 885 | #ifdef CONFIG_SMP | ||
| 886 | void mpic_request_ipis(void) | ||
| 887 | { | ||
| 888 | struct mpic *mpic = mpic_primary; | ||
| 889 | |||
| 890 | BUG_ON(mpic == NULL); | ||
| 891 | |||
| 892 | printk("requesting IPIs ... \n"); | ||
| 893 | |||
| 894 | /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ | ||
| 895 | request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT, | ||
| 896 | "IPI0 (call function)", mpic); | ||
| 897 | request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT, | ||
| 898 | "IPI1 (reschedule)", mpic); | ||
| 899 | request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT, | ||
| 900 | "IPI2 (unused)", mpic); | ||
| 901 | request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT, | ||
| 902 | "IPI3 (debugger break)", mpic); | ||
| 903 | |||
| 904 | printk("IPIs requested... \n"); | ||
| 905 | } | ||
| 906 | |||
| 907 | void smp_mpic_message_pass(int target, int msg) | ||
| 908 | { | ||
| 909 | /* make sure we're sending something that translates to an IPI */ | ||
| 910 | if ((unsigned int)msg > 3) { | ||
| 911 | printk("SMP %d: smp_message_pass: unknown msg %d\n", | ||
| 912 | smp_processor_id(), msg); | ||
| 913 | return; | ||
| 914 | } | ||
| 915 | switch (target) { | ||
| 916 | case MSG_ALL: | ||
| 917 | mpic_send_ipi(msg, 0xffffffff); | ||
| 918 | break; | ||
| 919 | case MSG_ALL_BUT_SELF: | ||
| 920 | mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id())); | ||
| 921 | break; | ||
| 922 | default: | ||
| 923 | mpic_send_ipi(msg, 1 << target); | ||
| 924 | break; | ||
| 925 | } | ||
| 926 | } | ||
| 927 | #endif /* CONFIG_SMP */ | ||
