author     Linus Torvalds <torvalds@g5.osdl.org>   2006-03-20 14:57:50 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-03-20 14:57:50 -0500
commit     c4a1745aa09fc110afdefea0e5d025043e348bae
tree       6d28dc3a0c1bf18437b3d49f28e5c81b850cdb2f   /arch/sparc64/kernel/irq.c
parent     88dcb91177cfa5b26143a29074389a2aa259c7cf
parent     ac0eb3eb7e54b700386068be025a43d2a3958ee5
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6: (230 commits)
[SPARC64]: Update defconfig.
[SPARC64]: Fix 2 bugs in huge page support.
[SPARC64]: CONFIG_BLK_DEV_RAM fix
[SPARC64]: Optimized TSB table initialization.
[SPARC64]: Allow CONFIG_MEMORY_HOTPLUG to build.
[SPARC64]: Use SLAB caches for TSB tables.
[SPARC64]: Don't kill the page allocator when growing a TSB.
[SPARC64]: Randomize mm->mmap_base when PF_RANDOMIZE is set.
[SPARC64]: Increase top of 32-bit process stack.
[SPARC64]: Top-down address space allocation for 32-bit tasks.
[SPARC64] bbc_i2c: Fix cpu check and add missing module license.
[SPARC64]: Fix and re-enable dynamic TSB sizing.
[SUNSU]: Fix missing spinlock initialization.
[TG3]: Do not try to access NIC_SRAM_DATA_SIG on Sun parts.
[SPARC64]: First cut at VIS simulator for Niagara.
[SPARC64]: Fix system type in /proc/cpuinfo and remove bogus OBP check.
[SPARC64]: Add SMT scheduling support for Niagara.
[SPARC64]: Fix 32-bit truncation which broke sparsemem.
[SPARC64]: Move over to sparsemem.
[SPARC64]: Fix new context version SMP handling.
...
Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--  arch/sparc64/kernel/irq.c | 339
1 file changed, 252 insertions(+), 87 deletions(-)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 233526ba3abe..8c93ba655b33 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -39,6 +40,7 @@
 #include <asm/cache.h>
 #include <asm/cpudata.h>
 #include <asm/auxio.h>
+#include <asm/head.h>
 
 #ifdef CONFIG_SMP
 static void distribute_irqs(void);
@@ -136,12 +138,48 @@ out_unlock:
 	return 0;
 }
 
+extern unsigned long real_hard_smp_processor_id(void);
+
+static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
+{
+	unsigned int tid;
+
+	if (this_is_starfire) {
+		tid = starfire_translate(imap, cpuid);
+		tid <<= IMAP_TID_SHIFT;
+		tid &= IMAP_TID_UPA;
+	} else {
+		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+			unsigned long ver;
+
+			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+			if ((ver >> 32UL) == __JALAPENO_ID ||
+			    (ver >> 32UL) == __SERRANO_ID) {
+				tid = cpuid << IMAP_TID_SHIFT;
+				tid &= IMAP_TID_JBUS;
+			} else {
+				unsigned int a = cpuid & 0x1f;
+				unsigned int n = (cpuid >> 5) & 0x1f;
+
+				tid = ((a << IMAP_AID_SHIFT) |
+				       (n << IMAP_NID_SHIFT));
+				tid &= (IMAP_AID_SAFARI |
+					IMAP_NID_SAFARI);;
+			}
+		} else {
+			tid = cpuid << IMAP_TID_SHIFT;
+			tid &= IMAP_TID_UPA;
+		}
+	}
+
+	return tid;
+}
+
 /* Now these are always passed a true fully specified sun4u INO. */
 void enable_irq(unsigned int irq)
 {
 	struct ino_bucket *bucket = __bucket(irq);
-	unsigned long imap;
-	unsigned long tid;
+	unsigned long imap, cpuid;
 
 	imap = bucket->imap;
 	if (imap == 0UL)
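The helper added above, sun4u_compute_tid(), centralizes the interrupt-target encoding that enable_irq() previously open-coded: Starfire goes through starfire_translate(), JBUS parts (Jalapeno/Serrano) put the CPU id straight into the TID field, Safari parts split the CPU id into a 5-bit agent id and a 5-bit node id, and plain UPA parts use the MID. The fragment below is only an illustrative sketch of the Safari split, using placeholder shift values rather than the real IMAP_* constants from asm/irq.h.

/* Sketch only: the EX_* values are placeholders, not the asm/irq.h masks. */
#define EX_AID_SHIFT	26	/* stands in for IMAP_AID_SHIFT */
#define EX_NID_SHIFT	21	/* stands in for IMAP_NID_SHIFT */

static unsigned int example_safari_tid(unsigned long cpuid)
{
	unsigned int a = cpuid & 0x1f;		/* agent id: low 5 bits  */
	unsigned int n = (cpuid >> 5) & 0x1f;	/* node id: next 5 bits  */

	/* e.g. cpuid 37 = 0b100101 -> agent 5, node 1 */
	return (a << EX_AID_SHIFT) | (n << EX_NID_SHIFT);
}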
@@ -149,47 +187,38 @@ void enable_irq(unsigned int irq)
 
 	preempt_disable();
 
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		unsigned long ver;
-
-		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-		if ((ver >> 32) == 0x003e0016) {
-			/* We set it to our JBUS ID. */
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (tid)
-					     : "i" (ASI_JBUS_CONFIG));
-			tid = ((tid & (0x1fUL<<17)) << 9);
-			tid &= IMAP_TID_JBUS;
-		} else {
-			/* We set it to our Safari AID. */
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (tid)
-					     : "i" (ASI_SAFARI_CONFIG));
-			tid = ((tid & (0x3ffUL<<17)) << 9);
-			tid &= IMAP_AID_SAFARI;
-		}
-	} else if (this_is_starfire == 0) {
-		/* We set it to our UPA MID. */
-		__asm__ __volatile__("ldxa [%%g0] %1, %0"
-				     : "=r" (tid)
-				     : "i" (ASI_UPA_CONFIG));
-		tid = ((tid & UPA_CONFIG_MID) << 9);
-		tid &= IMAP_TID_UPA;
+	/* This gets the physical processor ID, even on uniprocessor,
+	 * so we can always program the interrupt target correctly.
+	 */
+	cpuid = real_hard_smp_processor_id();
+
+	if (tlb_type == hypervisor) {
+		unsigned int ino = __irq_ino(irq);
+		int err;
+
+		err = sun4v_intr_settarget(ino, cpuid);
+		if (err != HV_EOK)
+			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
+			       ino, cpuid, err);
+		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+		if (err != HV_EOK)
+			printk("sun4v_intr_setenabled(%x): err(%d)\n",
+			       ino, err);
 	} else {
-		tid = (starfire_translate(imap, smp_processor_id()) << 26);
-		tid &= IMAP_TID_UPA;
+		unsigned int tid = sun4u_compute_tid(imap, cpuid);
+
+		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
+		 * of this SYSIO's preconfigured IGN in the SYSIO Control
+		 * Register, the hardware just mirrors that value here.
+		 * However for Graphics and UPA Slave devices the full
+		 * IMAP_INR field can be set by the programmer here.
+		 *
+		 * Things like FFB can now be handled via the new IRQ
+		 * mechanism.
+		 */
+		upa_writel(tid | IMAP_VALID, imap);
 	}
 
-	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
-	 * of this SYSIO's preconfigured IGN in the SYSIO Control
-	 * Register, the hardware just mirrors that value here.
-	 * However for Graphics and UPA Slave devices the full
-	 * IMAP_INR field can be set by the programmer here.
-	 *
-	 * Things like FFB can now be handled via the new IRQ mechanism.
-	 */
-	upa_writel(tid | IMAP_VALID, imap);
-
 	preempt_enable();
 }
 
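With this hunk, enable_irq() splits into two paths: on sun4v the target CPU and the enable state are programmed through hypervisor calls checked against HV_EOK, while sun4u keeps writing the computed TID into the IMAP register directly. Below is a minimal sketch of the sun4v sequence as a standalone helper; it is hypothetical and not part of the patch.

/* Hypothetical helper mirroring the new sun4v path in enable_irq():
 * route the interrupt source 'ino' to 'cpuid', then enable it.
 * Returns the first hypervisor error code, or HV_EOK on success.
 */
static int example_sun4v_route_and_enable(unsigned int ino, unsigned long cpuid)
{
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		return err;

	return sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
}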
@@ -201,16 +230,26 @@ void disable_irq(unsigned int irq)
 
 	imap = bucket->imap;
 	if (imap != 0UL) {
-		u32 tmp;
+		if (tlb_type == hypervisor) {
+			unsigned int ino = __irq_ino(irq);
+			int err;
+
+			err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+			if (err != HV_EOK)
+				printk("sun4v_intr_setenabled(%x): "
+				       "err(%d)\n", ino, err);
+		} else {
+			u32 tmp;
 
 		/* NOTE: We do not want to futz with the IRQ clear registers
 		 *       and move the state to IDLE, the SCSI code does call
 		 *       disable_irq() to assure atomicity in the queue cmd
 		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
 		 */
 		tmp = upa_readl(imap);
 		tmp &= ~IMAP_VALID;
 		upa_writel(tmp, imap);
+		}
 	}
 }
 
@@ -248,6 +287,8 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 		return __irq(&pil0_dummy_bucket);
 	}
 
+	BUG_ON(tlb_type == hypervisor);
+
 	/* RULE: Both must be specified in all other cases. */
 	if (iclr == 0UL || imap == 0UL) {
 		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
@@ -275,12 +316,11 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 		goto out;
 	}
 
-	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
+	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
 	if (!bucket->irq_info) {
 		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
 		prom_halt();
 	}
-	memset(bucket->irq_info, 0, sizeof(struct irq_desc));
 
 	/* Ok, looks good, set it up.  Don't touch the irq_chain or
 	 * the pending flag.
@@ -294,6 +334,37 @@ out:
 	return __irq(bucket);
 }
 
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags)
+{
+	struct ino_bucket *bucket;
+	unsigned long sysino;
+
+	sysino = sun4v_devino_to_sysino(devhandle, devino);
+
+	bucket = &ivector_table[sysino];
+
+	/* Catch accidental accesses to these things.  IMAP/ICLR handling
+	 * is done by hypervisor calls on sun4v platforms, not by direct
+	 * register accesses.
+	 *
+	 * But we need to make them look unique for the disable_irq() logic
+	 * in free_irq().
+	 */
+	bucket->imap = ~0UL - sysino;
+	bucket->iclr = ~0UL - sysino;
+
+	bucket->pil = pil;
+	bucket->flags = flags;
+
+	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
+	if (!bucket->irq_info) {
+		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
+		prom_halt();
+	}
+
+	return __irq(bucket);
+}
+
 static void atomic_bucket_insert(struct ino_bucket *bucket)
 {
 	unsigned long pstate;
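sun4v_build_irq() is the sun4v counterpart of build_irq(): instead of IMAP/ICLR register addresses it takes a device handle and device INO, converts them to a system interrupt number with sun4v_devino_to_sysino(), and fills the corresponding ivector_table bucket. A hypothetical caller, not shown anywhere in this diff, might use it roughly as sketched below; the pil and flags values and the 2.6.16-era request_irq() conventions are illustrative assumptions.

/* Hypothetical usage sketch; pil/flags values are illustrative. */
static int example_attach_sun4v_handler(u32 devhandle, unsigned int devino,
					irqreturn_t (*handler)(int, void *, struct pt_regs *),
					void *dev_id)
{
	unsigned int irq = sun4v_build_irq(devhandle, devino, 5 /* pil */, 0 /* flags */);

	return request_irq(irq, handler, SA_SHIRQ, "example-device", dev_id);
}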
@@ -482,7 +553,6 @@ void free_irq(unsigned int irq, void *dev_id)
 	bucket = __bucket(irq);
 	if (bucket != &pil0_dummy_bucket) {
 		struct irq_desc *desc = bucket->irq_info;
-		unsigned long imap = bucket->imap;
 		int ent, i;
 
 		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
@@ -495,6 +565,8 @@ void free_irq(unsigned int irq, void *dev_id)
 		}
 
 		if (!desc->action_active_mask) {
+			unsigned long imap = bucket->imap;
+
 			/* This unique interrupt source is now inactive. */
 			bucket->flags &= ~IBF_ACTIVE;
 
@@ -592,7 +664,18 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
 			break;
 	}
 	if (bp->pil != 0) {
-		upa_writel(ICLR_IDLE, bp->iclr);
+		if (tlb_type == hypervisor) {
+			unsigned int ino = __irq_ino(bp);
+			int err;
+
+			err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+			if (err != HV_EOK)
+				printk("sun4v_intr_setstate(%x): "
+				       "err(%d)\n", ino, err);
+		} else {
+			upa_writel(ICLR_IDLE, bp->iclr);
+		}
+
 		/* Test and add entropy */
 		if (random & SA_SAMPLE_RANDOM)
 			add_interrupt_randomness(irq);
@@ -694,7 +777,7 @@ irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
 		val = readb(auxio_register);
 		val |= AUXIO_AUX1_FTCNT;
 		writeb(val, auxio_register);
-		val &= AUXIO_AUX1_FTCNT;
+		val &= ~AUXIO_AUX1_FTCNT;
 		writeb(val, auxio_register);
 
 		doing_pdma = 0;
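The one-character floppy change above fixes a mask slip: ANDing with AUXIO_AUX1_FTCNT keeps only that bit, still set, so the second writeb() never deasserted the terminal-count strobe and also discarded the other AUX1 bits. ANDing with the complement clears just FTCNT and preserves the rest. A tiny worked example, using a placeholder bit value since the real AUXIO_AUX1_FTCNT definition lives in asm/auxio.h:

#define EX_FTCNT	0x01u	/* placeholder for AUXIO_AUX1_FTCNT */

static unsigned char example_pulse_ftcnt(unsigned char val)
{
	val |= EX_FTCNT;	/* first write:  e.g. 0xa4 -> 0xa5, strobe asserted   */
	/* old code: val &= EX_FTCNT;   -> 0x01, strobe still set, other bits lost */
	val &= ~EX_FTCNT;	/* fixed code:   0xa5 -> 0xa4, strobe cleared         */
	return val;		/* second write programs this value back              */
}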
@@ -727,25 +810,23 @@ EXPORT_SYMBOL(probe_irq_off);
 static int retarget_one_irq(struct irqaction *p, int goal_cpu)
 {
 	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
-	unsigned long imap = bucket->imap;
-	unsigned int tid;
 
 	while (!cpu_online(goal_cpu)) {
 		if (++goal_cpu >= NR_CPUS)
 			goal_cpu = 0;
 	}
 
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		tid = goal_cpu << 26;
-		tid &= IMAP_AID_SAFARI;
-	} else if (this_is_starfire == 0) {
-		tid = goal_cpu << 26;
-		tid &= IMAP_TID_UPA;
+	if (tlb_type == hypervisor) {
+		unsigned int ino = __irq_ino(bucket);
+
+		sun4v_intr_settarget(ino, goal_cpu);
+		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
 	} else {
-		tid = (starfire_translate(imap, goal_cpu) << 26);
-		tid &= IMAP_TID_UPA;
+		unsigned long imap = bucket->imap;
+		unsigned int tid = sun4u_compute_tid(imap, goal_cpu);
+
+		upa_writel(tid | IMAP_VALID, imap);
 	}
-	upa_writel(tid | IMAP_VALID, imap);
 
 	do {
 		if (++goal_cpu >= NR_CPUS)
@@ -848,33 +929,114 @@ static void kill_prom_timer(void)
 
 void init_irqwork_curcpu(void)
 {
-	register struct irq_work_struct *workp asm("o2");
-	register unsigned long tmp asm("o3");
 	int cpu = hard_smp_processor_id();
 
-	memset(__irq_work + cpu, 0, sizeof(*workp));
-
-	/* Make sure we are called with PSTATE_IE disabled. */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     : "=r" (tmp));
-	if (tmp & PSTATE_IE) {
-		prom_printf("BUG: init_irqwork_curcpu() called with "
-			    "PSTATE_IE enabled, bailing.\n");
-		__asm__ __volatile__("mov %%i7, %0\n\t"
-				     : "=r" (tmp));
-		prom_printf("BUG: Called from %lx\n", tmp);
+	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
+}
+
+static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
+{
+	unsigned long num_entries = 128;
+	unsigned long status;
+
+	status = sun4v_cpu_qconf(type, paddr, num_entries);
+	if (status != HV_EOK) {
+		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
+			    "err %lu\n", type, paddr, num_entries, status);
 		prom_halt();
 	}
+}
 
-	/* Set interrupt globals. */
-	workp = &__irq_work[cpu];
-	__asm__ __volatile__(
-	"rdpr %%pstate, %0\n\t"
-	"wrpr %0, %1, %%pstate\n\t"
-	"mov %2, %%g6\n\t"
-	"wrpr %0, 0x0, %%pstate\n\t"
-	: "=&r" (tmp)
-	: "i" (PSTATE_IG), "r" (workp));
+static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
+{
+	struct trap_per_cpu *tb = &trap_block[this_cpu];
+
+	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
+	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
+	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
+	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+}
+
+static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
+{
+	void *page;
+
+	if (use_bootmem)
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+	else
+		page = (void *) get_zeroed_page(GFP_ATOMIC);
+
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
+		prom_halt();
+	}
+
+	*pa_ptr = __pa(page);
+}
+
+static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
+{
+	void *page;
+
+	if (use_bootmem)
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+	else
+		page = (void *) get_zeroed_page(GFP_ATOMIC);
+
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+		prom_halt();
+	}
+
+	*pa_ptr = __pa(page);
+}
+
+static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
+{
+#ifdef CONFIG_SMP
+	void *page;
+
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+
+	if (use_bootmem)
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+	else
+		page = (void *) get_zeroed_page(GFP_ATOMIC);
+
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_halt();
+	}
+
+	tb->cpu_mondo_block_pa = __pa(page);
+	tb->cpu_list_pa = __pa(page + 64);
+#endif
+}
+
+/* Allocate and register the mondo and error queues for this cpu. */
+void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
+{
+	struct trap_per_cpu *tb = &trap_block[cpu];
+
+	if (alloc) {
+		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
+		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
+		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
+		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
+		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
+		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
+
+		init_cpu_send_mondo_info(tb, use_bootmem);
+	}
+
+	if (load) {
+		if (cpu != hard_smp_processor_id()) {
+			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
+				    cpu, hard_smp_processor_id());
+			prom_halt();
+		}
+		sun4v_register_mondo_queues(cpu);
+	}
 }
 
 /* Only invoked on boot processor. */
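The bulk of this hunk gives each CPU its sun4v mondo and error queues: one page per queue, allocated from bootmem during early boot or with get_zeroed_page(GFP_ATOMIC) later, and handed to the hypervisor as a 128-entry queue via sun4v_cpu_qconf(). The BUILD_BUG_ON in init_cpu_send_mondo_info() guards the layout of the extra SMP page, which packs a 64-byte cpu-mondo block followed by one u16 per target CPU. A standalone check of that arithmetic, assuming the 8K sparc64 base page and an illustrative NR_CPUS of 64:

/* Illustrative numbers only: 64 CPUs at 2 bytes each is 128 bytes,
 * which easily fits in 8192 - 64 = 8128 bytes after the mondo block.
 */
#define EX_PAGE_SIZE	8192
#define EX_NR_CPUS	64
#define EX_BLOCK_BYTES	64

_Static_assert(EX_NR_CPUS * sizeof(unsigned short) <= EX_PAGE_SIZE - EX_BLOCK_BYTES,
	       "cpu list must fit in the mondo page after the 64-byte block");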
@@ -884,6 +1046,9 @@ void __init init_IRQ(void)
 	kill_prom_timer();
 	memset(&ivector_table[0], 0, sizeof(ivector_table));
 
+	if (tlb_type == hypervisor)
+		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
+
 	/* We need to clear any IRQ's pending in the soft interrupt
 	 * registers, a spurious one could be left around from the
 	 * PROM timer which we just disabled.