Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/head.S       |   3
-rw-r--r--  arch/sparc64/kernel/irq.c        |  16
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S | 349
-rw-r--r--  arch/sparc64/kernel/traps.c      | 184
-rw-r--r--  arch/sparc64/kernel/ttable.S     |   5
5 files changed, 554 insertions, 3 deletions
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index a304845f8c56..01980014aead 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -511,13 +511,14 @@ setup_tba:
 sparc64_boot_end:
 
 #include "systbls.S"
-#include "sun4v_tlb_miss.S"
 #include "ktlb.S"
 #include "tsb.S"
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"
 #include "entry.S"
+#include "sun4v_tlb_miss.S"
+#include "sun4v_ivec.S"
 
 /*
  * The following skip makes sure the trap table in ttable.S is aligned
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3c1a2139f1b9..ff201c007e0c 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -888,7 +888,19 @@ static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
 	}
 }
 
-/* Allocate and init the mondo queues for this cpu. */
+static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
+{
+	unsigned long page = get_zeroed_page(GFP_ATOMIC);
+
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+		prom_halt();
+	}
+
+	*pa_ptr = __pa(page);
+}
+
+/* Allocate and init the mondo and error queues for this cpu. */
 void __cpuinit sun4v_init_mondo_queues(void)
 {
 	int cpu = hard_smp_processor_id();
@@ -897,7 +909,9 @@ void __cpuinit sun4v_init_mondo_queues(void)
 	init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
 	init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
 	init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
+	init_one_kbuf(&tb->resum_kernel_buf_pa);
 	init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+	init_one_kbuf(&tb->nonresum_kernel_buf_pa);
 }
 
 /* Only invoked on boot processor. */
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
new file mode 100644
index 000000000000..d9d442017d3d
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -0,0 +1,349 @@
+/* sun4v_ivec.S: Sun4v interrupt vector handling.
+ *
+ * Copyright (C) 2006 <davem@davemloft.net>
+ */
+
+#include <asm/cpudata.h>
+#include <asm/intr_queue.h>
+
+	.text
+	.align	32
+
+sun4v_cpu_mondo:
+	/* Head offset in %g2, tail offset in %g4.
+	 * If they are the same, no work.
+	 */
+	mov	INTRQ_CPU_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_CPU_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get CPU mondo queue base phys address into %g7.  */
+	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+
+	/* Now get the cross-call arguments and handler PC, same
+	 * layout as sun4u:
+	 *
+	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
+	 *                  high half is context arg to MMU flushes, into %g5
+	 * 2nd 64-bit word: 64-bit arg, load into %g1
+	 * 3rd 64-bit word: 64-bit arg, load into %g7
+	 */
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
+	add	%g2, 0x8, %g2
+	srlx	%g3, 32, %g5
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
+	add	%g2, 0x8, %g2
+	srl	%g3, 0, %g3
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
+	add	%g2, 0x40 - 0x8 - 0x8, %g2
+
+	/* Update queue head pointer.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_CPU_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	jmpl	%g3, %g0
+	 nop
+
+sun4v_cpu_mondo_queue_empty:
+	retry
+
+sun4v_dev_mondo:
+	/* Head offset in %g2, tail offset in %g4.  */
+	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_dev_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get DEV mondo queue base phys address into %g5.  */
+	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
+
+	/* Load IVEC into %g3.  */
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	add	%g2, 0x40, %g2
+
+	/* XXX There can be a full 64-byte block of data here.
+	 * XXX This is how we can get at MSI vector data.
+	 * XXX Currently we do not capture this, but when we do we'll
+	 * XXX need to add a 64-byte storage area in the struct ino_bucket
+	 * XXX or the struct irq_desc.
+	 */
+
+	/* Update queue head pointer, this frees up some registers.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	/* Get &__irq_work[smp_processor_id()] into %g1.  */
+	sethi	%hi(__irq_work), %g4
+	sllx	%g1, 6, %g1
+	or	%g4, %lo(__irq_work), %g4
+	add	%g4, %g1, %g1
+
+	/* Get &ivector_table[IVEC] into %g4.  */
+	sethi	%hi(ivector_table), %g4
+	sllx	%g3, 5, %g3
+	or	%g4, %lo(ivector_table), %g4
+	add	%g4, %g3, %g4
+
+	/* Load IRQ %pil into %g5.  */
+	ldub	[%g4 + 0x04], %g5
+
+	/* Insert ivector_table[] entry into __irq_work[] queue.  */
+	sllx	%g5, 2, %g3
+	lduw	[%g1 + %g3], %g2	/* g2 = irq_work(cpu, pil) */
+	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
+	stw	%g4, [%g1 + %g3]	/* irq_work(cpu, pil) = bucket */
+
+	/* Signal the interrupt by setting (1 << pil) in %softint.  */
+	mov	1, %g2
+	sllx	%g2, %g5, %g2
+	wr	%g2, 0x0, %set_softint
+
+sun4v_dev_mondo_queue_empty:
+	retry
+
+sun4v_res_mondo:
+	/* Head offset in %g2, tail offset in %g4.  */
+	mov	INTRQ_RESUM_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_RESUM_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_res_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get RES mondo queue base phys address into %g5.  */
+	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
+
+	/* Get RES kernel buffer base phys address into %g7.  */
+	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
+
+	/* If the first word is non-zero, queue is full.  */
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
+	brnz,pn	%g1, sun4v_res_mondo_queue_full
+	 nop
+
+	/* Remember this entry's offset in %g1.  */
+	mov	%g2, %g1
+
+	/* Copy 64-byte queue entry into kernel buffer.  */
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+
+	/* Update queue head pointer.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_RESUM_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	/* Disable interrupts and save register state so we can call
+	 * C code.  The etrap handling will leave %g4 in %l4 for us
+	 * when it's done.
+	 */
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	mov	%g1, %g4
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	/* Log the event.  */
+	add	%sp, PTREGS_OFF, %o0
+	call	sun4v_resum_error
+	 mov	%l4, %o1
+
+	/* Return from trap.  */
+	ba,pt	%xcc, rtrap_irq
+	 nop
+
+sun4v_res_mondo_queue_empty:
+	retry
+
+sun4v_res_mondo_queue_full:
+	/* The queue is full, consolidate our damage by setting
+	 * the head equal to the tail.  We'll just trap again otherwise.
+	 * Call C code to log the event.
+	 */
+	mov	INTRQ_RESUM_MONDO_HEAD, %g2
+	stxa	%g4, [%g2] ASI_QUEUE
+	membar	#Sync
+
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	call	sun4v_resum_overflow
+	 add	%sp, PTREGS_OFF, %o0
+
+	ba,pt	%xcc, rtrap_irq
+	 nop
+
+sun4v_nonres_mondo:
+	/* Head offset in %g2, tail offset in %g4.  */
+	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get NON-RES mondo queue base phys address into %g5.  */
+	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
+
+	/* Get NON-RES kernel buffer base phys address into %g7.  */
+	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
+
+	/* If the first word is non-zero, queue is full.  */
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
+	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
+	 nop
+
+	/* Remember this entry's offset in %g1.  */
+	mov	%g2, %g1
+
+	/* Copy 64-byte queue entry into kernel buffer.  */
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+
+	/* Update queue head pointer.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	/* Disable interrupts and save register state so we can call
+	 * C code.  The etrap handling will leave %g4 in %l4 for us
+	 * when it's done.
+	 */
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	mov	%g1, %g4
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	/* Log the event.  */
+	add	%sp, PTREGS_OFF, %o0
+	call	sun4v_nonresum_error
+	 mov	%l4, %o1
+
+	/* Return from trap.  */
+	ba,pt	%xcc, rtrap_irq
+	 nop
+
+sun4v_nonres_mondo_queue_empty:
+	retry
+
+sun4v_nonres_mondo_queue_full:
+	/* The queue is full, consolidate our damage by setting
+	 * the head equal to the tail.  We'll just trap again otherwise.
+	 * Call C code to log the event.
+	 */
+	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
+	stxa	%g4, [%g2] ASI_QUEUE
+	membar	#Sync
+
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	call	sun4v_nonresum_overflow
+	 add	%sp, PTREGS_OFF, %o0
+
+	ba,pt	%xcc, rtrap_irq
+	 nop
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8f3fce24359d..5417ff1b9345 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1668,6 +1668,186 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
 	       regs->tpc);
 }
 
+struct sun4v_error_entry {
+	u64	err_handle;
+	u64	err_stick;
+
+	u32	err_type;
+#define SUN4V_ERR_TYPE_UNDEFINED	0
+#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
+#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
+#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
+#define SUN4V_ERR_TYPE_WARNING_RES	4
+
+	u32	err_attrs;
+#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
+#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
+#define SUN4V_ERR_ATTRS_PIO		0x00000004
+#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
+#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
+#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
+#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
+#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000
+
+	u64	err_raddr;
+	u32	err_size;
+	u16	err_cpu;
+	u16	err_pad;
+};
+
+static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+static const char *sun4v_err_type_to_str(u32 type)
+{
+	switch (type) {
+	case SUN4V_ERR_TYPE_UNDEFINED:
+		return "undefined";
+	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
+		return "uncorrected resumable";
+	case SUN4V_ERR_TYPE_PRECISE_NONRES:
+		return "precise nonresumable";
+	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
+		return "deferred nonresumable";
+	case SUN4V_ERR_TYPE_WARNING_RES:
+		return "warning resumable";
+	default:
+		return "unknown";
+	};
+}
+
+static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+{
+	int cnt;
+
+	printk("%s: Reporting on cpu %d\n", pfx, cpu);
+	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
+	       pfx,
+	       ent->err_handle, ent->err_stick,
+	       ent->err_type,
+	       sun4v_err_type_to_str(ent->err_type));
+	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
+	       pfx,
+	       ent->err_attrs,
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
+		"processor" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
+		"memory" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
+		"pio" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
+		"integer-regs" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
+		"fpu-regs" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
+		"user" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
+		"privileged" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
+		"queue-full" : ""));
+	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
+	       pfx,
+	       ent->err_raddr, ent->err_size, ent->err_cpu);
+
+	if ((cnt = atomic_read(ocnt)) != 0) {
+		atomic_set(ocnt, 0);
+		wmb();
+		printk("%s: Queue overflowed %d times.\n",
+		       pfx, cnt);
+	}
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event and clear the first word of the entry.
+ */
+void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+{
+	struct sun4v_error_entry *ent, local_copy;
+	struct trap_per_cpu *tb;
+	unsigned long paddr;
+	int cpu;
+
+	cpu = get_cpu();
+
+	tb = &trap_block[cpu];
+	paddr = tb->resum_kernel_buf_pa + offset;
+	ent = __va(paddr);
+
+	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+	/* We have a local copy now, so release the entry.  */
+	ent->err_handle = 0;
+	wmb();
+
+	put_cpu();
+
+	sun4v_log_error(&local_copy, cpu,
+			KERN_ERR "RESUMABLE ERROR",
+			&sun4v_resum_oflow_cnt);
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_resum_overflow(struct pt_regs *regs)
+{
+	atomic_inc(&sun4v_resum_oflow_cnt);
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event, clear the first word of the entry, and die.
+ */
+void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
+{
+	struct sun4v_error_entry *ent, local_copy;
+	struct trap_per_cpu *tb;
+	unsigned long paddr;
+	int cpu;
+
+	cpu = get_cpu();
+
+	tb = &trap_block[cpu];
+	paddr = tb->nonresum_kernel_buf_pa + offset;
+	ent = __va(paddr);
+
+	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+	/* We have a local copy now, so release the entry.  */
+	ent->err_handle = 0;
+	wmb();
+
+	put_cpu();
+
+#ifdef CONFIG_PCI
+	/* Check for the special PCI poke sequence. */
+	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
+		pci_poke_faulted = 1;
+		regs->tpc += 4;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	sun4v_log_error(&local_copy, cpu,
+			KERN_EMERG "NON-RESUMABLE ERROR",
+			&sun4v_nonresum_oflow_cnt);
+
+	panic("Non-resumable error.");
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_nonresum_overflow(struct pt_regs *regs)
+{
+	/* XXX Actually, even this may not make much sense.  Perhaps
+	 * XXX we should just pull the plug and panic directly from here?
+	 */
+	atomic_inc(&sun4v_nonresum_oflow_cnt);
+}
+
 void do_fpe_common(struct pt_regs *regs)
 {
 	if (regs->tstate & TSTATE_PRIV) {
@@ -2190,8 +2370,12 @@ void __init trap_init(void)
 	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
 	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
 	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
+	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
 	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
 	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
 	    (TRAP_PER_CPU_FAULT_INFO !=
 	     offsetof(struct trap_per_cpu, fault_info)))
 		trap_per_cpu_offsets_are_bolixed_dave();
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 2679b6e253ae..1608ba4bf1c1 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -88,7 +88,10 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
 tl0_icpe:	BTRAP(0x72)	/* I-cache Parity Error on Cheetah+ */
 tl0_resv073:	BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
 tl0_resv076:	BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
-tl0_resv07c:	BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
+tl0_cpu_mondo:	TRAP_NOSAVE(sun4v_cpu_mondo)
+tl0_dev_mondo:	TRAP_NOSAVE(sun4v_dev_mondo)
+tl0_res_mondo:	TRAP_NOSAVE(sun4v_res_mondo)
+tl0_nres_mondo:	TRAP_NOSAVE(sun4v_nonres_mondo)
 tl0_s0n:	SPILL_0_NORMAL
 tl0_s1n:	SPILL_1_NORMAL
 tl0_s2n:	SPILL_2_NORMAL