Diffstat (limited to 'arch/sparc64/kernel/traps.c')
-rw-r--r--  arch/sparc64/kernel/traps.c  439
1 file changed, 416 insertions, 23 deletions
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8d44ae5a15e3..ff090bb9734b 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -38,22 +38,24 @@
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/kdebug.h>
+#include <asm/head.h>
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
 
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-        int err = 0;
-        unsigned long flags;
-        spin_lock_irqsave(&die_notifier_lock, flags);
-        err = notifier_chain_register(&sparc64die_chain, nb);
-        spin_unlock_irqrestore(&die_notifier_lock, flags);
-        return err;
+        return atomic_notifier_chain_register(&sparc64die_chain, nb);
 }
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -72,12 +74,14 @@ struct tl1_traplog {
 
 static void dump_tl1_traplog(struct tl1_traplog *p)
 {
-        int i;
+        int i, limit;
+
+        printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
+               "dumping track stack.\n", p->tl);
 
-        printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
-               p->tl);
-        for (i = 0; i < 4; i++) {
-                printk(KERN_CRIT
+        limit = (tlb_type == hypervisor) ? 2 : 4;
+        for (i = 0; i < limit; i++) {
+                printk(KERN_EMERG
                        "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
                        "TNPC[%016lx] TT[%lx]\n",
                        i + 1,
@@ -179,6 +183,45 @@ void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr
         spitfire_insn_access_exception(regs, sfsr, sfar);
 }
 
+void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+        unsigned short type = (type_ctx >> 16);
+        unsigned short ctx = (type_ctx & 0xffff);
+        siginfo_t info;
+
+        if (notify_die(DIE_TRAP, "instruction access exception", regs,
+                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+                return;
+
+        if (regs->tstate & TSTATE_PRIV) {
+                printk("sun4v_insn_access_exception: ADDR[%016lx] "
+                       "CTX[%04x] TYPE[%04x], going.\n",
+                       addr, ctx, type);
+                die_if_kernel("Iax", regs);
+        }
+
+        if (test_thread_flag(TIF_32BIT)) {
+                regs->tpc &= 0xffffffff;
+                regs->tnpc &= 0xffffffff;
+        }
+        info.si_signo = SIGSEGV;
+        info.si_errno = 0;
+        info.si_code = SEGV_MAPERR;
+        info.si_addr = (void __user *) addr;
+        info.si_trapno = 0;
+        force_sig_info(SIGSEGV, &info, current);
+}
+
+void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+        if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
+                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+                return;
+
+        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+        sun4v_insn_access_exception(regs, addr, type_ctx);
+}
+
 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
         siginfo_t info;
@@ -227,6 +270,45 @@ void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr
         spitfire_data_access_exception(regs, sfsr, sfar);
 }
 
+void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+        unsigned short type = (type_ctx >> 16);
+        unsigned short ctx = (type_ctx & 0xffff);
+        siginfo_t info;
+
+        if (notify_die(DIE_TRAP, "data access exception", regs,
+                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+                return;
+
+        if (regs->tstate & TSTATE_PRIV) {
+                printk("sun4v_data_access_exception: ADDR[%016lx] "
+                       "CTX[%04x] TYPE[%04x], going.\n",
+                       addr, ctx, type);
+                die_if_kernel("Dax", regs);
+        }
+
+        if (test_thread_flag(TIF_32BIT)) {
+                regs->tpc &= 0xffffffff;
+                regs->tnpc &= 0xffffffff;
+        }
+        info.si_signo = SIGSEGV;
+        info.si_errno = 0;
+        info.si_code = SEGV_MAPERR;
+        info.si_addr = (void __user *) addr;
+        info.si_trapno = 0;
+        force_sig_info(SIGSEGV, &info, current);
+}
+
+void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+        if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+                return;
+
+        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+        sun4v_data_access_exception(regs, addr, type_ctx);
+}
+
 #ifdef CONFIG_PCI
 /* This is really pathetic... */
 extern volatile int pci_poke_in_progress;
@@ -788,7 +870,8 @@ void __init cheetah_ecache_flush_init(void)
                 cheetah_error_log[i].afsr = CHAFSR_INVALID;
 
         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
-        if ((ver >> 32) == 0x003e0016) {
+        if ((ver >> 32) == __JALAPENO_ID ||
+            (ver >> 32) == __SERRANO_ID) {
                 cheetah_error_table = &__jalapeno_error_table[0];
                 cheetah_afsr_errors = JPAFSR_ERRORS;
         } else if ((ver >> 32) == 0x003e0015) {
@@ -1666,6 +1749,238 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
                regs->tpc);
 }
 
+struct sun4v_error_entry {
+        u64             err_handle;
+        u64             err_stick;
+
+        u32             err_type;
+#define SUN4V_ERR_TYPE_UNDEFINED        0
+#define SUN4V_ERR_TYPE_UNCORRECTED_RES  1
+#define SUN4V_ERR_TYPE_PRECISE_NONRES   2
+#define SUN4V_ERR_TYPE_DEFERRED_NONRES  3
+#define SUN4V_ERR_TYPE_WARNING_RES      4
+
+        u32             err_attrs;
+#define SUN4V_ERR_ATTRS_PROCESSOR       0x00000001
+#define SUN4V_ERR_ATTRS_MEMORY          0x00000002
+#define SUN4V_ERR_ATTRS_PIO             0x00000004
+#define SUN4V_ERR_ATTRS_INT_REGISTERS   0x00000008
+#define SUN4V_ERR_ATTRS_FPU_REGISTERS   0x00000010
+#define SUN4V_ERR_ATTRS_USER_MODE       0x01000000
+#define SUN4V_ERR_ATTRS_PRIV_MODE       0x02000000
+#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL  0x80000000
+
+        u64             err_raddr;
+        u32             err_size;
+        u16             err_cpu;
+        u16             err_pad;
+};
+
+static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+static const char *sun4v_err_type_to_str(u32 type)
+{
+        switch (type) {
+        case SUN4V_ERR_TYPE_UNDEFINED:
+                return "undefined";
+        case SUN4V_ERR_TYPE_UNCORRECTED_RES:
+                return "uncorrected resumable";
+        case SUN4V_ERR_TYPE_PRECISE_NONRES:
+                return "precise nonresumable";
+        case SUN4V_ERR_TYPE_DEFERRED_NONRES:
+                return "deferred nonresumable";
+        case SUN4V_ERR_TYPE_WARNING_RES:
+                return "warning resumable";
+        default:
+                return "unknown";
+        };
+}
+
+static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+{
+        int cnt;
+
+        printk("%s: Reporting on cpu %d\n", pfx, cpu);
+        printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
+               pfx,
+               ent->err_handle, ent->err_stick,
+               ent->err_type,
+               sun4v_err_type_to_str(ent->err_type));
+        printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
+               pfx,
+               ent->err_attrs,
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
+                "processor" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
+                "memory" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
+                "pio" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
+                "integer-regs" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
+                "fpu-regs" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
+                "user" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
+                "privileged" : ""),
+               ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
+                "queue-full" : ""));
+        printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
+               pfx,
+               ent->err_raddr, ent->err_size, ent->err_cpu);
+
+        if ((cnt = atomic_read(ocnt)) != 0) {
+                atomic_set(ocnt, 0);
+                wmb();
+                printk("%s: Queue overflowed %d times.\n",
+                       pfx, cnt);
+        }
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event and clear the first word of the entry.
+ */
+void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+{
+        struct sun4v_error_entry *ent, local_copy;
+        struct trap_per_cpu *tb;
+        unsigned long paddr;
+        int cpu;
+
+        cpu = get_cpu();
+
+        tb = &trap_block[cpu];
+        paddr = tb->resum_kernel_buf_pa + offset;
+        ent = __va(paddr);
+
+        memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+        /* We have a local copy now, so release the entry.  */
+        ent->err_handle = 0;
+        wmb();
+
+        put_cpu();
+
+        sun4v_log_error(&local_copy, cpu,
+                        KERN_ERR "RESUMABLE ERROR",
+                        &sun4v_resum_oflow_cnt);
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_resum_overflow(struct pt_regs *regs)
+{
+        atomic_inc(&sun4v_resum_oflow_cnt);
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event, clear the first word of the entry, and die.
+ */
+void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
+{
+        struct sun4v_error_entry *ent, local_copy;
+        struct trap_per_cpu *tb;
+        unsigned long paddr;
+        int cpu;
+
+        cpu = get_cpu();
+
+        tb = &trap_block[cpu];
+        paddr = tb->nonresum_kernel_buf_pa + offset;
+        ent = __va(paddr);
+
+        memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+        /* We have a local copy now, so release the entry.  */
+        ent->err_handle = 0;
+        wmb();
+
+        put_cpu();
+
+#ifdef CONFIG_PCI
+        /* Check for the special PCI poke sequence. */
+        if (pci_poke_in_progress && pci_poke_cpu == cpu) {
+                pci_poke_faulted = 1;
+                regs->tpc += 4;
+                regs->tnpc = regs->tpc + 4;
+                return;
+        }
+#endif
+
+        sun4v_log_error(&local_copy, cpu,
+                        KERN_EMERG "NON-RESUMABLE ERROR",
+                        &sun4v_nonresum_oflow_cnt);
+
+        panic("Non-resumable error.");
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_nonresum_overflow(struct pt_regs *regs)
+{
+        /* XXX Actually even this can make not that much sense.  Perhaps
+         * XXX we should just pull the plug and panic directly from here?
+         */
+        atomic_inc(&sun4v_nonresum_oflow_cnt);
+}
+
+unsigned long sun4v_err_itlb_vaddr;
+unsigned long sun4v_err_itlb_ctx;
+unsigned long sun4v_err_itlb_pte;
+unsigned long sun4v_err_itlb_error;
+
+void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+{
+        if (tl > 1)
+                dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+        printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+               regs->tpc, tl);
+        printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+               "pte[%lx] error[%lx]\n",
+               sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
+               sun4v_err_itlb_pte, sun4v_err_itlb_error);
+
+        prom_halt();
+}
+
+unsigned long sun4v_err_dtlb_vaddr;
+unsigned long sun4v_err_dtlb_ctx;
+unsigned long sun4v_err_dtlb_pte;
+unsigned long sun4v_err_dtlb_error;
+
+void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+{
+        if (tl > 1)
+                dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+        printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+               regs->tpc, tl);
+        printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+               "pte[%lx] error[%lx]\n",
+               sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
+               sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
+
+        prom_halt();
+}
+
+void hypervisor_tlbop_error(unsigned long err, unsigned long op)
+{
+        printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
+               err, op);
+}
+
+void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
+{
+        printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
+               err, op);
+}
+
 void do_fpe_common(struct pt_regs *regs)
 {
         if (regs->tstate & TSTATE_PRIV) {
@@ -1924,10 +2239,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
         }
                 user_instruction_dump ((unsigned int __user *) regs->tpc);
         }
+#if 0
 #ifdef CONFIG_SMP
         smp_report_regs();
 #endif
-
+#endif
         if (regs->tstate & TSTATE_PRIV)
                 do_exit(SIGKILL);
         do_exit(SIGSEGV);
@@ -1958,6 +2274,11 @@ void do_illegal_instruction(struct pt_regs *regs)
                 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
                         if (handle_ldf_stq(insn, regs))
                                 return;
+                } else if (tlb_type == hypervisor) {
+                        extern int vis_emul(struct pt_regs *, unsigned int);
+
+                        if (!vis_emul(regs, insn))
+                                return;
                 }
         }
         info.si_signo = SIGILL;
@@ -1968,6 +2289,8 @@ void do_illegal_instruction(struct pt_regs *regs)
         force_sig_info(SIGILL, &info, current);
 }
 
+extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+
 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
         siginfo_t info;
@@ -1977,13 +2300,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
                 return;
 
         if (regs->tstate & TSTATE_PRIV) {
-                extern void kernel_unaligned_trap(struct pt_regs *regs,
-                                                  unsigned int insn,
-                                                  unsigned long sfar,
-                                                  unsigned long sfsr);
-
-                kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
-                                      sfar, sfsr);
+                kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
                 return;
         }
         info.si_signo = SIGBUS;
@@ -1994,6 +2311,26 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
         force_sig_info(SIGBUS, &info, current);
 }
 
+void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+        siginfo_t info;
+
+        if (notify_die(DIE_TRAP, "memory address unaligned", regs,
+                       0, 0x34, SIGSEGV) == NOTIFY_STOP)
+                return;
+
+        if (regs->tstate & TSTATE_PRIV) {
+                kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
+                return;
+        }
+        info.si_signo = SIGBUS;
+        info.si_errno = 0;
+        info.si_code = BUS_ADRALN;
+        info.si_addr = (void __user *) addr;
+        info.si_trapno = 0;
+        force_sig_info(SIGBUS, &info, current);
+}
+
 void do_privop(struct pt_regs *regs)
 {
         siginfo_t info;
@@ -2130,7 +2467,23 @@ void do_getpsr(struct pt_regs *regs)
         }
 }
 
+struct trap_per_cpu trap_block[NR_CPUS];
+
+/* This can get invoked before sched_init() so play it super safe
+ * and use hard_smp_processor_id().
+ */
+void init_cur_cpu_trap(struct thread_info *t)
+{
+        int cpu = hard_smp_processor_id();
+        struct trap_per_cpu *p = &trap_block[cpu];
+
+        p->thread = t;
+        p->pgd_paddr = 0;
+}
+
 extern void thread_info_offsets_are_bolixed_dave(void);
+extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2154,7 +2507,6 @@ void __init trap_init(void)
             TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
             TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
             TI_PCR != offsetof(struct thread_info, pcr_reg) ||
-            TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
             TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
             TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
             TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
@@ -2165,6 +2517,47 @@ void __init trap_init(void)
             (TI_FPREGS & (64 - 1)))
                 thread_info_offsets_are_bolixed_dave();
 
+        if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
+            (TRAP_PER_CPU_PGD_PADDR !=
+             offsetof(struct trap_per_cpu, pgd_paddr)) ||
+            (TRAP_PER_CPU_CPU_MONDO_PA !=
+             offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+            (TRAP_PER_CPU_DEV_MONDO_PA !=
+             offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+            (TRAP_PER_CPU_RESUM_MONDO_PA !=
+             offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+            (TRAP_PER_CPU_RESUM_KBUF_PA !=
+             offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+            (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+             offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+            (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+             offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+            (TRAP_PER_CPU_FAULT_INFO !=
+             offsetof(struct trap_per_cpu, fault_info)) ||
+            (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+             offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+            (TRAP_PER_CPU_CPU_LIST_PA !=
+             offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+            (TRAP_PER_CPU_TSB_HUGE !=
+             offsetof(struct trap_per_cpu, tsb_huge)) ||
+            (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+             offsetof(struct trap_per_cpu, tsb_huge_temp)))
+                trap_per_cpu_offsets_are_bolixed_dave();
+
+        if ((TSB_CONFIG_TSB !=
+             offsetof(struct tsb_config, tsb)) ||
+            (TSB_CONFIG_RSS_LIMIT !=
+             offsetof(struct tsb_config, tsb_rss_limit)) ||
+            (TSB_CONFIG_NENTRIES !=
+             offsetof(struct tsb_config, tsb_nentries)) ||
+            (TSB_CONFIG_REG_VAL !=
+             offsetof(struct tsb_config, tsb_reg_val)) ||
+            (TSB_CONFIG_MAP_VADDR !=
+             offsetof(struct tsb_config, tsb_map_vaddr)) ||
+            (TSB_CONFIG_MAP_PTE !=
+             offsetof(struct tsb_config, tsb_map_pte)))
+                tsb_config_offsets_are_bolixed_dave();
+
         /* Attach to the address space of init_task.  On SMP we
          * do this in smp.c:smp_callin for other cpus.
          */