diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/alpha/kernel |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/alpha/kernel')
76 files changed, 34945 insertions, 0 deletions
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile new file mode 100644 index 000000000000..ab6fa54b3860 --- /dev/null +++ b/arch/alpha/kernel/Makefile | |||
@@ -0,0 +1,104 @@ | |||
#
# Makefile for the linux kernel.
#

extra-y		:= head.o vmlinux.lds
EXTRA_AFLAGS	:= $(CFLAGS)
EXTRA_CFLAGS	:= -Werror -Wno-sign-compare

# Objects built into every Alpha kernel.  NOTE: link order here is
# significant for initcall ordering; do not reorder casually.
obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
	    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
	    alpha_ksyms.o systbls.o err_common.o io.o

obj-$(CONFIG_VGA_HOSE)	+= console.o
obj-$(CONFIG_SMP)	+= smp.o
obj-$(CONFIG_PCI)	+= pci.o pci_iommu.o
obj-$(CONFIG_SRM_ENV)	+= srm_env.o
obj-$(CONFIG_MODULES)	+= module.o

# A GENERIC kernel carries every core-logic and platform file so one
# image boots on any machine; otherwise only the objects selected by
# the machine-specific CONFIG_ options below are built in.
ifdef CONFIG_ALPHA_GENERIC

obj-y	 += core_apecs.o core_cia.o core_irongate.o core_lca.o \
	    core_mcpcia.o core_polaris.o core_t2.o \
	    core_tsunami.o

obj-y	 += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \
	    sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
	    sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
	    sys_sable.o sys_sio.o sys_sx164.o sys_takara.o

# Marvel/Titan/Wildfire need the non-legacy kernel start address.
ifndef CONFIG_ALPHA_LEGACY_START_ADDRESS
obj-y	 += core_marvel.o core_titan.o core_wildfire.o
obj-y	 += sys_marvel.o sys_titan.o sys_wildfire.o
obj-y	 += err_ev7.o err_titan.o err_marvel.o
endif

obj-y	 += irq_pyxis.o irq_i8259.o irq_srm.o
obj-y	 += err_ev6.o
obj-y	 += es1888.o smc37c669.o smc37c93x.o ns87312.o gct.o
obj-y	 += srmcons.o

else

# Misc support
obj-$(CONFIG_ALPHA_SRM)		+= srmcons.o

# Core logic support
obj-$(CONFIG_ALPHA_APECS)	+= core_apecs.o
obj-$(CONFIG_ALPHA_CIA)		+= core_cia.o
obj-$(CONFIG_ALPHA_IRONGATE)	+= core_irongate.o
obj-$(CONFIG_ALPHA_LCA)		+= core_lca.o
obj-$(CONFIG_ALPHA_MARVEL)	+= core_marvel.o gct.o
obj-$(CONFIG_ALPHA_MCPCIA)	+= core_mcpcia.o
obj-$(CONFIG_ALPHA_POLARIS)	+= core_polaris.o
obj-$(CONFIG_ALPHA_T2)		+= core_t2.o
obj-$(CONFIG_ALPHA_TSUNAMI)	+= core_tsunami.o
obj-$(CONFIG_ALPHA_TITAN)	+= core_titan.o
obj-$(CONFIG_ALPHA_WILDFIRE)	+= core_wildfire.o

# Board support
obj-$(CONFIG_ALPHA_ALCOR)	+= sys_alcor.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_CABRIOLET)	+= sys_cabriolet.o irq_i8259.o irq_srm.o \
				   ns87312.o
obj-$(CONFIG_ALPHA_EB164)	+= sys_cabriolet.o irq_i8259.o irq_srm.o \
				   ns87312.o
obj-$(CONFIG_ALPHA_EB66P)	+= sys_cabriolet.o irq_i8259.o irq_srm.o \
				   ns87312.o
obj-$(CONFIG_ALPHA_LX164)	+= sys_cabriolet.o irq_i8259.o irq_srm.o \
				   smc37c93x.o
obj-$(CONFIG_ALPHA_PC164)	+= sys_cabriolet.o irq_i8259.o irq_srm.o \
				   smc37c93x.o
obj-$(CONFIG_ALPHA_DP264)	+= sys_dp264.o irq_i8259.o es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_SHARK)	+= sys_dp264.o irq_i8259.o es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_TITAN)	+= sys_titan.o irq_i8259.o smc37c669.o
obj-$(CONFIG_ALPHA_EB64P)	+= sys_eb64p.o irq_i8259.o
obj-$(CONFIG_ALPHA_EB66)	+= sys_eb64p.o irq_i8259.o
obj-$(CONFIG_ALPHA_EIGER)	+= sys_eiger.o irq_i8259.o
obj-$(CONFIG_ALPHA_JENSEN)	+= sys_jensen.o pci-noop.o irq_i8259.o
obj-$(CONFIG_ALPHA_MARVEL)	+= sys_marvel.o
obj-$(CONFIG_ALPHA_MIATA)	+= sys_miata.o irq_pyxis.o irq_i8259.o \
				   es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_MIKASA)	+= sys_mikasa.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_NAUTILUS)	+= sys_nautilus.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_NORITAKE)	+= sys_noritake.o irq_i8259.o
obj-$(CONFIG_ALPHA_RAWHIDE)	+= sys_rawhide.o irq_i8259.o
obj-$(CONFIG_ALPHA_RUFFIAN)	+= sys_ruffian.o irq_pyxis.o irq_i8259.o
obj-$(CONFIG_ALPHA_RX164)	+= sys_rx164.o irq_i8259.o
obj-$(CONFIG_ALPHA_SABLE)	+= sys_sable.o
obj-$(CONFIG_ALPHA_LYNX)	+= sys_sable.o
obj-$(CONFIG_ALPHA_BOOK1)	+= sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_AVANTI)	+= sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_NONAME)	+= sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_P2K)		+= sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_XL)		+= sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_SX164)	+= sys_sx164.o irq_pyxis.o irq_i8259.o \
				   irq_srm.o smc37c669.o
obj-$(CONFIG_ALPHA_TAKARA)	+= sys_takara.o irq_i8259.o ns87312.o
obj-$(CONFIG_ALPHA_WILDFIRE)	+= sys_wildfire.o irq_i8259.o

# Error support
obj-$(CONFIG_ALPHA_MARVEL)	+= err_ev7.o err_marvel.o
obj-$(CONFIG_ALPHA_NAUTILUS)	+= err_ev6.o
obj-$(CONFIG_ALPHA_TITAN)	+= err_ev6.o err_titan.o

endif # GENERIC
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c new file mode 100644 index 000000000000..fc5ef90c4fc9 --- /dev/null +++ b/arch/alpha/kernel/alpha_ksyms.c | |||
@@ -0,0 +1,235 @@ | |||
/*
 * linux/arch/alpha/kernel/ksyms.c
 *
 * Export the alpha-specific functions that are needed for loadable
 * modules.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/console.h>
#include <asm/hwrpb.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/checksum.h>
#include <linux/interrupt.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/semaphore.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/vga.h>

#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>

extern struct hwrpb_struct *hwrpb;
extern void dump_thread(struct pt_regs *, struct user *);
extern spinlock_t rtc_lock;

/* these are C runtime functions with special calling conventions: */
extern void __divl (void);
extern void __reml (void);
extern void __divq (void);
extern void __remq (void);
extern void __divlu (void);
extern void __remlu (void);
extern void __divqu (void);
extern void __remqu (void);

/* Machine vector, interrupt management and SRM console callbacks. */
EXPORT_SYMBOL(alpha_mv);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(screen_info);
EXPORT_SYMBOL(perf_irq);
EXPORT_SYMBOL(callback_getenv);
EXPORT_SYMBOL(callback_setenv);
EXPORT_SYMBOL(callback_save_env);
#ifdef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(alpha_using_srm);
#endif /* CONFIG_ALPHA_GENERIC */

/* platform dependent support */
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memsetw);
EXPORT_SYMBOL(__constant_c_memset);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);

/* DMA direct-map window parameters set up by the core-logic code. */
EXPORT_SYMBOL(__direct_map_base);
EXPORT_SYMBOL(__direct_map_size);

#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_map_page);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_unmap_page);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_dma_supported);
EXPORT_SYMBOL(pci_dac_dma_supported);
EXPORT_SYMBOL(pci_dac_page_to_dma);
EXPORT_SYMBOL(pci_dac_dma_to_page);
EXPORT_SYMBOL(pci_dac_dma_to_offset);
EXPORT_SYMBOL(alpha_gendev_to_pci);
#endif
EXPORT_SYMBOL(dma_set_mask);

/* Process/thread state dumping and FP register access. */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_elf_thread);
EXPORT_SYMBOL(dump_elf_task);
EXPORT_SYMBOL(dump_elf_task_fp);
EXPORT_SYMBOL(hwrpb);
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(alpha_read_fp_reg);
EXPORT_SYMBOL(alpha_read_fp_reg_s);
EXPORT_SYMBOL(alpha_write_fp_reg);
EXPORT_SYMBOL(alpha_write_fp_reg_s);

/* In-kernel system calls. */
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_dup);
EXPORT_SYMBOL(sys_exit);
EXPORT_SYMBOL(sys_write);
EXPORT_SYMBOL(sys_read);
EXPORT_SYMBOL(sys_lseek);
EXPORT_SYMBOL(execve);
EXPORT_SYMBOL(sys_setsid);
EXPORT_SYMBOL(sys_wait4);

/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(ip_compute_csum);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_ipv6_magic);

#ifdef CONFIG_MATHEMU_MODULE
/* Hooks filled in by the FP-emulation module when it loads. */
extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
extern long (*alpha_fp_emul) (unsigned long pc);
EXPORT_SYMBOL(alpha_fp_emul_imprecise);
EXPORT_SYMBOL(alpha_fp_emul);
#endif

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
EXPORT_SYMBOL(__min_ipl);
#endif

/*
 * The following are specially called from the uaccess assembly stubs.
 */
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);

/* Semaphore helper functions. */
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__up_wakeup);
EXPORT_SYMBOL(down);
EXPORT_SYMBOL(down_interruptible);
EXPORT_SYMBOL(down_trylock);
EXPORT_SYMBOL(up);

/*
 * SMP-specific symbols.
 */

#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(flush_tlb_mm);
EXPORT_SYMBOL(flush_tlb_range);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(smp_imb);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_on_cpu);
EXPORT_SYMBOL(_atomic_dec_and_lock);
#ifdef CONFIG_DEBUG_SPINLOCK
EXPORT_SYMBOL(_raw_spin_unlock);
EXPORT_SYMBOL(debug_spin_lock);
EXPORT_SYMBOL(debug_spin_trylock);
#endif
#ifdef CONFIG_DEBUG_RWLOCK
EXPORT_SYMBOL(_raw_write_lock);
EXPORT_SYMBOL(_raw_read_lock);
#endif
EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */

/*
 * NUMA specific symbols
 */
#ifdef CONFIG_DISCONTIGMEM
EXPORT_SYMBOL(node_data);
#endif /* CONFIG_DISCONTIGMEM */

EXPORT_SYMBOL(rtc_lock);

/*
 * The following are special because they're not called
 * explicitly (the C compiler or assembler generates them in
 * response to division operations).  Fortunately, their
 * interface isn't gonna change any time soon now, so it's OK
 * to leave it out of version control.
 */
# undef memcpy
# undef memset
EXPORT_SYMBOL(__divl);
EXPORT_SYMBOL(__divlu);
EXPORT_SYMBOL(__divq);
EXPORT_SYMBOL(__divqu);
EXPORT_SYMBOL(__reml);
EXPORT_SYMBOL(__remlu);
EXPORT_SYMBOL(__remq);
EXPORT_SYMBOL(__remqu);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);

EXPORT_SYMBOL(get_wchan);

#ifdef CONFIG_ALPHA_IRONGATE
EXPORT_SYMBOL(irongate_ioremap);
EXPORT_SYMBOL(irongate_iounmap);
#endif
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c new file mode 100644 index 000000000000..8f2e5c718b50 --- /dev/null +++ b/arch/alpha/kernel/asm-offsets.c | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed to extract | ||
4 | * and format the required data. | ||
5 | */ | ||
6 | |||
7 | #include <linux/types.h> | ||
8 | #include <linux/stddef.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/ptrace.h> | ||
11 | #include <asm/io.h> | ||
12 | |||
/*
 * DEFINE emits a marker line of the form "->sym <value> <expr>" into
 * the generated assembly; a post-processing step extracts these into
 * an asm-offsets header.  The "i" constraint forces VAL to be a
 * compile-time constant.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* BLANK emits a bare "->" marker (becomes a blank line in the output). */
#define BLANK() asm volatile("\n->" : : )

/*
 * Never called at runtime; compiled only so the asm markers above end
 * up in the object file for extraction.
 */
void foo(void)
{
	/* thread_info field offsets used by assembly code */
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	BLANK();

	/* task_struct field offsets */
	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
	DEFINE(TASK_UID, offsetof(struct task_struct, uid));
	DEFINE(TASK_EUID, offsetof(struct task_struct, euid));
	DEFINE(TASK_GID, offsetof(struct task_struct, gid));
	DEFINE(TASK_EGID, offsetof(struct task_struct, egid));
	DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
	DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
	BLANK();

	/* sizes and flag constants needed by entry.S */
	DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
	DEFINE(PT_PTRACED, PT_PTRACED);
	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
	DEFINE(SIGCHLD, SIGCHLD);
	BLANK();

	/* machine-vector HAE fields accessed from assembly */
	DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
	DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c new file mode 100644 index 000000000000..cb3e739fbad8 --- /dev/null +++ b/arch/alpha/kernel/console.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/console.c | ||
3 | * | ||
4 | * Architecture-specific specific support for VGA device on | ||
5 | * non-0 I/O hose | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/tty.h> | ||
12 | #include <linux/console.h> | ||
13 | #include <asm/vga.h> | ||
14 | #include <asm/machvec.h> | ||
15 | |||
16 | #ifdef CONFIG_VGA_HOSE | ||
17 | |||
18 | /* | ||
19 | * Externally-visible vga hose bases | ||
20 | */ | ||
21 | unsigned long __vga_hose_io_base = 0; /* base for default hose */ | ||
22 | unsigned long __vga_hose_mem_base = 0; /* base for default hose */ | ||
23 | |||
24 | static struct pci_controller * __init | ||
25 | default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2) | ||
26 | { | ||
27 | if (h2->index < h1->index) | ||
28 | return h2; | ||
29 | |||
30 | return h1; | ||
31 | } | ||
32 | |||
33 | void __init | ||
34 | set_vga_hose(struct pci_controller *hose) | ||
35 | { | ||
36 | if (hose) { | ||
37 | __vga_hose_io_base = hose->io_space->start; | ||
38 | __vga_hose_mem_base = hose->mem_space->start; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | void __init | ||
43 | locate_and_init_vga(void *(*sel_func)(void *, void *)) | ||
44 | { | ||
45 | struct pci_controller *hose = NULL; | ||
46 | struct pci_dev *dev = NULL; | ||
47 | |||
48 | if (!sel_func) sel_func = (void *)default_vga_hose_select; | ||
49 | |||
50 | for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) { | ||
51 | if (!hose) hose = dev->sysdata; | ||
52 | else hose = sel_func(hose, dev->sysdata); | ||
53 | } | ||
54 | |||
55 | /* Did we already inititialize the correct one? */ | ||
56 | if (conswitchp == &vga_con && | ||
57 | __vga_hose_io_base == hose->io_space->start && | ||
58 | __vga_hose_mem_base == hose->mem_space->start) | ||
59 | return; | ||
60 | |||
61 | /* Set the VGA hose and init the new console */ | ||
62 | set_vga_hose(hose); | ||
63 | take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1); | ||
64 | } | ||
65 | |||
66 | #endif | ||
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c new file mode 100644 index 000000000000..a27ba12ba35e --- /dev/null +++ b/arch/alpha/kernel/core_apecs.c | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_apecs.c | ||
3 | * | ||
4 | * Rewritten for Apecs from the lca.c from: | ||
5 | * | ||
6 | * Written by David Mosberger (davidm@cs.arizona.edu) with some code | ||
7 | * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit | ||
8 | * bios code. | ||
9 | * | ||
10 | * Code common to all APECS core logic chips. | ||
11 | */ | ||
12 | |||
13 | #define __EXTERN_INLINE inline | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/core_apecs.h> | ||
16 | #undef __EXTERN_INLINE | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/init.h> | ||
21 | |||
22 | #include <asm/ptrace.h> | ||
23 | #include <asm/smp.h> | ||
24 | |||
25 | #include "proto.h" | ||
26 | #include "pci_impl.h" | ||
27 | |||
28 | /* | ||
29 | * NOTE: Herein lie back-to-back mb instructions. They are magic. | ||
30 | * One plausible explanation is that the i/o controller does not properly | ||
31 | * handle the system transaction. Another involves timing. Ho hum. | ||
32 | */ | ||
33 | |||
34 | /* | ||
35 | * BIOS32-style PCI interface: | ||
36 | */ | ||
37 | |||
38 | #define DEBUG_CONFIG 0 | ||
39 | |||
40 | #if DEBUG_CONFIG | ||
41 | # define DBGC(args) printk args | ||
42 | #else | ||
43 | # define DBGC(args) | ||
44 | #endif | ||
45 | |||
46 | #define vuip volatile unsigned int * | ||
47 | |||
48 | /* | ||
49 | * Given a bus, device, and function number, compute resulting | ||
50 | * configuration space address and setup the APECS_HAXR2 register | ||
51 | * accordingly. It is therefore not safe to have concurrent | ||
52 | * invocations to configuration space access routines, but there | ||
53 | * really shouldn't be any need for this. | ||
54 | * | ||
55 | * Type 0: | ||
56 | * | ||
57 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
58 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
59 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
60 | * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| | ||
61 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
62 | * | ||
63 | * 31:11 Device select bit. | ||
64 | * 10:8 Function number | ||
65 | * 7:2 Register number | ||
66 | * | ||
67 | * Type 1: | ||
68 | * | ||
69 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
70 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
71 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
72 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
73 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
74 | * | ||
75 | * 31:24 reserved | ||
76 | * 23:16 bus number (8 bits = 128 possible buses) | ||
77 | * 15:11 Device number (5 bits) | ||
78 | * 10:8 function number | ||
79 | * 7:2 register number | ||
80 | * | ||
81 | * Notes: | ||
82 | * The function number selects which function of a multi-function device | ||
83 | * (e.g., SCSI and Ethernet). | ||
84 | * | ||
85 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
86 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
87 | * bits. | ||
88 | */ | ||
89 | |||
/*
 * Build the configuration-space address for (bus, device_fn, where)
 * into *pci_addr, and report via *type1 whether a type 1 cycle is
 * required (any bus other than 0).  Returns 0 on success, -1 if the
 * device number is out of range for a type 0 cycle.  The address
 * encodings are documented in the block comment above.
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
	      " pci_addr=0x%p, type1=0x%p)\n",
	      bus, device_fn, where, pci_addr, type1));

	if (bus == 0) {
		int device = device_fn >> 3;

		/* type 0 configuration cycle: */

		if (device > 20) {
			/* device select lines run out beyond slot 20 */
			DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
			      device));
			return -1;
		}

		*type1 = 0;
		addr = (device_fn << 8) | (where);
	} else {
		/* type 1 configuration cycle: */
		*type1 = 1;
		addr = (bus << 16) | (device_fn << 8) | (where);
	}
	*pci_addr = addr;
	DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}
123 | |||
/*
 * Read one 32-bit word from PCI configuration space at ADDR.
 *
 * A probe of a non-existent device arrives as a machine check, so
 * machine checks are marked "expected" around the access and the
 * all-ones value is returned when one is taken.  TYPE1 selects a
 * type 1 configuration cycle via bit 0 of the HAXR2 register.
 * Runs with local interrupts disabled; the back-to-back mb()s are
 * deliberate (see the note at the top of the file).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	unsigned int stat0, value;
	unsigned int haxr2 = 0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vuip)APECS_IOC_DCSR;
	*(vuip)APECS_IOC_DCSR = stat0;	/* write-1-to-clear, presumably */
	mb();
	DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));

	/* If Type1 access, must set HAE #2. */
	if (type1) {
		haxr2 = *(vuip)APECS_IOC_HAXR2;
		mb();
		*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
		DBGC(("conf_read: TYPE1 access\n"));
	}

	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space. */

	/* Some SRMs step on these registers during a machine check. */
	asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
		     : "$9", "$10", "$11", "$12", "$13", "$14", "memory");

	if (mcheck_taken(0)) {
		/* Master abort: no device answered -- return all ones. */
		mcheck_taken(0) = 0;
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

#if 1
	/*
	 * david.rusling@reo.mts.dec.com.  This code is needed for the
	 * EB64+ as it does not generate a machine check (why I don't
	 * know).  When we build kernels for one particular platform
	 * then we can make this conditional on the type.
	 */
	draina();

	/* Now look for any errors. */
	stat0 = *(vuip)APECS_IOC_DCSR;
	DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));

	/* Is any error bit set? */
	if (stat0 & 0xffe0U) {
		/* If not NDEV, print status. */
		if (!(stat0 & 0x0800)) {
			printk("apecs.c:conf_read: got stat0=%x\n", stat0);
		}

		/* Reset error status. */
		*(vuip)APECS_IOC_DCSR = stat0;
		mb();
		wrmces(0x7);			/* reset machine check */
		value = 0xffffffff;
	}
#endif

	/* If Type1 access, must reset HAE #2 so normal IO space ops work. */
	if (type1) {
		*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
		mb();
	}
	local_irq_restore(flags);

	return value;
}
205 | |||
/*
 * Write one 32-bit word VALUE to PCI configuration space at ADDR.
 * Mirrors conf_read(): machine checks are marked expected around the
 * access, TYPE1 selects a type 1 cycle via HAXR2 bit 0, and errors in
 * DCSR are cleared afterwards.  Runs with local interrupts disabled.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	unsigned int stat0;
	unsigned int haxr2 = 0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	/* Reset status register to avoid losing errors. */
	stat0 = *(vuip)APECS_IOC_DCSR;
	*(vuip)APECS_IOC_DCSR = stat0;
	mb();

	/* If Type1 access, must set HAE #2. */
	if (type1) {
		haxr2 = *(vuip)APECS_IOC_HAXR2;
		mb();
		*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
	}

	draina();
	mcheck_expected(0) = 1;
	mb();

	/* Access configuration space. */
	*(vuip)addr = value;
	mb();
	mb();  /* magic */
	mcheck_expected(0) = 0;
	mb();

#if 1
	/*
	 * david.rusling@reo.mts.dec.com.  This code is needed for the
	 * EB64+ as it does not generate a machine check (why I don't
	 * know).  When we build kernels for one particular platform
	 * then we can make this conditional on the type.
	 */
	draina();

	/* Now look for any errors. */
	stat0 = *(vuip)APECS_IOC_DCSR;

	/* Is any error bit set? */
	if (stat0 & 0xffe0U) {
		/* If not NDEV, print status. */
		if (!(stat0 & 0x0800)) {
			printk("apecs.c:conf_write: got stat0=%x\n", stat0);
		}

		/* Reset error status. */
		*(vuip)APECS_IOC_DCSR = stat0;
		mb();
		wrmces(0x7);			/* reset machine check */
	}
#endif

	/* If Type1 access, must reset HAE #2 so normal IO space ops work. */
	if (type1) {
		*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
		mb();
	}
	local_irq_restore(flags);
}
271 | |||
/*
 * pci_ops read hook: read SIZE bytes (1, 2 or 4) at offset WHERE of
 * DEVFN's config space into *VALUE.  The low address bits carry the
 * transfer-length encoding ((size - 1) * 8) for the sparse config
 * space window at APECS_CONF; the result is shifted down so the
 * requested bytes land in the low bits of *VALUE.
 */
static int
apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		  int size, u32 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;
	int shift;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	mask = (size - 1) * 8;		/* 0, 8 or 24: length code in sparse space */
	shift = (where & 3) * 8;	/* byte lane within the 32-bit word */
	addr = (pci_addr << 5) + mask + APECS_CONF;
	*value = conf_read(addr, type1) >> (shift);
	return PCIBIOS_SUCCESSFUL;
}
290 | |||
/*
 * pci_ops write hook: write SIZE bytes (1, 2 or 4) of VALUE at offset
 * WHERE of DEVFN's config space.  VALUE is shifted into the correct
 * byte lane; the sparse-space length code selects how many bytes the
 * hardware actually writes.
 */
static int
apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	mask = (size - 1) * 8;		/* length code, as in apecs_read_config */
	addr = (pci_addr << 5) + mask + APECS_CONF;
	conf_write(addr, value << ((where & 3) * 8), type1);
	return PCIBIOS_SUCCESSFUL;
}
307 | |||
/* Config-space accessors registered with the generic PCI layer. */
struct pci_ops apecs_pci_ops =
{
	.read =		apecs_read_config,
	.write =	apecs_write_config,
};
313 | |||
/*
 * Flush the IOMMU translation buffer.  APECS's TBIA invalidates the
 * whole TLB, so the start/end range arguments are ignored.
 */
void
apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)APECS_IOC_TBIA = 0;
	mb();
}
321 | |||
/*
 * One-time APECS initialization: create the single PCI hose, set the
 * sparse/dense space bases, program the two PCI-to-memory DMA windows,
 * flush the IOMMU TLB and clear HAXR2.
 */
void __init
apecs_init_arch(void)
{
	struct pci_controller *hose;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
	hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
	hose->sparse_io_base = APECS_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is scatter-gather 8MB at 8MB (for isa)
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = NULL;
	__direct_map_base = 0x40000000;
	__direct_map_size = 0x40000000;

	/* Window 1: base/mask/translated-base registers.
	   0x00080000 looks like the window-enable bit -- TODO confirm
	   against the APECS (21071/21072) spec. */
	*(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
	*(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
	*(vuip)APECS_IOC_TB1R = 0;

	/* Window 2: scatter-gather via the ISA arena's page tables
	   (0x000c0000 presumably enables the window in SG mode). */
	*(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
	*(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;

	apecs_pci_tbi(hose, 0, -1);

	/*
	 * Finally, clear the HAXR2 register, which gets used
	 * for PCI Config Space accesses. That is the way
	 * we want to use it, and we do not want to depend on
	 * what ARC or SRM might have left behind...
	 */
	*(vuip)APECS_IOC_HAXR2 = 0;
	mb();
}
371 | |||
/*
 * Clear any pending APECS PCI error state: latch and acknowledge the
 * DCSR error bits (reading SEAR unlatches the error address register,
 * presumably), then invalidate the IOMMU TLB.
 */
void
apecs_pci_clr_err(void)
{
	unsigned int jd;

	jd = *(vuip)APECS_IOC_DCSR;
	if (jd & 0xffe0L) {
		*(vuip)APECS_IOC_SEAR;		/* read to unlatch error address */
		*(vuip)APECS_IOC_DCSR = jd | 0xffe1L;	/* write-1-to-clear error bits */
		mb();
		*(vuip)APECS_IOC_DCSR;		/* read back to flush the write */
	}
	*(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
	mb();
	*(vuip)APECS_IOC_TBIA;
}
388 | |||
/*
 * APECS machine-check handler.  Parses the logout frame at LA_PTR into
 * its common/processor/system sections, clears the error state before
 * reporting, and hands off to the generic reporting code.  The report
 * is suppressed when the check was expected (a config-space probe) and
 * the EPIC DCSR shows the corresponding bits (0x0c00 -- NDEV/abort,
 * presumably; verify against the APECS documentation).
 */
void
apecs_machine_check(unsigned long vector, unsigned long la_ptr,
		    struct pt_regs * regs)
{
	struct el_common *mchk_header;
	struct el_apecs_procdata *mchk_procdata;
	struct el_apecs_sysdata_mcheck *mchk_sysdata;

	mchk_header = (struct el_common *)la_ptr;

	/* proc_offset points past the paltemp array; back up to its start */
	mchk_procdata = (struct el_apecs_procdata *)
		(la_ptr + mchk_header->proc_offset
		 - sizeof(mchk_procdata->paltemp));

	mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
		(la_ptr + mchk_header->sys_offset);


	/* Clear the error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	apecs_pci_clr_err();
	wrmces(0x7);		/* reset machine check pending flag */
	mb();

	process_mcheck_info(vector, la_ptr, regs, "APECS",
			    (mcheck_expected(0)
			     && (mchk_sysdata->epic_dcsr & 0x0c00UL)));
}
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c new file mode 100644 index 000000000000..fd563064363c --- /dev/null +++ b/arch/alpha/kernel/core_cia.c | |||
@@ -0,0 +1,1212 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_cia.c | ||
3 | * | ||
4 | * Written by David A Rusling (david.rusling@reo.mts.dec.com). | ||
5 | * December 1995. | ||
6 | * | ||
7 | * Copyright (C) 1995 David A Rusling | ||
8 | * Copyright (C) 1997, 1998 Jay Estabrook | ||
9 | * Copyright (C) 1998, 1999, 2000 Richard Henderson | ||
10 | * | ||
11 | * Code common to all CIA core logic chips. | ||
12 | */ | ||
13 | |||
14 | #define __EXTERN_INLINE inline | ||
15 | #include <asm/io.h> | ||
16 | #include <asm/core_cia.h> | ||
17 | #undef __EXTERN_INLINE | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/bootmem.h> | ||
24 | |||
25 | #include <asm/ptrace.h> | ||
26 | |||
27 | #include "proto.h" | ||
28 | #include "pci_impl.h" | ||
29 | |||
30 | |||
31 | /* | ||
32 | * NOTE: Herein lie back-to-back mb instructions. They are magic. | ||
33 | * One plausible explanation is that the i/o controller does not properly | ||
34 | * handle the system transaction. Another involves timing. Ho hum. | ||
35 | */ | ||
36 | |||
37 | #define DEBUG_CONFIG 0 | ||
38 | #if DEBUG_CONFIG | ||
39 | # define DBGC(args) printk args | ||
40 | #else | ||
41 | # define DBGC(args) | ||
42 | #endif | ||
43 | |||
44 | #define vip volatile int * | ||
45 | |||
46 | /* | ||
47 | * Given a bus, device, and function number, compute resulting | ||
48 | * configuration space address. It is therefore not safe to have | ||
49 | * concurrent invocations to configuration space access routines, but | ||
50 | * there really shouldn't be any need for this. | ||
51 | * | ||
52 | * Type 0: | ||
53 | * | ||
54 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
55 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
56 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
57 | * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| | ||
58 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
59 | * | ||
60 | * 31:11 Device select bit. | ||
61 | * 10:8 Function number | ||
62 | * 7:2 Register number | ||
63 | * | ||
64 | * Type 1: | ||
65 | * | ||
66 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
67 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
68 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
69 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
70 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
71 | * | ||
72 | * 31:24 reserved | ||
73 | * 23:16 bus number (8 bits = 128 possible buses) | ||
74 | * 15:11 Device number (5 bits) | ||
75 | * 10:8 function number | ||
76 | * 7:2 register number | ||
77 | * | ||
78 | * Notes: | ||
79 | * The function number selects which function of a multi-function device | ||
80 | * (e.g., SCSI and Ethernet). | ||
81 | * | ||
82 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
83 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
84 | * bits. | ||
85 | */ | ||
86 | |||
87 | static int | ||
88 | mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where, | ||
89 | unsigned long *pci_addr, unsigned char *type1) | ||
90 | { | ||
91 | u8 bus = bus_dev->number; | ||
92 | |||
93 | *type1 = (bus != 0); | ||
94 | *pci_addr = (bus << 16) | (device_fn << 8) | where; | ||
95 | |||
96 | DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," | ||
97 | " returning address 0x%p\n" | ||
98 | bus, device_fn, where, *pci_addr)); | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
/*
 * Perform one PCI configuration-space read at the pre-computed CIA
 * CONF address.  A probe of an empty slot provokes a machine check,
 * so the access is bracketed with mcheck_expected()/mcheck_taken()
 * and returns 0xffffffff if the mcheck fired.  Runs with interrupts
 * off; not safe against concurrent callers (see mk_conf_addr note).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	int stat0, value;
	int cia_cfg = 0;

	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors. */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;		/* W1C pending errors */
	mb();
	*(vip)CIA_IOC_CIA_ERR;			/* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;	/* select Type 1 cycles */
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding memory traffic, then arm the expected-mcheck
	   flags before touching config space. */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space. */
	value = *(vip)addr;
	mb();
	mb();  /* magic */
	if (mcheck_taken(0)) {
		/* No device answered: master abort -> all-ones. */
		mcheck_taken(0) = 0;
		value = 0xffffffff;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work. */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));

	return value;
}
157 | |||
/*
 * Perform one PCI configuration-space write at the pre-computed CIA
 * CONF address.  Mirrors conf_read(): clears pending chipset errors,
 * selects Type 1 cycles if needed, arms the expected-mcheck flags,
 * does the write, and restores CFG.  Runs with interrupts off.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	int stat0, cia_cfg = 0;

	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors. */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;		/* W1C pending errors */
	mb();
	*(vip)CIA_IOC_CIA_ERR;			/* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;	/* select Type 1 cycles */
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding memory traffic, then arm the expected-mcheck
	   flags before touching config space. */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space. */
	*(vip)addr = value;
	mb();
	*(vip)addr; /* read back to force the write */

	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work. */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));
}
205 | |||
206 | static int | ||
207 | cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, | ||
208 | u32 *value) | ||
209 | { | ||
210 | unsigned long addr, pci_addr; | ||
211 | long mask; | ||
212 | unsigned char type1; | ||
213 | int shift; | ||
214 | |||
215 | if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) | ||
216 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
217 | |||
218 | mask = (size - 1) * 8; | ||
219 | shift = (where & 3) * 8; | ||
220 | addr = (pci_addr << 5) + mask + CIA_CONF; | ||
221 | *value = conf_read(addr, type1) >> (shift); | ||
222 | return PCIBIOS_SUCCESSFUL; | ||
223 | } | ||
224 | |||
225 | static int | ||
226 | cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, | ||
227 | u32 value) | ||
228 | { | ||
229 | unsigned long addr, pci_addr; | ||
230 | long mask; | ||
231 | unsigned char type1; | ||
232 | |||
233 | if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) | ||
234 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
235 | |||
236 | mask = (size - 1) * 8; | ||
237 | addr = (pci_addr << 5) + mask + CIA_CONF; | ||
238 | conf_write(addr, value << ((where & 3) * 8), type1); | ||
239 | return PCIBIOS_SUCCESSFUL; | ||
240 | } | ||
241 | |||
/* Configuration-space accessors handed to the generic PCI layer. */
struct pci_ops cia_pci_ops =
{
	.read =		cia_read_config,
	.write =	cia_write_config,
};
247 | |||
248 | /* | ||
249 | * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb. | ||
250 | * It cannot be invalidated. Rather than hard code the pass numbers, | ||
251 | * actually try the tbia to see if it works. | ||
252 | */ | ||
253 | |||
/*
 * Invalidate the scatter-gather TLB with a direct TBIA.  The start/end
 * range is ignored -- the whole TLB is flushed.  Only usable on parts
 * where the tbia actually works (see verify_tb_operation below).
 */
void
cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked. */
	mb();
	*(vip)CIA_IOC_PCI_TBIA;		/* re-read to force write */
}
262 | |||
263 | /* | ||
264 | * On PYXIS, even if the tbia works, we cannot use it. It effectively locks | ||
265 | * the chip (as well as direct write to the tag registers) if there is a | ||
266 | * SG DMA operation in progress. This is true at least for PYXIS rev. 1, | ||
267 | * so always use the method below. | ||
268 | */ | ||
269 | /* | ||
270 | * This is the method NT and NetBSD use. | ||
271 | * | ||
272 | * Allocate mappings, and put the chip into DMA loopback mode to read a | ||
273 | * garbage page. This works by causing TLB misses, causing old entries to | ||
274 | * be purged to make room for the new entries coming in for the garbage page. | ||
275 | */ | ||
276 | |||
277 | #define CIA_BROKEN_TBIA_BASE 0x30000000 | ||
278 | #define CIA_BROKEN_TBIA_SIZE 1024 | ||
279 | |||
280 | /* Always called with interrupts disabled */ | ||
281 | void | ||
282 | cia_pci_tbi_try2(struct pci_controller *hose, | ||
283 | dma_addr_t start, dma_addr_t end) | ||
284 | { | ||
285 | void __iomem *bus_addr; | ||
286 | int ctrl; | ||
287 | |||
288 | /* Put the chip into PCI loopback mode. */ | ||
289 | mb(); | ||
290 | ctrl = *(vip)CIA_IOC_CIA_CTRL; | ||
291 | *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN; | ||
292 | mb(); | ||
293 | *(vip)CIA_IOC_CIA_CTRL; | ||
294 | mb(); | ||
295 | |||
296 | /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on | ||
297 | each read. This forces SG TLB misses. NetBSD claims that the | ||
298 | TLB entries are not quite LRU, meaning that we need to read more | ||
299 | times than there are actual tags. The 2117x docs claim strict | ||
300 | round-robin. Oh well, we've come this far... */ | ||
301 | /* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can | ||
302 | be filled by the TLB misses *only once* after being invalidated | ||
303 | (by tbia or direct write). Next misses won't update them even | ||
304 | though the lock bits are cleared. Tags 4-7 are "quite LRU" though, | ||
305 | so use them and read at window 3 base exactly 4 times. Reading | ||
306 | more sometimes makes the chip crazy. -ink */ | ||
307 | |||
308 | bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4); | ||
309 | |||
310 | cia_readl(bus_addr + 0x00000); | ||
311 | cia_readl(bus_addr + 0x08000); | ||
312 | cia_readl(bus_addr + 0x10000); | ||
313 | cia_readl(bus_addr + 0x18000); | ||
314 | |||
315 | cia_iounmap(bus_addr); | ||
316 | |||
317 | /* Restore normal PCI operation. */ | ||
318 | mb(); | ||
319 | *(vip)CIA_IOC_CIA_CTRL = ctrl; | ||
320 | mb(); | ||
321 | *(vip)CIA_IOC_CIA_CTRL; | ||
322 | mb(); | ||
323 | } | ||
324 | |||
/*
 * Set up the DMA window used by cia_pci_tbi_try2(): a small page table
 * whose entries all point at the same garbage page, mapped through the
 * given CIA window at CIA_BROKEN_TBIA_BASE with SG enabled.
 */
static inline void
cia_prepare_tbia_workaround(int window)
{
	unsigned long *ppte, pte;
	long i;

	/* Use minimal 1K map.  NetBSD hints the table must be 32K-aligned. */
	ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
	/* PTE format: PFN in bits <31:1>, valid bit in bit 0. */
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;	/* enable | SG */
	*(vip)CIA_IOC_PCI_Wn_MASK(window)
		= (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}
343 | |||
/*
 * Boot-time self-test of the CIA/PYXIS scatter-gather TLB, run via
 * PCI loopback DMA against a throw-away page.  Verifies, in order:
 * register writability, loopback I/O through a hand-installed TLB
 * entry, tbia, PTE cache snooping, invalid-PTE reload, and machine
 * check delivery.  Falls back to the tbia workaround or disables SG
 * translation entirely depending on what fails.
 */
static void __init
verify_tb_operation(void)
{
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initdata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp, use_tbia_try2 = 0;
	void __iomem *bus_addr;

	/* pyxis -- tbia is broken */
	if (pci_isa_hose->dense_io_base)
		use_tbia_try2 = 1;

	/* Put the chip into PCI loopback mode. */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Write a valid entry directly into the TLB registers. */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;				/* tag = bus address | valid */
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();

	/* Get a usable bus address */
	bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);

	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail. */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work. */

	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");

	/* Second, verify we can actually do I/O through this entry. */

	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr);		/* loopback DMA read of page[0] */
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");

	/* Third, try to invalidate the TLB. */

	if (! use_tbia_try2) {
		cia_pci_tbi(arena->hose, 0, -1);
		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			/* Valid bit survived the tbia: it's broken. */
			use_tbia_try2 = 1;
			printk("pci: failed tbia test; workaround available\n");
		} else {
			printk("pci: passed tbia test\n");
		}
	}

	/* Fourth, verify the TLB snoops the EV5's caches when
	   doing a tlb fill. */

	data0 = 0x5adda15e;
	page[0] = data0;
	arena->ptes[4] = pte0;			/* in-memory PTE only; forces a TLB fill */
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 4*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");

	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table. */

	data0 = 0xabcdef12;
	page[0] = data0;
	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 5*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		/* Work around this bug by aligning new allocations
		   on 4 page boundaries. */
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else {
		printk("pci: passed valid tag invalid pte reload test\n");
	}

	/* Sixth, verify machine checks are working.  Test invalid
	   pte under the same valid tag as we used above. */

	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 6*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");

	/* Clean up after the tests. */
	arena->ptes[4] = 0;
	arena->ptes[5] = 0;

	if (use_tbia_try2) {
		alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;

		/* Tags 0-3 must be disabled if we use this workaround. */
		wmb();
		*(vip)CIA_IOC_TB_TAGn(0) = 2;	/* 2 = locked, invalid */
		*(vip)CIA_IOC_TB_TAGn(1) = 2;
		*(vip)CIA_IOC_TB_TAGn(2) = 2;
		*(vip)CIA_IOC_TB_TAGn(3) = 2;

		printk("pci: tbia workaround enabled\n");
	}
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* unmap the bus addr */
	cia_iounmap(bus_addr);

	/* Restore normal PCI operation. */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	return;

failed:
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	*(vip)CIA_IOC_PCI_W1_BASE = 0;
	pci_isa_hose->sg_isa = NULL;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}
556 | |||
557 | #if defined(ALPHA_RESTORE_SRM_SETUP) | ||
558 | /* Save CIA configuration data as the console had it set up. */ | ||
/* Snapshot of the CIA/PYXIS register state as SRM left it, restored on
   shutdown so the console finds the chipset the way it expects. */
struct
{
	unsigned int hae_mem;		/* sparse-memory extension address */
	unsigned int hae_io;		/* sparse-I/O extension address */
	unsigned int pci_dac_offset;	/* W_DAC high address bits */
	unsigned int err_mask;
	unsigned int cia_ctrl;
	unsigned int cia_cnfg;		/* PYXIS only; 0 on plain CIA */
	struct {
		unsigned int w_base;	/* DMA window base/enable */
		unsigned int w_mask;	/* DMA window size mask */
		unsigned int t_base;	/* DMA window translation table */
	} window[4];
} saved_config __attribute((common));
573 | |||
/*
 * Record the chipset configuration the SRM console set up, so that
 * cia_restore_srm_settings() can put it back before returning control
 * to the console.  Called once from do_init_arch() when running
 * under SRM.
 */
void
cia_save_srm_settings(int is_pyxis)
{
	int i;

	/* Save some important registers. */
	saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK;
	saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL;
	saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM;
	saved_config.hae_io = *(vip)CIA_IOC_HAE_IO;
	saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;

	/* CNFG only exists on PYXIS; 0 doubles as the "not pyxis" marker
	   checked by cia_restore_srm_settings(). */
	if (is_pyxis)
		saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG;
	else
		saved_config.cia_cnfg = 0;

	/* Save DMA windows configuration. */
	for (i = 0; i < 4; i++) {
		saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
		saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
		saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
	}
	mb();
}
599 | |||
/*
 * Write the saved SRM chipset configuration back into the hardware.
 * Called from cia_kill_arch() just before handing control back to
 * the console.
 */
void
cia_restore_srm_settings(void)
{
	int i;

	/* DMA windows first, then the global registers. */
	for (i = 0; i < 4; i++) {
		*(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
		*(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
		*(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
	}

	*(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem;
	*(vip)CIA_IOC_HAE_IO = saved_config.hae_io;
	*(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
	*(vip)CIA_IOC_ERR_MASK = saved_config.err_mask;
	*(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl;

	if (saved_config.cia_cnfg) /* Must be pyxis. */
		*(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg;

	mb();
}
622 | #else /* ALPHA_RESTORE_SRM_SETUP */ | ||
623 | #define cia_save_srm_settings(p) do {} while (0) | ||
624 | #define cia_restore_srm_settings() do {} while (0) | ||
625 | #endif /* ALPHA_RESTORE_SRM_SETUP */ | ||
626 | |||
627 | |||
/*
 * Common CIA/PYXIS chipset initialization: save SRM state, configure
 * error reporting and machine checks, build the single PCI hose, and
 * program the PCI-to-memory DMA windows.  is_pyxis selects between
 * the sparse-space CIA layout and the BWX/monster-window PYXIS layout.
 */
static void __init
do_init_arch(int is_pyxis)
{
	struct pci_controller *hose;
	int temp, cia_rev, tbia_window;

	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
	printk("pci: cia revision %d%s\n",
	       cia_rev, is_pyxis ? " (pyxis)" : "");

	if (alpha_using_srm)
		cia_save_srm_settings(is_pyxis);

	/* Set up error reporting.  Clearing a mask bit *enables* that
	   error report. */
	temp = *(vip)CIA_IOC_ERR_MASK;
	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
	*(vip)CIA_IOC_ERR_MASK = temp;

	/* Clear all currently pending errors (write-one-to-clear). */
	temp = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = temp;

	/* Turn on mchecks. */
	temp = *(vip)CIA_IOC_CIA_CTRL;
	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
	*(vip)CIA_IOC_CIA_CTRL = temp;

	/* Clear the CFG register, which gets used for PCI config space
	   accesses.  That is the way we want to use it, and we do not
	   want to depend on what ARC or SRM might have left behind. */
	*(vip)CIA_IOC_CFG = 0;

	/* Zero the HAEs. */
	*(vip)CIA_IOC_HAE_MEM = 0;
	*(vip)CIA_IOC_HAE_IO = 0;

	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
	   make sure they're enabled on the controller.  At the same time,
	   enable the monster window. */
	if (is_pyxis) {
		temp = *(vip)CIA_IOC_CIA_CNFG;
		temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
		*(vip)CIA_IOC_CIA_CNFG = temp;
	}

	/* Synchronize with all previous changes. */
	mb();
	*(vip)CIA_IOC_CIA_REV;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	if (! is_pyxis) {
		/* Plain CIA: memory space is windowed through HAE_MEM. */
		struct resource *hae_mem = alloc_resource();
		hose->mem_space = hae_mem;

		hae_mem->start = 0;
		hae_mem->end = CIA_MEM_R1_MASK;
		hae_mem->name = pci_hae0_name;
		hae_mem->flags = IORESOURCE_MEM;

		if (request_resource(&iomem_resource, hae_mem) < 0)
			printk(KERN_ERR "Failed to request HAE_MEM\n");

		hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
		hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
		hose->sparse_io_base = CIA_IO - IDENT_ADDR;
		hose->dense_io_base = 0;
	} else {
		/* PYXIS: BWX dense spaces only, no sparse access needed. */
		hose->sparse_mem_base = 0;
		hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
		hose->sparse_io_base = 0;
		hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is S/G 8MB at 8MB (for isa)
	 * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
	 * Window 2 is direct access 2GB at 2GB
	 * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
	 *
	 * ??? NetBSD hints that page tables must be aligned to 32K,
	 * possibly due to a hardware bug.  This is over-aligned
	 * from the 8K alignment one would expect for an 8MB window.
	 * No description of what revisions affected.
	 */

	hose->sg_pci = NULL;
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;

	*(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
	*(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;

	/* On PYXIS we have the monster window, selected by bit 40, so
	   there is no need for window3 to be enabled.

	   On CIA, we don't have true arbitrary addressing -- bits <39:32>
	   are compared against W_DAC.  We can, however, directly map 4GB,
	   which is better than before.  However, due to assumptions made
	   elsewhere, we should not claim that we support DAC unless that
	   4GB covers all of physical memory.

	   On CIA rev 1, apparently W1 and W2 can't be used for SG.
	   At least, there are reports that it doesn't work for Alcor.
	   In that case, we have no choice but to use W3 for the TBIA
	   workaround, which means we can't use DAC at all. */

	tbia_window = 1;
	if (is_pyxis) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else if (cia_rev == 1) {
		*(vip)CIA_IOC_PCI_W1_BASE = 0;
		tbia_window = 3;
	} else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else {
		*(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
		*(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
		*(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;

		alpha_mv.pci_dac_offset = 0x200000000UL;
		*(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
	}

	/* Prepare workaround for apparently broken tbia. */
	cia_prepare_tbia_workaround(tbia_window);
}
772 | |||
/* Machine-vector init_arch entry point for plain CIA systems. */
void __init
cia_init_arch(void)
{
	do_init_arch(0);
}
778 | |||
/* Machine-vector init_arch entry point for PYXIS systems; also
   recalibrates the CPU cycle frequency before the common init. */
void __init
pyxis_init_arch(void)
{
	/* On pyxis machines we can precisely calculate the
	   CPU clock frequency using pyxis real time counter.
	   It's especially useful for SX164 with broken RTC.

	   Both CPU and chipset are driven by the single 16.666M
	   or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
	   66.66 MHz. -ink */

	unsigned int cc0, cc1;
	unsigned long pyxis_cc;

	/* Count CPU cycles (rpcc) across 4096 ticks of the 66.66 MHz
	   PYXIS counter, then scale: freq = (cc1/4096) * 66666667... ,
	   folded into the integer arithmetic below. */
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
	pyxis_cc = *(vulp)PYXIS_RT_COUNT;
	do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
	cc1 -= cc0;
	hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
	hwrpb_update_checksum(hwrpb);

	do_init_arch(1);
}
803 | |||
/* Shutdown hook: hand the chipset back to SRM in the state the
   console originally configured.  `mode` is currently unused. */
void
cia_kill_arch(int mode)
{
	if (alpha_using_srm)
		cia_restore_srm_settings();
}
810 | |||
/* Machine-vector init_pci entry: run the SG TLB self-test, then the
   generic PCI bus scan. */
void __init
cia_init_pci(void)
{
	/* Must delay this from init_arch, as we need machine checks. */
	verify_tb_operation();
	common_init_pci();
}
818 | |||
/* Clear all latched CIA error state (CIA_ERR is write-one-to-clear).
   Used by the machine-check path, so it must not fault. */
static inline void
cia_pci_clr_err(void)
{
	int jd;

	jd = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = jd;
	mb();
	*(vip)CIA_IOC_CIA_ERR;		/* re-read to force write. */
}
829 | |||
830 | #ifdef CONFIG_VERBOSE_MCHECK | ||
831 | static void | ||
832 | cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg) | ||
833 | { | ||
834 | static const char * const pci_cmd_desc[16] = { | ||
835 | "Interrupt Acknowledge", "Special Cycle", "I/O Read", | ||
836 | "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read", | ||
837 | "Memory Write", "Reserved 0x8", "Reserved 0x9", | ||
838 | "Configuration Read", "Configuration Write", | ||
839 | "Memory Read Multiple", "Dual Address Cycle", | ||
840 | "Memory Read Line", "Memory Write and Invalidate" | ||
841 | }; | ||
842 | |||
843 | if (cia->cia_err & (CIA_ERR_COR_ERR | ||
844 | | CIA_ERR_UN_COR_ERR | ||
845 | | CIA_ERR_MEM_NEM | ||
846 | | CIA_ERR_PA_PTE_INV)) { | ||
847 | static const char * const window_desc[6] = { | ||
848 | "No window active", "Window 0 hit", "Window 1 hit", | ||
849 | "Window 2 hit", "Window 3 hit", "Monster window hit" | ||
850 | }; | ||
851 | |||
852 | const char *window; | ||
853 | const char *cmd; | ||
854 | unsigned long addr, tmp; | ||
855 | int lock, dac; | ||
856 | |||
857 | cmd = pci_cmd_desc[cia->pci_err0 & 0x7]; | ||
858 | lock = (cia->pci_err0 >> 4) & 1; | ||
859 | dac = (cia->pci_err0 >> 5) & 1; | ||
860 | |||
861 | tmp = (cia->pci_err0 >> 8) & 0x1F; | ||
862 | tmp = ffs(tmp); | ||
863 | window = window_desc[tmp]; | ||
864 | |||
865 | addr = cia->pci_err1; | ||
866 | if (dac) { | ||
867 | tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL; | ||
868 | addr |= tmp << 32; | ||
869 | } | ||
870 | |||
871 | printk(KERN_CRIT "CIA machine check: %s\n", msg); | ||
872 | printk(KERN_CRIT " DMA command: %s\n", cmd); | ||
873 | printk(KERN_CRIT " PCI address: %#010lx\n", addr); | ||
874 | printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n", | ||
875 | window, lock, dac); | ||
876 | } else if (cia->cia_err & (CIA_ERR_PERR | ||
877 | | CIA_ERR_PCI_ADDR_PE | ||
878 | | CIA_ERR_RCVD_MAS_ABT | ||
879 | | CIA_ERR_RCVD_TAR_ABT | ||
880 | | CIA_ERR_IOA_TIMEOUT)) { | ||
881 | static const char * const master_st_desc[16] = { | ||
882 | "Idle", "Drive bus", "Address step cycle", | ||
883 | "Address cycle", "Data cycle", "Last read data cycle", | ||
884 | "Last write data cycle", "Read stop cycle", | ||
885 | "Write stop cycle", "Read turnaround cycle", | ||
886 | "Write turnaround cycle", "Reserved 0xB", | ||
887 | "Reserved 0xC", "Reserved 0xD", "Reserved 0xE", | ||
888 | "Unknown state" | ||
889 | }; | ||
890 | static const char * const target_st_desc[16] = { | ||
891 | "Idle", "Busy", "Read data cycle", "Write data cycle", | ||
892 | "Read stop cycle", "Write stop cycle", | ||
893 | "Read turnaround cycle", "Write turnaround cycle", | ||
894 | "Read wait cycle", "Write wait cycle", | ||
895 | "Reserved 0xA", "Reserved 0xB", "Reserved 0xC", | ||
896 | "Reserved 0xD", "Reserved 0xE", "Unknown state" | ||
897 | }; | ||
898 | |||
899 | const char *cmd; | ||
900 | const char *master, *target; | ||
901 | unsigned long addr, tmp; | ||
902 | int dac; | ||
903 | |||
904 | master = master_st_desc[(cia->pci_err0 >> 16) & 0xF]; | ||
905 | target = target_st_desc[(cia->pci_err0 >> 20) & 0xF]; | ||
906 | cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF]; | ||
907 | dac = (cia->pci_err0 >> 28) & 1; | ||
908 | |||
909 | addr = cia->pci_err2; | ||
910 | if (dac) { | ||
911 | tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL; | ||
912 | addr |= tmp << 32; | ||
913 | } | ||
914 | |||
915 | printk(KERN_CRIT "CIA machine check: %s\n", msg); | ||
916 | printk(KERN_CRIT " PCI command: %s\n", cmd); | ||
917 | printk(KERN_CRIT " Master state: %s, Target state: %s\n", | ||
918 | master, target); | ||
919 | printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n", | ||
920 | addr, dac); | ||
921 | } else { | ||
922 | printk(KERN_CRIT "CIA machine check: %s\n", msg); | ||
923 | printk(KERN_CRIT " Unknown PCI error\n"); | ||
924 | printk(KERN_CRIT " PCI_ERR0 = %#08lx", cia->pci_err0); | ||
925 | printk(KERN_CRIT " PCI_ERR1 = %#08lx", cia->pci_err1); | ||
926 | printk(KERN_CRIT " PCI_ERR2 = %#08lx", cia->pci_err2); | ||
927 | } | ||
928 | } | ||
929 | |||
/*
 * Decode and report a CIA memory-port machine check.
 *
 * Extracts the failing address, byte mask, memory command, sequencer
 * state and memory-set selection from the MEM_ERR0/MEM_ERR1 latch
 * registers and prints them at KERN_CRIT.  @msg names the error class
 * for the leading banner line.
 */
static void
cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	unsigned long mem_port_addr;
	unsigned long mem_port_mask;
	const char *mem_port_cmd;
	const char *seq_state;
	const char *set_select;
	unsigned long tmp;

	/* If this is a DMA command, also decode the PCI bits. */
	if ((cia->mem_err1 >> 20) & 1)
		cia_decode_pci_error(cia, msg);
	else
		printk(KERN_CRIT "CIA machine check: %s\n", msg);

	/* Failing address: low bits from MEM_ERR0, upper bits from
	   MEM_ERR1 (mask 0x83 -- presumably the address<39:38,32> latch
	   bits; TODO confirm against the 21171 register spec).  */
	mem_port_addr = cia->mem_err0 & 0xfffffff0;
	mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;

	/* Byte-enable mask of the failing transaction.  */
	mem_port_mask = (cia->mem_err1 >> 12) & 0xF;

	/* Command code: 4 command bits from MEM_ERR1<11:8> plus the
	   DMA flag (MEM_ERR1<20>) folded in as bit 4.  */
	tmp = (cia->mem_err1 >> 8) & 0xF;
	tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
	if ((tmp & 0x1E) == 0x06)
		mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
	else if ((tmp & 0x1C) == 0x08)
		mem_port_cmd = "READ MISS or READ MISS MODIFY";
	else if (tmp == 0x1C)
		mem_port_cmd = "BC VICTIM";
	else if ((tmp & 0x1E) == 0x0E)
		mem_port_cmd = "READ MISS MODIFY";
	else if ((tmp & 0x1C) == 0x18)
		mem_port_cmd = "DMA READ or DMA READ MODIFY";
	else if ((tmp & 0x1E) == 0x12)
		mem_port_cmd = "DMA WRITE";
	else
		mem_port_cmd = "Unknown";

	/* Memory sequencer state at the time of the error
	   (MEM_ERR1<19:16>).  */
	tmp = (cia->mem_err1 >> 16) & 0xF;
	switch (tmp) {
	case 0x0:
		seq_state = "Idle";
		break;
	case 0x1:
		seq_state = "DMA READ or DMA WRITE";
		break;
	case 0x2: case 0x3:
		seq_state = "READ MISS (or READ MISS MODIFY) with victim";
		break;
	case 0x4: case 0x5: case 0x6:
		seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
		break;
	case 0x8: case 0x9: case 0xB:
		seq_state = "Refresh";
		break;
	case 0xC:
		seq_state = "Idle, waiting for DMA pending read";
		break;
	case 0xE: case 0xF:
		seq_state = "Idle, ras precharge";
		break;
	default:
		seq_state = "Unknown";
		break;
	}

	/* Selected memory set (MEM_ERR1<28:24>); 0x10 means no set,
	   0x1F means the error hit during a refresh cycle.  */
	tmp = (cia->mem_err1 >> 24) & 0x1F;
	switch (tmp) {
	case 0x00: set_select = "Set 0 selected"; break;
	case 0x01: set_select = "Set 1 selected"; break;
	case 0x02: set_select = "Set 2 selected"; break;
	case 0x03: set_select = "Set 3 selected"; break;
	case 0x04: set_select = "Set 4 selected"; break;
	case 0x05: set_select = "Set 5 selected"; break;
	case 0x06: set_select = "Set 6 selected"; break;
	case 0x07: set_select = "Set 7 selected"; break;
	case 0x08: set_select = "Set 8 selected"; break;
	case 0x09: set_select = "Set 9 selected"; break;
	case 0x0A: set_select = "Set A selected"; break;
	case 0x0B: set_select = "Set B selected"; break;
	case 0x0C: set_select = "Set C selected"; break;
	case 0x0D: set_select = "Set D selected"; break;
	case 0x0E: set_select = "Set E selected"; break;
	case 0x0F: set_select = "Set F selected"; break;
	case 0x10: set_select = "No set selected"; break;
	case 0x1F: set_select = "Refresh cycle"; break;
	default: set_select = "Unknown"; break;
	}

	printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd);
	printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n",
	       mem_port_addr, mem_port_mask);
	printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state);
	printk(KERN_CRIT " Memory set: %s\n", set_select);
}
1025 | |||
/*
 * Decode and report a CIA ECC machine check (@msg says whether it was
 * corrected).  First prints the memory-port context, then maps the
 * ECC syndrome byte to the failing check bit or data bit.
 */
static void
cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	long syn;
	long i;
	const char *fmt;

	cia_decode_mem_error(cia, msg);

	/* A syndrome that is a power of two (exactly one bit set) means
	   a single flipped check bit; ffs() gives its position.  */
	syn = cia->cia_syn & 0xff;
	if (syn == (syn & -syn)) {
		fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n";
		i = ffs(syn) - 1;
	} else {
		/* Syndrome produced by a single-bit error in each of the
		   64 data bits, indexed by data-bit number; a linear
		   search inverts the mapping.  */
		static unsigned char const data_bit[64] = {
			0xCE, 0xCB, 0xD3, 0xD5,
			0xD6, 0xD9, 0xDA, 0xDC,
			0x23, 0x25, 0x26, 0x29,
			0x2A, 0x2C, 0x31, 0x34,
			0x0E, 0x0B, 0x13, 0x15,
			0x16, 0x19, 0x1A, 0x1C,
			0xE3, 0xE5, 0xE6, 0xE9,
			0xEA, 0xEC, 0xF1, 0xF4,
			0x4F, 0x4A, 0x52, 0x54,
			0x57, 0x58, 0x5B, 0x5D,
			0xA2, 0xA4, 0xA7, 0xA8,
			0xAB, 0xAD, 0xB0, 0xB5,
			0x8F, 0x8A, 0x92, 0x94,
			0x97, 0x98, 0x9B, 0x9D,
			0x62, 0x64, 0x67, 0x68,
			0x6B, 0x6D, 0x70, 0x75
		};

		for (i = 0; i < 64; ++i)
			if (data_bit[i] == syn)
				break;

		if (i < 64)
			fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n";
		else
			fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n";
	}

	printk (fmt, syn, i);
}
1071 | |||
/*
 * Decode and report a system (CPU) bus parity error from the
 * CPU_ERR0/CPU_ERR1 latch registers: failing address, bus command,
 * byte mask and which parity bit fired.
 */
static void
cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
{
	/* System-bus command encodings, indexed by CPU_ERR1<11:8>.  */
	static const char * const cmd_desc[16] = {
		"NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
		"SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
		"READ MISS0", "READ MISS1", "READ MISS MOD0",
		"READ MISS MOD1", "BCACHE VICTIM", "Spare",
		"READ MISS MOD STC0", "READ MISS MOD STC1"
	};

	unsigned long addr;
	unsigned long mask;
	const char *cmd;
	int par;

	/* Address: low bits from CPU_ERR0, upper bits from CPU_ERR1
	   (same 0x83 latch layout as the memory-port decode).  */
	addr = cia->cpu_err0 & 0xfffffff0;
	addr |= (cia->cpu_err1 & 0x83UL) << 32;
	cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
	mask = (cia->cpu_err1 >> 12) & 0xF;
	par = (cia->cpu_err1 >> 21) & 1;

	printk(KERN_CRIT "CIA machine check: System bus parity error\n");
	printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd, par);
	printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", addr, mask);
}
1098 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
1099 | |||
1100 | |||
/*
 * Decode a CIA machine-check logout frame at @la_ptr.
 *
 * Returns 0 when no valid error is latched (the caller then treats
 * the machine check as unexpected), 1 otherwise.  The detailed decode
 * is only compiled in under CONFIG_VERBOSE_MCHECK and only printed
 * when the alpha_verbose_mcheck runtime switch is on.
 */
static int
cia_decode_mchk(unsigned long la_ptr)
{
	struct el_common *com;
	struct el_CIA_sysdata_mcheck *cia;

	/* The system-specific frame follows the common logout header.  */
	com = (void *)la_ptr;
	cia = (void *)(la_ptr + com->sys_offset);

	if ((cia->cia_err & CIA_ERR_VALID) == 0)
		return 0;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!alpha_verbose_mcheck)
		return 1;

	/* Decode the lowest-numbered error bit that is set in the
	   twelve primary CIA_ERR error positions.  */
	switch (ffs(cia->cia_err & 0xfff) - 1) {
	case 0: /* CIA_ERR_COR_ERR */
		cia_decode_ecc_error(cia, "Corrected ECC error");
		break;
	case 1: /* CIA_ERR_UN_COR_ERR */
		cia_decode_ecc_error(cia, "Uncorrected ECC error");
		break;
	case 2: /* CIA_ERR_CPU_PE */
		cia_decode_parity_error(cia);
		break;
	case 3: /* CIA_ERR_MEM_NEM */
		cia_decode_mem_error(cia, "Access to nonexistent memory");
		break;
	case 4: /* CIA_ERR_PCI_SERR */
		cia_decode_pci_error(cia, "PCI bus system error");
		break;
	case 5: /* CIA_ERR_PERR */
		cia_decode_pci_error(cia, "PCI data parity error");
		break;
	case 6: /* CIA_ERR_PCI_ADDR_PE */
		cia_decode_pci_error(cia, "PCI address parity error");
		break;
	case 7: /* CIA_ERR_RCVD_MAS_ABT */
		cia_decode_pci_error(cia, "PCI master abort");
		break;
	case 8: /* CIA_ERR_RCVD_TAR_ABT */
		cia_decode_pci_error(cia, "PCI target abort");
		break;
	case 9: /* CIA_ERR_PA_PTE_INV */
		cia_decode_pci_error(cia, "PCI invalid PTE");
		break;
	case 10: /* CIA_ERR_FROM_WRT_ERR */
		cia_decode_mem_error(cia, "Write to flash ROM attempted");
		break;
	case 11: /* CIA_ERR_IOA_TIMEOUT */
		cia_decode_pci_error(cia, "I/O timeout");
		break;
	}

	/* Report any further errors that were latched as "lost" while
	   the primary error above was still pending.  */
	if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Correctable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Uncorrectable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "System bus parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Access to nonexistent memory\n");
	if (cia->cia_err & CIA_ERR_LOST_PERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI data parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI address parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI master abort\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI target abort\n");
	if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI invalid PTE\n");
	if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Write to flash ROM attempted\n");
	if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "I/O timeout\n");
#endif /* CONFIG_VERBOSE_MCHECK */

	return 1;
}
1193 | |||
/*
 * Top-level CIA machine-check handler.
 *
 * Clears the latched chipset error state before doing any reporting
 * (so the reporting itself can't re-trigger or wedge on a still-
 * pending error), then decides whether the event was expected (e.g.
 * a config-space probe) and hands off to the generic mcheck code.
 */
void
cia_machine_check(unsigned long vector, unsigned long la_ptr,
		  struct pt_regs * regs)
{
	int expected;

	/* Clear the error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag. */
	mb();

	expected = mcheck_expected(0);
	/* Vector 0x660 is the system machine check; if nobody was
	   expecting it, try to decode the logout frame -- a valid
	   decode also counts as "expected" for reporting purposes.  */
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
}
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c new file mode 100644 index 000000000000..138d497d1cca --- /dev/null +++ b/arch/alpha/kernel/core_irongate.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_irongate.c | ||
3 | * | ||
4 | * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com). | ||
5 | * | ||
6 | * Copyright (C) 1999 Alpha Processor, Inc., | ||
7 | * (David Daniel, Stig Telfer, Soohoon Lee) | ||
8 | * | ||
9 | * Code common to all IRONGATE core logic chips. | ||
10 | */ | ||
11 | |||
12 | #define __EXTERN_INLINE inline | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/core_irongate.h> | ||
15 | #undef __EXTERN_INLINE | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/initrd.h> | ||
22 | #include <linux/bootmem.h> | ||
23 | |||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/pci.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | #include "proto.h" | ||
30 | #include "pci_impl.h" | ||
31 | |||
32 | /* | ||
33 | * BIOS32-style PCI interface: | ||
34 | */ | ||
35 | |||
36 | #define DEBUG_CONFIG 0 | ||
37 | |||
38 | #if DEBUG_CONFIG | ||
39 | # define DBG_CFG(args) printk args | ||
40 | #else | ||
41 | # define DBG_CFG(args) | ||
42 | #endif | ||
43 | |||
44 | igcsr32 *IronECC; | ||
45 | |||
46 | /* | ||
47 | * Given a bus, device, and function number, compute resulting | ||
48 | * configuration space address accordingly. It is therefore not safe | ||
49 | * to have concurrent invocations to configuration space access | ||
50 | * routines, but there really shouldn't be any need for this. | ||
51 | * | ||
52 | * addr[31:24] reserved | ||
53 | * addr[23:16] bus number (8 bits = 128 possible buses) | ||
54 | * addr[15:11] Device number (5 bits) | ||
55 | * addr[10: 8] function number | ||
56 | * addr[ 7: 2] register number | ||
57 | * | ||
58 | * For IRONGATE: | ||
59 | * if (bus = addr[23:16]) == 0 | ||
60 | * then | ||
61 | * type 0 config cycle: | ||
62 | * addr_on_pci[31:11] = id selection for device = addr[15:11] | ||
63 | * addr_on_pci[10: 2] = addr[10: 2] ??? | ||
64 | * addr_on_pci[ 1: 0] = 00 | ||
65 | * else | ||
66 | * type 1 config cycle (pass on with no decoding): | ||
67 | * addr_on_pci[31:24] = 0 | ||
68 | * addr_on_pci[23: 2] = addr[23: 2] | ||
69 | * addr_on_pci[ 1: 0] = 01 | ||
70 | * fi | ||
71 | * | ||
72 | * Notes: | ||
73 | * The function number selects which function of a multi-function device | ||
74 | * (e.g., SCSI and Ethernet). | ||
75 | * | ||
76 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
77 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
78 | * bits. | ||
79 | */ | ||
80 | |||
81 | static int | ||
82 | mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, | ||
83 | unsigned long *pci_addr, unsigned char *type1) | ||
84 | { | ||
85 | unsigned long addr; | ||
86 | u8 bus = pbus->number; | ||
87 | |||
88 | DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " | ||
89 | "pci_addr=0x%p, type1=0x%p)\n", | ||
90 | bus, device_fn, where, pci_addr, type1)); | ||
91 | |||
92 | *type1 = (bus != 0); | ||
93 | |||
94 | addr = (bus << 16) | (device_fn << 8) | where; | ||
95 | addr |= IRONGATE_CONF; | ||
96 | |||
97 | *pci_addr = addr; | ||
98 | DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); | ||
99 | return 0; | ||
100 | } | ||
101 | |||
/*
 * Read a PCI configuration register through the Irongate config
 * window.  @size is 1, 2 or 4 bytes; the value read is stored
 * zero-extended in *@value.  Returns PCIBIOS_SUCCESSFUL, or
 * PCIBIOS_DEVICE_NOT_FOUND if the config address cannot be formed.
 */
static int
irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Sub-longword accesses go through the EV56 byte/word load
	   helpers; longwords are read directly.  */
	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
126 | |||
/*
 * Write a PCI configuration register through the Irongate config
 * window.  @size is 1, 2 or 4 bytes.  Returns PCIBIOS_SUCCESSFUL,
 * or PCIBIOS_DEVICE_NOT_FOUND if the config address cannot be formed.
 */
static int
irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Each store is followed by a memory barrier and a dummy
	   read-back of the same location to push the write out to
	   the PCI bus before returning.  */
	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
157 | |||
/* PCI controller operations for the Irongate: the two config-space
   accessors above are all that is needed.  */
struct pci_ops irongate_pci_ops =
{
	.read = irongate_read_config,
	.write = irongate_write_config,
};
163 | |||
/*
 * Clear latched Irongate error state.
 *
 * Reads the status/command and ECC registers and writes the value
 * back (writing the read value clears the error bits), then pulses
 * the NMI-clear bits in ALI port 0x61.  The whole sequence repeats
 * until the ECC register's error bits (0x300) stay clear.  Always
 * returns 0.
 */
int
irongate_pci_clr_err(void)
{
	unsigned int nmi_ctl=0;
	unsigned int IRONGATE_jd;

again:
	IRONGATE_jd = IRONGATE0->stat_cmd;
	printk("Iron stat_cmd %x\n", IRONGATE_jd);
	IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */

	IRONGATE_jd = *IronECC;
	printk("Iron ECC %x\n", IRONGATE_jd);
	*IronECC = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = *IronECC; /* re-read to force write */

	/* Clear ALI NMI */
	nmi_ctl = inb(0x61);
	nmi_ctl |= 0x0c;
	outb(nmi_ctl, 0x61);
	nmi_ctl &= ~0x0c;
	outb(nmi_ctl, 0x61);

	/* Retry as long as ECC error bits are still set.  */
	IRONGATE_jd = *IronECC;
	if (IRONGATE_jd & 0x300) goto again;

	return 0;
}
195 | |||
196 | #define IRONGATE_3GB 0xc0000000UL | ||
197 | |||
198 | /* On Albacore (aka UP1500) with 4Gb of RAM we have to reserve some | ||
199 | memory for PCI. At this point we just reserve memory above 3Gb. Most | ||
200 | of this memory will be freed after PCI setup is done. */ | ||
/*
 * Albacore (UP1500) setup: carve out the top of memory for PCI.
 *
 * Warns about SRM consoles older than A5.6-18 (bad PALcode), then
 * reserves all RAM above the chosen PCI boundary (capped at 3GB)
 * via bootmem, relocating the initrd below the boundary if needed.
 */
static void __init
albacore_init_arch(void)
{
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;
	/* Round the top of memory up to a 16MB boundary for the PCI
	   window base.  */
	unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
	struct percpu_struct *cpu;
	int pal_rev, pal_var;

	/* PAL revision/variation live in the per-CPU HWRPB slot.  */
	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	pal_rev = cpu->pal_revision & 0xffff;
	pal_var = (cpu->pal_revision >> 16) & 0xff;

	/* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
	   the CPU incorrectly (leave speculative stores enabled),
	   which causes memory corruption under certain conditions.
	   Issue a warning for such consoles. */
	if (alpha_using_srm &&
	    (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
		printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
		       "or later\n");

	if (pci_mem > IRONGATE_3GB)
		pci_mem = IRONGATE_3GB;
	IRONGATE0->pci_mem = pci_mem;
	alpha_mv.min_mem_address = pci_mem;
	if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		extern unsigned long initrd_start, initrd_end;
		extern void *move_initrd(unsigned long);

		/* Move the initrd out of the way. */
		if (initrd_end && __pa(initrd_end) > pci_mem) {
			unsigned long size;

			size = initrd_end - initrd_start;
			free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					  PAGE_ALIGN(size));
			if (!move_initrd(pci_mem))
				printk("irongate_init_arch: initrd too big "
				       "(%ldK)\ndisabling initrd\n",
				       size / 1024);
		}
#endif
		/* Per the comment above the function, most of this
		   reservation is released again after PCI setup.  */
		reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - pci_mem);
		printk("irongate_init_arch: temporarily reserving "
		       "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
	}
}
249 | |||
250 | static void __init | ||
251 | irongate_setup_agp(void) | ||
252 | { | ||
253 | /* Disable the GART window. AGPGART doesn't work due to yet | ||
254 | unresolved memory coherency issues... */ | ||
255 | IRONGATE0->agpva = IRONGATE0->agpva & ~0xf; | ||
256 | alpha_agpgart_size = 0; | ||
257 | } | ||
258 | |||
/*
 * Irongate core initialization: pick the right ECC register for the
 * chip revision, clear pending errors, do Albacore-specific memory
 * setup when on an AMD 761, disable AGP, and create the single PCI
 * hose with direct-mapped (no scatter-gather) DMA.
 */
void __init
irongate_init_arch(void)
{
	struct pci_controller *hose;
	/* Device IDs above 0x7006 in dev_vendor identify the AMD 761
	   part used on Albacore/UP1500 boards.  */
	int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */

	/* The ECC machine-state register moved between revisions.  */
	IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;

	irongate_pci_clr_err();

	if (amd761)
		albacore_init_arch();

	irongate_setup_agp();

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* This is for userland consumption. For some reason, the 40-bit
	   PIO bias that we use in the kernel through KSEG didn't work for
	   the page table based user mappings. So make sure we get the
	   43-bit PIO bias. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;

	/* No IOMMU scatter-gather: DMA is direct-mapped over 4GB.  */
	hose->sg_isa = hose->sg_pci = NULL;
	__direct_map_base = 0;
	__direct_map_size = 0xffffffff;
}
298 | |||
299 | /* | ||
300 | * IO map and AGP support | ||
301 | */ | ||
302 | #include <linux/vmalloc.h> | ||
303 | #include <linux/agp_backend.h> | ||
304 | #include <linux/agpgart.h> | ||
305 | #include <asm/pgalloc.h> | ||
306 | |||
307 | #define GET_PAGE_DIR_OFF(addr) (addr >> 22) | ||
308 | #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr)) | ||
309 | |||
310 | #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) | ||
311 | #define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)]) | ||
312 | |||
/*
 * Map a PCI bus address range for CPU access.
 *
 * Addresses outside the AGP aperture get the trivial direct mapping
 * (addr + IRONGATE_MEM).  Addresses inside the aperture must instead
 * be translated page-by-page through the GART: each GATT entry gives
 * the physical page backing an aperture page, and a matching vmalloc
 * mapping is built over those pages.  Returns NULL on mapping
 * failure; @addr must be page aligned for an aperture mapping.
 */
void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long baddr, last;
	u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
	unsigned long gart_bus_addr;

	/* AGP disabled (see irongate_setup_agp): always direct-map.  */
	if (!alpha_agpgart_size)
		return (void __iomem *)(addr + IRONGATE_MEM);

	gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
			PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * Check for within the AGP aperture...
	 */
	do {
		/*
		 * Check the AGP area
		 */
		if (addr >= gart_bus_addr && addr + size - 1 <
		    gart_bus_addr + alpha_agpgart_size)
			break;

		/*
		 * Not found - assume legacy ioremap
		 */
		return (void __iomem *)(addr + IRONGATE_MEM);
	} while(0);

	/* BAR1 points at the GART MMIO registers; word 1 holds the
	   physical base of the GATT directory.  */
	mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
			PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);

	gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */

	/*
	 * Adjust the limits (mappings must be page aligned)
	 */
	if (addr & ~PAGE_MASK) {
		printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
		       addr);
		return (void __iomem *)(addr + IRONGATE_MEM);
	}
	last = addr + size - 1;
	size = PAGE_ALIGN(last) - addr;

#if 0
	/* NOTE(review): dead debug code -- references gart_aper_size,
	   which is not defined in this function.  */
	printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
	printk("irongate_ioremap: gart_bus_addr  0x%lx\n", gart_bus_addr);
	printk("irongate_ioremap: gart_aper_size 0x%lx\n", gart_aper_size);
	printk("irongate_ioremap: mmio_regs      %p\n", mmio_regs);
	printk("irongate_ioremap: gatt_pages     %p\n", gatt_pages);

	for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
		printk("irongate_ioremap: cur_gatt %p pte 0x%x\n",
		       cur_gatt, pte);
	}
#endif

	/*
	 * Map it
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area) return NULL;

	/* Walk the aperture a page at a time: look up the backing
	   physical page in the GATT (low bit is a valid/used flag,
	   masked off) and map it at the corresponding vmalloc page.  */
	for(baddr = addr, vaddr = (unsigned long)area->addr;
	    baddr <= last;
	    baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;

		if (__alpha_remap_area_pages(vaddr,
					     pte, PAGE_SIZE, 0)) {
			printk("AGP ioremap: FAILED to map...\n");
			vfree(area->addr);
			return NULL;
		}
	}

	flush_tlb_all();

	/* Preserve the sub-page offset of the original address.  */
	vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
	       addr, size, vaddr);
#endif
	return (void __iomem *)vaddr;
}
407 | |||
/*
 * Undo irongate_ioremap.  Direct KSEG mappings (recognized by the
 * (long)addr >> 41 == -2 sign-bit pattern) were never vmalloc'd, so
 * there is nothing to free; aperture mappings release their vmalloc
 * area (rounded down to the page the area started on).
 */
void
irongate_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (((long)addr >> 41) == -2)
		return; /* kseg map, nothing to do */
	if (addr)
		return vfree((void *)(PAGE_MASK & addr));
}
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c new file mode 100644 index 000000000000..6a5a9145c676 --- /dev/null +++ b/arch/alpha/kernel/core_lca.c | |||
@@ -0,0 +1,515 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_lca.c | ||
3 | * | ||
4 | * Written by David Mosberger (davidm@cs.arizona.edu) with some code | ||
5 | * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit | ||
6 | * bios code. | ||
7 | * | ||
8 | * Code common to all LCA core logic chips. | ||
9 | */ | ||
10 | |||
11 | #define __EXTERN_INLINE inline | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/core_lca.h> | ||
14 | #undef __EXTERN_INLINE | ||
15 | |||
16 | #include <linux/types.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/tty.h> | ||
20 | |||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/smp.h> | ||
23 | |||
24 | #include "proto.h" | ||
25 | #include "pci_impl.h" | ||
26 | |||
27 | |||
28 | /* | ||
29 | * BIOS32-style PCI interface: | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * Machine check reasons. Defined according to PALcode sources | ||
34 | * (osf.h and platform.h). | ||
35 | */ | ||
36 | #define MCHK_K_TPERR 0x0080 | ||
37 | #define MCHK_K_TCPERR 0x0082 | ||
38 | #define MCHK_K_HERR 0x0084 | ||
39 | #define MCHK_K_ECC_C 0x0086 | ||
40 | #define MCHK_K_ECC_NC 0x0088 | ||
41 | #define MCHK_K_UNKNOWN 0x008A | ||
42 | #define MCHK_K_CACKSOFT 0x008C | ||
43 | #define MCHK_K_BUGCHECK 0x008E | ||
44 | #define MCHK_K_OS_BUGCHECK 0x0090 | ||
45 | #define MCHK_K_DCPERR 0x0092 | ||
46 | #define MCHK_K_ICPERR 0x0094 | ||
47 | |||
48 | |||
49 | /* | ||
50 | * Platform-specific machine-check reasons: | ||
51 | */ | ||
52 | #define MCHK_K_SIO_SERR 0x204 /* all platforms so far */ | ||
53 | #define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */ | ||
54 | #define MCHK_K_DCSR 0x208 /* all but Noname */ | ||
55 | |||
56 | |||
57 | /* | ||
58 | * Given a bus, device, and function number, compute resulting | ||
59 | * configuration space address and setup the LCA_IOC_CONF register | ||
60 | * accordingly. It is therefore not safe to have concurrent | ||
61 | * invocations to configuration space access routines, but there | ||
62 | * really shouldn't be any need for this. | ||
63 | * | ||
64 | * Type 0: | ||
65 | * | ||
66 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
67 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
68 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
69 | * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| | ||
70 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
71 | * | ||
72 | * 31:11 Device select bit. | ||
73 | * 10:8 Function number | ||
74 | * 7:2 Register number | ||
75 | * | ||
76 | * Type 1: | ||
77 | * | ||
78 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
79 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
80 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
81 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
82 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
83 | * | ||
84 | * 31:24 reserved | ||
85 | * 23:16 bus number (8 bits = 128 possible buses) | ||
86 | * 15:11 Device number (5 bits) | ||
87 | * 10:8 function number | ||
88 | * 7:2 register number | ||
89 | * | ||
90 | * Notes: | ||
91 | * The function number selects which function of a multi-function device | ||
92 | * (e.g., SCSI and Ethernet). | ||
93 | * | ||
94 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
95 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
96 | * bits. | ||
97 | */ | ||
98 | |||
99 | static int | ||
100 | mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, | ||
101 | unsigned long *pci_addr) | ||
102 | { | ||
103 | unsigned long addr; | ||
104 | u8 bus = pbus->number; | ||
105 | |||
106 | if (bus == 0) { | ||
107 | int device = device_fn >> 3; | ||
108 | int func = device_fn & 0x7; | ||
109 | |||
110 | /* Type 0 configuration cycle. */ | ||
111 | |||
112 | if (device > 12) { | ||
113 | return -1; | ||
114 | } | ||
115 | |||
116 | *(vulp)LCA_IOC_CONF = 0; | ||
117 | addr = (1 << (11 + device)) | (func << 8) | where; | ||
118 | } else { | ||
119 | /* Type 1 configuration cycle. */ | ||
120 | *(vulp)LCA_IOC_CONF = 1; | ||
121 | addr = (bus << 16) | (device_fn << 8) | where; | ||
122 | } | ||
123 | *pci_addr = addr; | ||
124 | return 0; | ||
125 | } | ||
126 | |||
/*
 * Perform a PCI configuration-space read at the pre-computed sparse
 * address ADDR.  Runs with local interrupts disabled so that any
 * machine check raised by probing a non-existent device is handled
 * here rather than in the generic handler.  On error the function
 * returns 0xffffffff, the all-ones value callers conventionally treat
 * as "no device".
 */
static unsigned int
conf_read(unsigned long addr)
{
	unsigned long flags, code, stat0;
	unsigned int value;

	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vulp)LCA_IOC_STAT0;
	*(vulp)LCA_IOC_STAT0 = stat0;	/* write back to clear pending state */
	mb();				/* clear must land before the probe */

	/* Access configuration space.  */
	value = *(vuip)addr;
	draina();			/* drain pending aborts before checking status */

	stat0 = *(vulp)LCA_IOC_STAT0;
	if (stat0 & LCA_IOC_STAT0_ERR) {
		code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
			& LCA_IOC_STAT0_CODE_MASK);
		/* code 1 is the expected master-abort on a missing device
		   — presumably; anything else is worth logging. */
		if (code != 1) {
			printk("lca.c:conf_read: got stat0=%lx\n", stat0);
		}

		/* Reset error status.  */
		*(vulp)LCA_IOC_STAT0 = stat0;
		mb();

		/* Reset machine check.  */
		wrmces(0x7);

		value = 0xffffffff;
	}
	local_irq_restore(flags);
	return value;
}
164 | |||
/*
 * Perform a PCI configuration-space write of VALUE at the pre-computed
 * sparse address ADDR.  Mirrors conf_read(): errors raised by the
 * access are detected, logged (unless they are the expected code 1),
 * and cleared, but otherwise swallowed.
 */
static void
conf_write(unsigned long addr, unsigned int value)
{
	unsigned long flags, code, stat0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vulp)LCA_IOC_STAT0;
	*(vulp)LCA_IOC_STAT0 = stat0;	/* write back to clear pending state */
	mb();				/* clear must land before the access */

	/* Access configuration space.  */
	*(vuip)addr = value;
	draina();			/* drain pending aborts before checking status */

	stat0 = *(vulp)LCA_IOC_STAT0;
	if (stat0 & LCA_IOC_STAT0_ERR) {
		code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
			& LCA_IOC_STAT0_CODE_MASK);
		if (code != 1) {
			printk("lca.c:conf_write: got stat0=%lx\n", stat0);
		}

		/* Reset error status.  */
		*(vulp)LCA_IOC_STAT0 = stat0;
		mb();

		/* Reset machine check.  */
		wrmces(0x7);
	}
	local_irq_restore(flags);
}
198 | |||
199 | static int | ||
200 | lca_read_config(struct pci_bus *bus, unsigned int devfn, int where, | ||
201 | int size, u32 *value) | ||
202 | { | ||
203 | unsigned long addr, pci_addr; | ||
204 | long mask; | ||
205 | int shift; | ||
206 | |||
207 | if (mk_conf_addr(bus, devfn, where, &pci_addr)) | ||
208 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
209 | |||
210 | shift = (where & 3) * 8; | ||
211 | mask = (size - 1) * 8; | ||
212 | addr = (pci_addr << 5) + mask + LCA_CONF; | ||
213 | *value = conf_read(addr) >> (shift); | ||
214 | return PCIBIOS_SUCCESSFUL; | ||
215 | } | ||
216 | |||
217 | static int | ||
218 | lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, | ||
219 | u32 value) | ||
220 | { | ||
221 | unsigned long addr, pci_addr; | ||
222 | long mask; | ||
223 | |||
224 | if (mk_conf_addr(bus, devfn, where, &pci_addr)) | ||
225 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
226 | |||
227 | mask = (size - 1) * 8; | ||
228 | addr = (pci_addr << 5) + mask + LCA_CONF; | ||
229 | conf_write(addr, value << ((where & 3) * 8)); | ||
230 | return PCIBIOS_SUCCESSFUL; | ||
231 | } | ||
232 | |||
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops lca_pci_ops =
{
	.read =		lca_read_config,
	.write =	lca_write_config,
};
238 | |||
/*
 * Invalidate the LCA's scatter-gather translation buffer.  The range
 * arguments are ignored: writing LCA_IOC_TBIA flushes everything.
 * wmb() orders any preceding PTE updates ahead of the invalidate;
 * mb() keeps the invalidate from being reordered past what follows.
 */
void
lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vulp)LCA_IOC_TBIA = 0;
	mb();
}
246 | |||
/*
 * One-time LCA core setup: create the single PCI hose, describe the
 * sparse/dense I/O and memory windows for userland, and program the
 * PCI-to-memory DMA windows.  Note that most LCA CSRs are write-only,
 * which shapes several decisions below.
 */
void __init
lca_init_arch(void)
{
	struct pci_controller *hose;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* Userland-visible bus-address bases (KSEG offset removed). */
	hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR;
	hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR;
	hose->sparse_io_base = LCA_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Mimic the SRM settings for the direct-map window.
	 *	Window 0 is scatter-gather 8MB at 8MB (for isa).
	 *	Window 1 is direct access 1GB at 1GB.
	 *
	 * Note that we do not try to save any of the DMA window CSRs
	 * before setting them, since we cannot read those CSRs on LCA.
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = NULL;
	__direct_map_base = 0x40000000;
	__direct_map_size = 0x40000000;

	/* Window 0: scatter-gather; the <33:32> bits select the window
	   mode/enable — value per the SRM convention, TODO confirm
	   against the LCA spec. */
	*(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32);
	*(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes);

	/* Window 1: direct-mapped, no translation table. */
	*(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32);
	*(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000;
	*(vulp)LCA_IOC_T_BASE1 = 0;

	*(vulp)LCA_IOC_TB_ENA = 0x80;

	/* Flush any stale translations now that the windows changed. */
	lca_pci_tbi(hose, 0, -1);

	/*
	 * Disable PCI parity for now.  The NCR53c810 chip has
	 * troubles meeting the PCI spec which results in
	 * data parity errors.
	 */
	*(vulp)LCA_IOC_PAR_DIS = 1UL<<5;

	/*
	 * Finally, set up for restoring the correct HAE if using SRM.
	 * Again, since we cannot read many of the CSRs on the LCA,
	 * one of which happens to be the HAE, we save the value that
	 * the SRM will expect...
	 */
	if (alpha_using_srm)
		srm_hae = 0x80000000UL;
}
309 | |||
310 | /* | ||
311 | * Constants used during machine-check handling. I suppose these | ||
312 | * could be moved into lca.h but I don't see much reason why anybody | ||
313 | * else would want to use them. | ||
314 | */ | ||
315 | |||
316 | #define ESR_EAV (1UL<< 0) /* error address valid */ | ||
317 | #define ESR_CEE (1UL<< 1) /* correctable error */ | ||
318 | #define ESR_UEE (1UL<< 2) /* uncorrectable error */ | ||
319 | #define ESR_WRE (1UL<< 3) /* write-error */ | ||
320 | #define ESR_SOR (1UL<< 4) /* error source */ | ||
321 | #define ESR_CTE (1UL<< 7) /* cache-tag error */ | ||
322 | #define ESR_MSE (1UL<< 9) /* multiple soft errors */ | ||
323 | #define ESR_MHE (1UL<<10) /* multiple hard errors */ | ||
324 | #define ESR_NXM (1UL<<12) /* non-existent memory */ | ||
325 | |||
326 | #define IOC_ERR ( 1<<4) /* ioc logs an error */ | ||
327 | #define IOC_CMD_SHIFT 0 | ||
328 | #define IOC_CMD (0xf<<IOC_CMD_SHIFT) | ||
329 | #define IOC_CODE_SHIFT 8 | ||
330 | #define IOC_CODE (0xf<<IOC_CODE_SHIFT) | ||
331 | #define IOC_LOST ( 1<<5) | ||
332 | #define IOC_P_NBR ((__u32) ~((1<<13) - 1)) | ||
333 | |||
334 | static void | ||
335 | mem_error(unsigned long esr, unsigned long ear) | ||
336 | { | ||
337 | printk(" %s %s error to %s occurred at address %x\n", | ||
338 | ((esr & ESR_CEE) ? "Correctable" : | ||
339 | (esr & ESR_UEE) ? "Uncorrectable" : "A"), | ||
340 | (esr & ESR_WRE) ? "write" : "read", | ||
341 | (esr & ESR_SOR) ? "memory" : "b-cache", | ||
342 | (unsigned) (ear & 0x1ffffff8)); | ||
343 | if (esr & ESR_CTE) { | ||
344 | printk(" A b-cache tag parity error was detected.\n"); | ||
345 | } | ||
346 | if (esr & ESR_MSE) { | ||
347 | printk(" Several other correctable errors occurred.\n"); | ||
348 | } | ||
349 | if (esr & ESR_MHE) { | ||
350 | printk(" Several other uncorrectable errors occurred.\n"); | ||
351 | } | ||
352 | if (esr & ESR_NXM) { | ||
353 | printk(" Attempted to access non-existent memory.\n"); | ||
354 | } | ||
355 | } | ||
356 | |||
357 | static void | ||
358 | ioc_error(__u32 stat0, __u32 stat1) | ||
359 | { | ||
360 | static const char * const pci_cmd[] = { | ||
361 | "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write", | ||
362 | "Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3", | ||
363 | "Rsvd4", "Configuration Read", "Configuration Write", | ||
364 | "Memory Read Multiple", "Dual Address", "Memory Read Line", | ||
365 | "Memory Write and Invalidate" | ||
366 | }; | ||
367 | static const char * const err_name[] = { | ||
368 | "exceeded retry limit", "no device", "bad data parity", | ||
369 | "target abort", "bad address parity", "page table read error", | ||
370 | "invalid page", "data error" | ||
371 | }; | ||
372 | unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT; | ||
373 | unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT; | ||
374 | |||
375 | printk(" %s initiated PCI %s cycle to address %x" | ||
376 | " failed due to %s.\n", | ||
377 | code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]); | ||
378 | |||
379 | if (code == 5 || code == 6) { | ||
380 | printk(" (Error occurred at PCI memory address %x.)\n", | ||
381 | (stat0 & ~IOC_P_NBR)); | ||
382 | } | ||
383 | if (stat0 & IOC_LOST) { | ||
384 | printk(" Other PCI errors occurred simultaneously.\n"); | ||
385 | } | ||
386 | } | ||
387 | |||
/*
 * Top-level LCA machine-check handler.  Acknowledges the pending
 * machine check, decodes the reason code from the PALcode logout
 * frame at LA_PTR, and prints a short or long report depending on
 * the frame size.  With CONFIG_VERBOSE_MCHECK, optionally dumps the
 * raw logout area.
 */
void
lca_machine_check(unsigned long vector, unsigned long la_ptr,
		  struct pt_regs *regs)
{
	const char * reason;
	union el_lca el;

	el.c = (struct el_common *) la_ptr;

	wrmces(rdmces());	/* reset machine check pending flag */

	printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n",
	       vector, regs->pc, (unsigned int) el.c->code);

	/*
	 * The first quadword after the common header always seems to
	 * be the machine check reason---don't know why this isn't
	 * part of the common header instead.  In the case of a long
	 * logout frame, the upper 32 bits is the machine check
	 * revision level, which we ignore for now.
	 */
	switch ((unsigned int) el.c->code) {
	case MCHK_K_TPERR:	reason = "tag parity error"; break;
	case MCHK_K_TCPERR:	reason = "tag control parity error"; break;
	case MCHK_K_HERR:	reason = "access to non-existent memory"; break;
	case MCHK_K_ECC_C:	reason = "correctable ECC error"; break;
	case MCHK_K_ECC_NC:	reason = "non-correctable ECC error"; break;
	case MCHK_K_CACKSOFT:	reason = "MCHK_K_CACKSOFT"; break;
	case MCHK_K_BUGCHECK:	reason = "illegal exception in PAL mode"; break;
	case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break;
	case MCHK_K_DCPERR:	reason = "d-cache parity error"; break;
	case MCHK_K_ICPERR:	reason = "i-cache parity error"; break;
	case MCHK_K_SIO_SERR:	reason = "SIO SERR occurred on PCI bus"; break;
	case MCHK_K_SIO_IOCHK:	reason = "SIO IOCHK occurred on ISA bus"; break;
	case MCHK_K_DCSR:	reason = "MCHK_K_DCSR"; break;
	case MCHK_K_UNKNOWN:
	default:		reason = "unknown"; break;
	}

	/* The logout frame size distinguishes short from long format. */
	switch (el.c->size) {
	case sizeof(struct el_lca_mcheck_short):
		printk(KERN_CRIT
		       " Reason: %s (short frame%s, dc_stat=%#lx):\n",
		       reason, el.c->retry ? ", retryable" : "",
		       el.s->dc_stat);
		if (el.s->esr & ESR_EAV) {	/* error address valid? */
			mem_error(el.s->esr, el.s->ear);
		}
		if (el.s->ioc_stat0 & IOC_ERR) {
			ioc_error(el.s->ioc_stat0, el.s->ioc_stat1);
		}
		break;

	case sizeof(struct el_lca_mcheck_long):
		printk(KERN_CRIT " Reason: %s (long frame%s):\n",
		       reason, el.c->retry ? ", retryable" : "");
		printk(KERN_CRIT
		       " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n",
		       el.l->pt[0], el.l->exc_addr, el.l->dc_stat);
		printk(KERN_CRIT " car: %#lx\n", el.l->car);
		if (el.l->esr & ESR_EAV) {
			mem_error(el.l->esr, el.l->ear);
		}
		if (el.l->ioc_stat0 & IOC_ERR) {
			ioc_error(el.l->ioc_stat0, el.l->ioc_stat1);
		}
		break;

	default:
		printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size);
	}

	/* Dump the logout area to give all info.  */
#ifdef CONFIG_VERBOSE_MCHECK
	if (alpha_verbose_mcheck > 1) {
		unsigned long * ptr = (unsigned long *) la_ptr;
		long i;
		for (i = 0; i < el.c->size / sizeof(long); i += 2) {
			printk(KERN_CRIT " +%8lx %016lx %016lx\n",
			       i*sizeof(long), ptr[i], ptr[i+1]);
		}
	}
#endif /* CONFIG_VERBOSE_MCHECK */
}
472 | |||
473 | /* | ||
474 | * The following routines are needed to support the SPEED changing | ||
475 | * necessary to successfully manage the thermal problem on the AlphaBook1. | ||
476 | */ | ||
477 | |||
478 | void | ||
479 | lca_clock_print(void) | ||
480 | { | ||
481 | long pmr_reg; | ||
482 | |||
483 | pmr_reg = LCA_READ_PMR; | ||
484 | |||
485 | printk("Status of clock control:\n"); | ||
486 | printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg)); | ||
487 | printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg)); | ||
488 | printk("\tInterrupt override is %s\n", | ||
489 | (pmr_reg & LCA_PMR_INTO) ? "on" : "off"); | ||
490 | printk("\tDMA override is %s\n", | ||
491 | (pmr_reg & LCA_PMR_DMAO) ? "on" : "off"); | ||
492 | |||
493 | } | ||
494 | |||
495 | int | ||
496 | lca_get_clock(void) | ||
497 | { | ||
498 | long pmr_reg; | ||
499 | |||
500 | pmr_reg = LCA_READ_PMR; | ||
501 | return(LCA_GET_PRIMARY(pmr_reg)); | ||
502 | |||
503 | } | ||
504 | |||
/*
 * Change the primary clock divisor (AlphaBook1 thermal management).
 * Read-modify-write of the PMR register; mb() makes sure the write
 * has reached the chip before we return.
 */
void
lca_clock_fiddle(int divisor)
{
	long pmr_reg;

	pmr_reg = LCA_READ_PMR;
	LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor);
	/* lca_norm_clock = divisor; */
	LCA_WRITE_PMR(pmr_reg);
	mb();
}
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c new file mode 100644 index 000000000000..44866cb26a80 --- /dev/null +++ b/arch/alpha/kernel/core_marvel.c | |||
@@ -0,0 +1,1154 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_marvel.c | ||
3 | * | ||
4 | * Code common to all Marvel based systems. | ||
5 | */ | ||
6 | |||
7 | #define __EXTERN_INLINE inline | ||
8 | #include <asm/io.h> | ||
9 | #include <asm/core_marvel.h> | ||
10 | #undef __EXTERN_INLINE | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/mc146818rtc.h> | ||
18 | #include <linux/rtc.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/bootmem.h> | ||
21 | |||
22 | #include <asm/ptrace.h> | ||
23 | #include <asm/smp.h> | ||
24 | #include <asm/gct.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/rtc.h> | ||
28 | |||
29 | #include "proto.h" | ||
30 | #include "pci_impl.h" | ||
31 | |||
32 | |||
33 | /* | ||
34 | * Debug helpers | ||
35 | */ | ||
36 | #define DEBUG_CONFIG 0 | ||
37 | |||
38 | #if DEBUG_CONFIG | ||
39 | # define DBG_CFG(args) printk args | ||
40 | #else | ||
41 | # define DBG_CFG(args) | ||
42 | #endif | ||
43 | |||
44 | |||
45 | /* | ||
46 | * Private data | ||
47 | */ | ||
48 | static struct io7 *io7_head = NULL; | ||
49 | |||
50 | |||
51 | /* | ||
52 | * Helper functions | ||
53 | */ | ||
/*
 * Read one EV7 CSR on processor element PE.  The mb() pair brackets
 * the access so it is not reordered against surrounding memory
 * operations.
 */
static unsigned long __attribute__ ((unused))
read_ev7_csr(int pe, unsigned long offset)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
	unsigned long q;

	mb();
	q = ev7csr->csr;
	mb();

	return q;
}
66 | |||
/*
 * Write one EV7 CSR on processor element PE, bracketed by mb() to
 * keep the store ordered against surrounding memory operations.
 */
static void __attribute__ ((unused))
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);

	mb();
	ev7csr->csr = q;
	mb();
}
76 | |||
77 | static char * __init | ||
78 | mk_resource_name(int pe, int port, char *str) | ||
79 | { | ||
80 | char tmp[80]; | ||
81 | char *name; | ||
82 | |||
83 | sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); | ||
84 | name = alloc_bootmem(strlen(tmp) + 1); | ||
85 | strcpy(name, tmp); | ||
86 | |||
87 | return name; | ||
88 | } | ||
89 | |||
90 | inline struct io7 * | ||
91 | marvel_next_io7(struct io7 *prev) | ||
92 | { | ||
93 | return (prev ? prev->next : io7_head); | ||
94 | } | ||
95 | |||
96 | struct io7 * | ||
97 | marvel_find_io7(int pe) | ||
98 | { | ||
99 | struct io7 *io7; | ||
100 | |||
101 | for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next) | ||
102 | continue; | ||
103 | |||
104 | return io7; | ||
105 | } | ||
106 | |||
/*
 * Allocate and initialize an io7 descriptor for processor element PE
 * and insert it into the global list, kept sorted by PE.  Returns the
 * new descriptor, or NULL if an IO7 for that PE already exists.
 * Bootmem-only (__init); the allocation is never freed, even on the
 * duplicate-PE error paths.
 */
static struct io7 * __init
alloc_io7(unsigned int pe)
{
	struct io7 *io7;
	struct io7 *insp;
	int h;

	if (marvel_find_io7(pe)) {
		printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
		return NULL;
	}

	/* alloc_bootmem() memory is zeroed, so ->next starts NULL. */
	io7 = alloc_bootmem(sizeof(*io7));
	io7->pe = pe;
	spin_lock_init(&io7->irq_lock);

	for (h = 0; h < 4; h++) {
		io7->ports[h].io7 = io7;
		io7->ports[h].port = h;
		io7->ports[h].enabled = 0;	/* default to disabled */
	}

	/*
	 * Insert in pe sorted order.
	 */
	if (NULL == io7_head)			/* empty list */
		io7_head = io7;
	else if (io7_head->pe > io7->pe) {	/* insert at head */
		io7->next = io7_head;
		io7_head = io7;
	} else {				/* insert at position */
		for (insp = io7_head; insp; insp = insp->next) {
			/* Defensive re-check; the marvel_find_io7() guard
			   above should already have caught duplicates. */
			if (insp->pe == io7->pe) {
				printk(KERN_ERR "Too many IO7s at PE %d\n",
				       io7->pe);
				return NULL;
			}

			if (NULL == insp->next ||
			    insp->next->pe > io7->pe) { /* insert here */
				io7->next = insp->next;
				insp->next = io7;
				break;
			}
		}

		/* Should be unreachable: the loop above always inserts
		   before running off the list.  Kept as a safety net. */
		if (NULL == insp) {		/* couldn't insert ?!? */
			printk(KERN_WARNING "Failed to insert IO7 at PE %d "
			       " - adding at head of list\n", io7->pe);
			io7->next = io7_head;
			io7_head = io7;
		}
	}

	return io7;
}
163 | |||
/*
 * Clear all latched error state on an IO7: the per-port error CSRs
 * for each of the four I/O ports, then the chip-wide port-7 error
 * summary CSRs.  Writing all-ones clears the accumulated bits —
 * presumably write-one-to-clear semantics; confirm against the IO7
 * register specification.
 */
void
io7_clear_errors(struct io7 *io7)
{
	io7_port7_csrs *p7csrs;
	io7_ioport_csrs *csrs;
	int port;


	/*
	 * First the IO ports.
	 */
	for (port = 0; port < 4; port++) {
		csrs = IO7_CSRS_KERN(io7->pe, port);

		csrs->POx_ERR_SUM.csr = -1UL;
		csrs->POx_TLB_ERR.csr = -1UL;
		csrs->POx_SPL_COMPLT.csr = -1UL;
		csrs->POx_TRANS_SUM.csr = -1UL;
	}

	/*
	 * Then the common ones.
	 */
	p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	p7csrs->PO7_ERROR_SUM.csr = -1UL;
	p7csrs->PO7_UNCRR_SYM.csr = -1UL;
	p7csrs->PO7_CRRCT_SYM.csr = -1UL;
}
193 | |||
194 | |||
195 | /* | ||
196 | * IO7 PCI, PCI/X, AGP configuration. | ||
197 | */ | ||
/*
 * Create and program a PCI hose for one port of an IO7: allocate the
 * controller and its resources, publish the dense I/O and memory
 * bases, save the firmware's DMA-window CSRs for later restoration,
 * then program the four DMA windows (two scatter-gather, one direct,
 * one disabled).  A TBIA is done before and after the window changes.
 */
static void __init
io7_init_hose(struct io7 *io7, int port)
{
	static int hose_index = 0;

	struct pci_controller *hose = alloc_pci_controller();
	struct io7_port *io7_port = &io7->ports[port];
	io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
	int i;

	hose->index = hose_index++;	/* arbitrary */

	/*
	 * We don't have an isa or legacy hose, but glibc expects to be
	 * able to use the bus == 0 / dev == 0 form of the iobase syscall
	 * to determine information about the i/o system. Since XFree86
	 * relies on glibc's determination to tell whether or not to use
	 * sparse access, we need to point the pci_isa_hose at a real hose
	 * so at least that determination is correct.
	 */
	if (hose->index == 0)
		pci_isa_hose = hose;

	io7_port->csrs = csrs;
	io7_port->hose = hose;
	hose->sysdata = io7_port;

	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * Base addresses for userland consumption. Since these are going
	 * to be mapped, they are pure physical addresses.
	 */
	hose->sparse_mem_base = hose->sparse_io_base = 0;
	hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
	hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);

	/*
	 * Base addresses and resource ranges for kernel consumption.
	 */
	hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);

	hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
	hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
	hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
	hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
	hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n",
		       hose->index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n",
		       hose->index);

	/*
	 * Save the existing DMA window settings for later restoration.
	 */
	for (i = 0; i < 4; i++) {
		io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
		io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
		io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather (up-to) 1GB at 3GB
	 * Window 3 is disabled
	 */

	/*
	 * TBIA before modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);

	/*
	 * Set up window 0 for scatter-gather 8MB at 8MB.
	 */
	hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[0].csr =
		hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);

	/*
	 * Set up window 1 for direct-mapped 1GB at 2GB.
	 */
	csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
	csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
	csrs->POx_TBASE[1].csr = 0;	/* direct map: no PTE table */

	/*
	 * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
	 */
	hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[2].csr =
		hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);

	/*
	 * Disable window 3.
	 */
	csrs->POx_WBASE[3].csr = 0;

	/*
	 * Make sure that the AGP Monster Window is disabled.
	 */
	csrs->POx_CTRL.csr &= ~(1UL << 61);

#if 1
	printk("FIXME: disabling master aborts\n");
	csrs->POx_MSK_HEI.csr &= ~(3UL << 14);
#endif
	/*
	 * TBIA after modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);
}
329 | |||
/*
 * Initialize one IO7: locate its port-7 CSR block, then create a hose
 * for each of its I/O ports that reports as enabled.  The magic
 * POx_CACHE_CTL value 8 is taken as "port present and usable" —
 * presumably the firmware-initialized state; confirm against the IO7
 * specification.
 */
static void __init
marvel_init_io7(struct io7 *io7)
{
	int i;

	printk("Initializing IO7 at PID %d\n", io7->pe);

	/*
	 * Get the Port 7 CSR pointer.
	 */
	io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	/*
	 * Init this IO7's hoses.
	 */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
		if (csrs->POx_CACHE_CTL.csr == 8) {
			io7->ports[i].enabled = 1;
			io7_init_hose(io7, i);
		}
	}
}
353 | |||
354 | void | ||
355 | marvel_io7_present(gct6_node *node) | ||
356 | { | ||
357 | int pe; | ||
358 | |||
359 | if (node->type != GCT_TYPE_HOSE || | ||
360 | node->subtype != GCT_SUBTYPE_IO_PORT_MODULE) | ||
361 | return; | ||
362 | |||
363 | pe = (node->id >> 8) & 0xff; | ||
364 | printk("Found an IO7 at PID %d\n", pe); | ||
365 | |||
366 | alloc_io7(pe); | ||
367 | } | ||
368 | |||
/*
 * Locate the hose carrying the graphics console, if the console
 * terminal block says the console is graphical, and record it in
 * pci_vga_hose.  The CTB is read as raw quadwords: index 7 is
 * TERM_TYPE and index 30 holds TERM_OUT_LOC — magic offsets from the
 * console firmware layout.  No-op unless CONFIG_VGA_HOSE.
 */
static void __init
marvel_init_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
		struct pci_controller *hose = NULL;
		int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */
		struct io7 *io7;
		int pid, port;

		/* FIXME - encoding is going to have to change for Marvel
		 *         since hose will be able to overflow a byte...
		 *         need to fix this decode when the console
		 *         changes its encoding
		 */
		printk("console graphics is on hose %d (console)\n", h);

		/*
		 * The console's hose numbering is:
		 *
		 *	hose<n:2>: PID
		 *	hose<1:0>: PORT
		 *
		 * We need to find the hose at that pid and port
		 */
		pid = h >> 2;
		port = h & 3;
		if ((io7 = marvel_find_io7(pid)))
			hose = io7->ports[port].hose;

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
#endif /* CONFIG_VGA_HOSE */
}
408 | |||
/* GCT node types we search for at boot; terminated by a zero entry. */
gct6_search_struct gct_wanted_node_list[] = {
	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
	{ 0, 0, NULL }
};
413 | |||
414 | /* | ||
415 | * In case the GCT is not complete, let the user specify PIDs with IO7s | ||
416 | * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal) | ||
417 | * where IO7s are connected | ||
418 | */ | ||
419 | static int __init | ||
420 | marvel_specify_io7(char *str) | ||
421 | { | ||
422 | unsigned long pid; | ||
423 | struct io7 *io7; | ||
424 | char *pchar; | ||
425 | |||
426 | do { | ||
427 | pid = simple_strtoul(str, &pchar, 0); | ||
428 | if (pchar != str) { | ||
429 | printk("User-specified IO7 at PID %lu\n", pid); | ||
430 | io7 = alloc_io7(pid); | ||
431 | if (io7) marvel_init_io7(io7); | ||
432 | } | ||
433 | |||
434 | if (pchar == str) pchar++; | ||
435 | str = pchar; | ||
436 | } while(*str); | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | __setup("io7=", marvel_specify_io7); | ||
441 | |||
442 | void __init | ||
443 | marvel_init_arch(void) | ||
444 | { | ||
445 | struct io7 *io7; | ||
446 | |||
447 | /* With multiple PCI busses, we play with I/O as physical addrs. */ | ||
448 | ioport_resource.end = ~0UL; | ||
449 | |||
450 | /* PCI DMA Direct Mapping is 1GB at 2GB. */ | ||
451 | __direct_map_base = 0x80000000; | ||
452 | __direct_map_size = 0x40000000; | ||
453 | |||
454 | /* Parse the config tree. */ | ||
455 | gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list); | ||
456 | |||
457 | /* Init the io7s. */ | ||
458 | for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) | ||
459 | marvel_init_io7(io7); | ||
460 | |||
461 | /* Check for graphic console location (if any). */ | ||
462 | marvel_init_vga_hose(); | ||
463 | } | ||
464 | |||
/*
 * Platform shutdown hook; nothing to do on Marvel.  'mode' is unused.
 */
void
marvel_kill_arch(int mode)
{
}
469 | |||
470 | |||
471 | /* | ||
472 | * PCI Configuration Space access functions | ||
473 | * | ||
474 | * Configuration space addresses have the following format: | ||
475 | * | ||
476 | * |2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
477 | * |3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
478 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
479 | * |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R| | ||
480 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
481 | * | ||
482 | * n:24 reserved for hose base | ||
 *	 23:16	bus number (8 bits = 256 possible buses)
484 | * 15:11 Device number (5 bits) | ||
485 | * 10:8 function number | ||
486 | * 7:2 register number | ||
487 | * | ||
488 | * Notes: | ||
489 | * IO7 determines whether to use a type 0 or type 1 config cycle | ||
490 | * based on the bus number. Therefore the bus number must be set | ||
491 | * to 0 for the root bus on any hose. | ||
492 | * | ||
493 | * The function number selects which function of a multi-function device | ||
494 | * (e.g., SCSI and Ethernet). | ||
495 | * | ||
496 | */ | ||
497 | |||
498 | static inline unsigned long | ||
499 | build_conf_addr(struct pci_controller *hose, u8 bus, | ||
500 | unsigned int devfn, int where) | ||
501 | { | ||
502 | return (hose->config_space_base | (bus << 16) | (devfn << 8) | where); | ||
503 | } | ||
504 | |||
505 | static unsigned long | ||
506 | mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where) | ||
507 | { | ||
508 | struct pci_controller *hose = pbus->sysdata; | ||
509 | struct io7_port *io7_port; | ||
510 | unsigned long addr = 0; | ||
511 | u8 bus = pbus->number; | ||
512 | |||
513 | if (!hose) | ||
514 | return addr; | ||
515 | |||
516 | /* Check for enabled. */ | ||
517 | io7_port = hose->sysdata; | ||
518 | if (!io7_port->enabled) | ||
519 | return addr; | ||
520 | |||
521 | if (!pbus->parent) { /* No parent means peer PCI bus. */ | ||
522 | /* Don't support idsel > 20 on primary bus. */ | ||
523 | if (devfn >= PCI_DEVFN(21, 0)) | ||
524 | return addr; | ||
525 | bus = 0; | ||
526 | } | ||
527 | |||
528 | addr = build_conf_addr(hose, bus, devfn, where); | ||
529 | |||
530 | DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); | ||
531 | return addr; | ||
532 | } | ||
533 | |||
/*
 * pci_ops.read: fetch 'size' bytes of config space at (bus, devfn,
 * where).  The access is a single load of exactly the requested width
 * at the composed address; a 0 address from mk_conf_addr means the
 * target is unreachable.
 */
static int
marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch(size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);	/* byte load */
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);	/* word load */
		break;
	case 4:
		*value = *(vuip)addr;			/* longword load */
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
559 | |||
/*
 * pci_ops.write: store 'size' bytes of config space at (bus, devfn,
 * where).  Each store is followed by mb() and a read-back of the same
 * location, which pushes the posted write out to the device before we
 * return -- do not remove the seemingly-unused loads.
 */
static int
marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);	/* read back to flush */
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);	/* read back to flush */
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;			/* read back to flush */
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
591 | |||
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops marvel_pci_ops =
{
	.read =		marvel_read_config,
	.write =	marvel_write_config,
};
597 | |||
598 | |||
599 | /* | ||
600 | * Other PCI helper functions. | ||
601 | */ | ||
/*
 * Invalidate the IO7 port's scatter-gather TLB after IOMMU pte
 * updates.  wmb() orders the pte writes before the invalidate; the
 * final (discarded) read of POx_SG_TBIA pushes the posted write out
 * to the chip before we return.
 */
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;		/* read back to flush */
}
612 | |||
613 | |||
614 | |||
615 | /* | ||
616 | * RTC Support | ||
617 | */ | ||
/* Argument block for __marvel_access_rtc (also passed cross-CPU). */
struct marvel_rtc_access_info {
	unsigned long function;		/* CSERVE sub-function (see __marvel_rtc_io) */
	unsigned long index;		/* RTC register index */
	unsigned long data;		/* in: value to write; out: value read */
};
623 | |||
/*
 * Perform one RTC access through the CSERVE PALcode call: function in
 * r16, index in r17, data in r18, result returned in r0 and stored
 * back into info->data.  'info' is void* so this can be used as an
 * smp_call_function callback (see __marvel_rtc_io).
 */
static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}
642 | |||
/*
 * Emulate the PC-style RTC at ports 0x70/0x71.  Port 0x70 latches the
 * register index in a static (shared, unlocked) variable; port 0x71
 * performs the access through the CSERVE PAL call, routed to the boot
 * CPU on SMP.  Data is converted between BCD and binary at the port
 * boundary.  Returns the byte read (0 for writes to 0x70's mirror).
 */
static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;	/* latched RTC register index (unlocked) */

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch(addr) {
	case 0x70:					/* RTC_PORT(0) */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:					/* RTC_PORT(1) */
		rtc_access.index = index;
		rtc_access.data = BCD_TO_BIN(b);
		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */

#ifdef CONFIG_SMP
		/* The PAL call is made on the boot CPU only. */
		if (smp_processor_id() != boot_cpuid)
			smp_call_function_on_cpu(__marvel_access_rtc,
						 &rtc_access, 1, 1,
						 cpumask_of_cpu(boot_cpuid));
		else
			__marvel_access_rtc(&rtc_access);
#else
		__marvel_access_rtc(&rtc_access);
#endif
		ret = BIN_TO_BCD(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}
682 | |||
683 | |||
684 | /* | ||
685 | * IO map support. | ||
686 | */ | ||
687 | |||
/* Legacy VGA window in PCI memory space (0xa0000-0xc0000 inclusive). */
#define __marvel_is_mem_vga(a)	(((a) >= 0xa0000) && ((a) <= 0xc0000))
689 | |||
/*
 * Map a PCI memory-space bus address for CPU access.
 *
 * Legacy VGA addresses are first rebased onto the VGA hose.  The
 * owning hose is found by matching the high 32 address bits against
 * each hose's memory space.  Addresses inside the direct-map window
 * return an IDENT_ADDR-based cookie with no page tables touched;
 * addresses inside the hose's PCI scatter-gather arena are mapped
 * page by page through the arena's ptes into vmalloc space.  Returns
 * NULL if no hose matches, the address is in neither window, or the
 * mapping fails.
 */
void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the addr.
	 */
#ifdef CONFIG_VGA_HOSE
	if (pci_vga_hose && __marvel_is_mem_vga(addr)) {
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;	/* inclusive last byte */

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		/* NOTE(review): PAGE_ALIGN of the *inclusive* end looks one
		   page short when 'last' is already aligned, while the <=
		   loop below still maps that page -- confirm get_vm_area()
		   size is sufficient in that corner case. */
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {	/* bit 0 = pte valid */
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		/* Preserve the sub-page offset of the original address. */
		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	return NULL;
}
786 | |||
787 | void | ||
788 | marvel_iounmap(volatile void __iomem *xaddr) | ||
789 | { | ||
790 | unsigned long addr = (unsigned long) xaddr; | ||
791 | if (addr >= VMALLOC_START) | ||
792 | vfree((void *)(PAGE_MASK & addr)); | ||
793 | } | ||
794 | |||
795 | int | ||
796 | marvel_is_mmio(const volatile void __iomem *xaddr) | ||
797 | { | ||
798 | unsigned long addr = (unsigned long) xaddr; | ||
799 | |||
800 | if (addr >= VMALLOC_START) | ||
801 | return 1; | ||
802 | else | ||
803 | return (addr & 0xFF000000UL) == 0; | ||
804 | } | ||
805 | |||
/* Legacy VGA I/O ports (excluding the 0x3b3/0x3d3 holes). */
#define __marvel_is_port_vga(a)	\
  (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
/* i8042 keyboard ports (accesses are stubbed out below). */
#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
/* RTC index/data ports, emulated via __marvel_rtc_io. */
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))
810 | |||
811 | void __iomem *marvel_ioportmap (unsigned long addr) | ||
812 | { | ||
813 | if (__marvel_is_port_rtc (addr) || __marvel_is_port_kbd(addr)) | ||
814 | ; | ||
815 | #ifdef CONFIG_VGA_HOSE | ||
816 | else if (__marvel_is_port_vga (addr) && pci_vga_hose) | ||
817 | addr += pci_vga_hose->io_space->start; | ||
818 | #endif | ||
819 | else | ||
820 | return NULL; | ||
821 | return (void __iomem *)addr; | ||
822 | } | ||
823 | |||
824 | unsigned int | ||
825 | marvel_ioread8(void __iomem *xaddr) | ||
826 | { | ||
827 | unsigned long addr = (unsigned long) xaddr; | ||
828 | if (__marvel_is_port_kbd(addr)) | ||
829 | return 0; | ||
830 | else if (__marvel_is_port_rtc(addr)) | ||
831 | return __marvel_rtc_io(0, addr, 0); | ||
832 | else | ||
833 | return __kernel_ldbu(*(vucp)addr); | ||
834 | } | ||
835 | |||
836 | void | ||
837 | marvel_iowrite8(u8 b, void __iomem *xaddr) | ||
838 | { | ||
839 | unsigned long addr = (unsigned long) xaddr; | ||
840 | if (__marvel_is_port_kbd(addr)) | ||
841 | return; | ||
842 | else if (__marvel_is_port_rtc(addr)) | ||
843 | __marvel_rtc_io(b, addr, 1); | ||
844 | else | ||
845 | __kernel_stb(b, *(vucp)addr); | ||
846 | } | ||
847 | |||
#ifndef CONFIG_ALPHA_GENERIC
/* Marvel-specific build: export the I/O routines to modules directly. */
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
856 | |||
857 | /* | ||
858 | * NUMA Support | ||
859 | */ | ||
860 | /********** | ||
861 | * FIXME - for now each cpu is a node by itself | ||
862 | * -- no real support for striped mode | ||
863 | ********** | ||
864 | */ | ||
/*
 * Translate a physical address to a NUMA node id.  PA bit 43 selects
 * I/O vs memory space.  For I/O, the cpuid is the inverse of
 * pa<42:35>; for memory, pa<35:34> gives cpuid<1:0> and pa<41:37>
 * gives cpuid<6:2>.
 */
int
marvel_pa_to_nid(unsigned long pa)
{
	int cpuid;

	if ((pa >> 43) & 1)	/* I/O */
		cpuid = (~(pa >> 35) & 0xff);
	else			/* mem */
		cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));

	return marvel_cpuid_to_nid(cpuid);
}
877 | |||
/*
 * cpuid -> node id.  Identity mapping for now: each cpu is a node by
 * itself, with no striped-mode support (see the FIXME above).
 */
int
marvel_cpuid_to_nid(int cpuid)
{
	return cpuid;
}
883 | |||
/*
 * First physical address of node 'nid''s memory: nid<1:0> lands in
 * pa<35:34> and nid<6:2> in pa<41:37> (inverse of marvel_pa_to_nid).
 */
unsigned long
marvel_node_mem_start(int nid)
{
	unsigned long pa = (nid & 0x3) | ((nid & 0x7c) << 1);

	return pa << 34;
}
894 | |||
/*
 * Per-node memory size: a fixed 16GB regardless of node.
 */
unsigned long
marvel_node_mem_size(int nid)
{
	return 16UL << 30;		/* 16GB */
}
900 | |||
901 | |||
902 | /* | ||
903 | * AGP GART Support. | ||
904 | */ | ||
905 | #include <linux/agp_backend.h> | ||
906 | #include <asm/agp_backend.h> | ||
907 | #include <linux/slab.h> | ||
908 | #include <linux/delay.h> | ||
909 | |||
/* AGP aperture bookkeeping, hung off alpha_agp_info.aperture.sysdata. */
struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;	/* SG arena the aperture lives in */
	long pg_start;			/* first reserved arena page */
	long pg_count;			/* number of reserved pages */
};
915 | |||
916 | static int | ||
917 | marvel_agp_setup(alpha_agp_info *agp) | ||
918 | { | ||
919 | struct marvel_agp_aperture *aper; | ||
920 | |||
921 | if (!alpha_agpgart_size) | ||
922 | return -ENOMEM; | ||
923 | |||
924 | aper = kmalloc(sizeof(*aper), GFP_KERNEL); | ||
925 | if (aper == NULL) return -ENOMEM; | ||
926 | |||
927 | aper->arena = agp->hose->sg_pci; | ||
928 | aper->pg_count = alpha_agpgart_size / PAGE_SIZE; | ||
929 | aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, | ||
930 | aper->pg_count - 1); | ||
931 | |||
932 | if (aper->pg_start < 0) { | ||
933 | printk(KERN_ERR "Failed to reserve AGP memory\n"); | ||
934 | kfree(aper); | ||
935 | return -ENOMEM; | ||
936 | } | ||
937 | |||
938 | agp->aperture.bus_base = | ||
939 | aper->arena->dma_base + aper->pg_start * PAGE_SIZE; | ||
940 | agp->aperture.size = aper->pg_count * PAGE_SIZE; | ||
941 | agp->aperture.sysdata = aper; | ||
942 | |||
943 | return 0; | ||
944 | } | ||
945 | |||
946 | static void | ||
947 | marvel_agp_cleanup(alpha_agp_info *agp) | ||
948 | { | ||
949 | struct marvel_agp_aperture *aper = agp->aperture.sysdata; | ||
950 | int status; | ||
951 | |||
952 | status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); | ||
953 | if (status == -EBUSY) { | ||
954 | printk(KERN_WARNING | ||
955 | "Attempted to release bound AGP memory - unbinding\n"); | ||
956 | iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); | ||
957 | status = iommu_release(aper->arena, aper->pg_start, | ||
958 | aper->pg_count); | ||
959 | } | ||
960 | if (status < 0) | ||
961 | printk(KERN_ERR "Failed to release AGP memory\n"); | ||
962 | |||
963 | kfree(aper); | ||
964 | kfree(agp); | ||
965 | } | ||
966 | |||
/*
 * agp->ops->configure: reconcile the requested AGP rate with what the
 * IO7's AGP-port PLL (RNGB field of POx_RST) is actually programmed
 * for, downgrading the mode if needed, then write the final mode to
 * AGP_CMD to enable the port.  Always returns 0.
 */
static int
marvel_agp_configure(alpha_agp_info *agp)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
	struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
	unsigned int new_rate = 0;	/* 0 == requested rate is usable as-is */
	unsigned long agp_pll;

	/*
	 * Check the requested mode against the PLL setting.
	 * The agpgart_be code has not programmed the card yet,
	 * so we can still tweak mode here.
	 */
	agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
	switch(IO7_PLL_RNGB(agp_pll)) {
	case 0x4:				/* 2x only */
		/*
		 * The PLL is only programmed for 2x, so adjust the
		 * rate to 2x, if necessary.
		 */
		if (agp->mode.bits.rate != 2)
			new_rate = 2;
		break;

	case 0x6:				/* 1x / 4x */
		/*
		 * The PLL is programmed for 1x or 4x.  Don't go faster
		 * than requested, so if the requested rate is 2x, use 1x.
		 */
		if (agp->mode.bits.rate == 2)
			new_rate = 1;
		break;

	default:				/* ??????? */
		/*
		 * Don't know what this PLL setting is, take the requested
		 * rate, but warn the user.
		 */
		printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
		       __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
		break;
	}

	/*
	 * Set the new rate, if necessary.
	 */
	if (new_rate) {
		printk("Requested AGP Rate %dX not compatible "
		       "with PLL setting - using %dX\n",
		       agp->mode.bits.rate,
		       new_rate);

		agp->mode.bits.rate = new_rate;
	}

	printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
	       agp->hose->index, agp->mode.bits.rate,
	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);

	csrs->AGP_CMD.csr = agp->mode.lw;

	return 0;
}
1030 | |||
1031 | static int | ||
1032 | marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) | ||
1033 | { | ||
1034 | struct marvel_agp_aperture *aper = agp->aperture.sysdata; | ||
1035 | return iommu_bind(aper->arena, aper->pg_start + pg_start, | ||
1036 | mem->page_count, mem->memory); | ||
1037 | } | ||
1038 | |||
1039 | static int | ||
1040 | marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) | ||
1041 | { | ||
1042 | struct marvel_agp_aperture *aper = agp->aperture.sysdata; | ||
1043 | return iommu_unbind(aper->arena, aper->pg_start + pg_start, | ||
1044 | mem->page_count); | ||
1045 | } | ||
1046 | |||
1047 | static unsigned long | ||
1048 | marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr) | ||
1049 | { | ||
1050 | struct marvel_agp_aperture *aper = agp->aperture.sysdata; | ||
1051 | unsigned long baddr = addr - aper->arena->dma_base; | ||
1052 | unsigned long pte; | ||
1053 | |||
1054 | if (addr < agp->aperture.bus_base || | ||
1055 | addr >= agp->aperture.bus_base + agp->aperture.size) { | ||
1056 | printk("%s: addr out of range\n", __FUNCTION__); | ||
1057 | return -EINVAL; | ||
1058 | } | ||
1059 | |||
1060 | pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; | ||
1061 | if (!(pte & 1)) { | ||
1062 | printk("%s: pte not valid\n", __FUNCTION__); | ||
1063 | return -EINVAL; | ||
1064 | } | ||
1065 | return (pte >> 1) << PAGE_SHIFT; | ||
1066 | } | ||
1067 | |||
/* AGP backend operations handed out via marvel_agp_info(). */
struct alpha_agp_ops marvel_agp_ops =
{
	.setup		= marvel_agp_setup,
	.cleanup	= marvel_agp_cleanup,
	.configure	= marvel_agp_configure,
	.bind		= marvel_agp_bind_memory,
	.unbind		= marvel_agp_unbind_memory,
	.translate	= marvel_agp_translate
};
1077 | |||
1078 | alpha_agp_info * | ||
1079 | marvel_agp_info(void) | ||
1080 | { | ||
1081 | struct pci_controller *hose; | ||
1082 | io7_ioport_csrs *csrs; | ||
1083 | alpha_agp_info *agp; | ||
1084 | struct io7 *io7; | ||
1085 | |||
1086 | /* | ||
1087 | * Find the first IO7 with an AGP card. | ||
1088 | * | ||
1089 | * FIXME -- there should be a better way (we want to be able to | ||
1090 | * specify and what if the agp card is not video???) | ||
1091 | */ | ||
1092 | hose = NULL; | ||
1093 | for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) { | ||
1094 | struct pci_controller *h; | ||
1095 | vuip addr; | ||
1096 | |||
1097 | if (!io7->ports[IO7_AGP_PORT].enabled) | ||
1098 | continue; | ||
1099 | |||
1100 | h = io7->ports[IO7_AGP_PORT].hose; | ||
1101 | addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0); | ||
1102 | |||
1103 | if (*addr != 0xffffffffu) { | ||
1104 | hose = h; | ||
1105 | break; | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | if (!hose || !hose->sg_pci) | ||
1110 | return NULL; | ||
1111 | |||
1112 | printk("MARVEL - using hose %d as AGP\n", hose->index); | ||
1113 | |||
1114 | /* | ||
1115 | * Get the csrs from the hose. | ||
1116 | */ | ||
1117 | csrs = ((struct io7_port *)hose->sysdata)->csrs; | ||
1118 | |||
1119 | /* | ||
1120 | * Allocate the info structure. | ||
1121 | */ | ||
1122 | agp = kmalloc(sizeof(*agp), GFP_KERNEL); | ||
1123 | |||
1124 | /* | ||
1125 | * Fill it in. | ||
1126 | */ | ||
1127 | agp->hose = hose; | ||
1128 | agp->private = NULL; | ||
1129 | agp->ops = &marvel_agp_ops; | ||
1130 | |||
1131 | /* | ||
1132 | * Aperture - not configured until ops.setup(). | ||
1133 | */ | ||
1134 | agp->aperture.bus_base = 0; | ||
1135 | agp->aperture.size = 0; | ||
1136 | agp->aperture.sysdata = NULL; | ||
1137 | |||
1138 | /* | ||
1139 | * Capabilities. | ||
1140 | * | ||
1141 | * NOTE: IO7 reports through AGP_STAT that it can support a read queue | ||
1142 | * depth of 17 (rq = 0x10). It actually only supports a depth of | ||
1143 | * 16 (rq = 0xf). | ||
1144 | */ | ||
1145 | agp->capability.lw = csrs->AGP_STAT.csr; | ||
1146 | agp->capability.bits.rq = 0xf; | ||
1147 | |||
1148 | /* | ||
1149 | * Mode. | ||
1150 | */ | ||
1151 | agp->mode.lw = csrs->AGP_CMD.csr; | ||
1152 | |||
1153 | return agp; | ||
1154 | } | ||
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c new file mode 100644 index 000000000000..28849c894153 --- /dev/null +++ b/arch/alpha/kernel/core_mcpcia.c | |||
@@ -0,0 +1,618 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_mcpcia.c | ||
3 | * | ||
4 | * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). | ||
5 | * | ||
6 | * Code common to all MCbus-PCI Adaptor core logic chipsets | ||
7 | */ | ||
8 | |||
9 | #define __EXTERN_INLINE inline | ||
10 | #include <asm/io.h> | ||
11 | #include <asm/core_mcpcia.h> | ||
12 | #undef __EXTERN_INLINE | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/delay.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | |||
22 | #include "proto.h" | ||
23 | #include "pci_impl.h" | ||
24 | |||
25 | /* | ||
26 | * NOTE: Herein lie back-to-back mb instructions. They are magic. | ||
27 | * One plausible explanation is that the i/o controller does not properly | ||
28 | * handle the system transaction. Another involves timing. Ho hum. | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * BIOS32-style PCI interface: | ||
33 | */ | ||
34 | |||
/* Set DEBUG_CFG to 1 for verbose config-cycle tracing via printk. */
#define DEBUG_CFG 0

#if DEBUG_CFG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

/* Maximum number of MCPCIA PCI hoses supported. */
#define MCPCIA_MAX_HOSES 4
44 | |||
45 | /* | ||
46 | * Given a bus, device, and function number, compute resulting | ||
47 | * configuration space address and setup the MCPCIA_HAXR2 register | ||
48 | * accordingly. It is therefore not safe to have concurrent | ||
49 | * invocations to configuration space access routines, but there | ||
50 | * really shouldn't be any need for this. | ||
51 | * | ||
52 | * Type 0: | ||
53 | * | ||
54 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
55 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
56 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
57 | * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| | ||
58 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
59 | * | ||
60 | * 31:11 Device select bit. | ||
61 | * 10:8 Function number | ||
62 | * 7:2 Register number | ||
63 | * | ||
64 | * Type 1: | ||
65 | * | ||
66 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
67 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
68 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
69 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
70 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
71 | * | ||
72 | * 31:24 reserved | ||
73 | * 23:16 bus number (8 bits = 128 possible buses) | ||
74 | * 15:11 Device number (5 bits) | ||
75 | * 10:8 function number | ||
76 | * 7:2 register number | ||
77 | * | ||
78 | * Notes: | ||
79 | * The function number selects which function of a multi-function device | ||
80 | * (e.g., SCSI and Ethernet). | ||
81 | * | ||
82 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
83 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
84 | * bits. | ||
85 | */ | ||
86 | |||
/*
 * Read one longword of MCPCIA config space at 'addr' (already swizzled
 * by mk_conf_addr).  Probing an absent device machine-checks, so the
 * mcheck_expected machinery is armed first and a taken mcheck is
 * converted to an all-ones result.  Runs with local IRQs disabled;
 * not safe against concurrent config cycles (HAXR2 setup -- see the
 * comment at the top of the file).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1,
	  struct pci_controller *hose)
{
	unsigned long flags;
	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int stat0, value, temp, cpu;

	cpu = smp_processor_id();

	local_irq_save(flags);

	DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n",
		 addr, type1, mid));

	/* Reset status register to avoid losing errors. */
	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = stat0;
	mb();
	temp = *(vuip)MCPCIA_CAP_ERR(mid);	/* read back to flush */
	DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));

	mb();
	draina();		/* drain pending aborts before arming */
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access configuration space. */
	value = *((vuip)addr);
	mb();
	mb();  /* magic */

	/* A taken mcheck means nothing answered: report all-ones. */
	if (mcheck_taken(cpu)) {
		mcheck_taken(cpu) = 0;
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(cpu) = 0;
	mb();

	DBG_CFG(("conf_read(): finished\n"));

	local_irq_restore(flags);
	return value;
}
134 | |||
/*
 * Write one longword of MCPCIA config space at 'addr'.  Same
 * mcheck-arming dance as conf_read, except a taken mcheck is simply
 * dropped.  The trailing CAP_ERR read forces the posted write out.
 * Runs with local IRQs disabled.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1,
	   struct pci_controller *hose)
{
	unsigned long flags;
	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int stat0, temp, cpu;

	cpu = smp_processor_id();

	local_irq_save(flags);	/* avoid getting hit by machine check */

	/* Reset status register to avoid losing errors. */
	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
	temp = *(vuip)MCPCIA_CAP_ERR(mid);	/* read back to flush */
	DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));

	draina();
	mcheck_expected(cpu) = 1;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access configuration space. */
	*((vuip)addr) = value;
	mb();
	mb();  /* magic */
	temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
	mcheck_expected(cpu) = 0;
	mb();

	DBG_CFG(("conf_write(): finished\n"));
	local_irq_restore(flags);
}
169 | |||
170 | static int | ||
171 | mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where, | ||
172 | struct pci_controller *hose, unsigned long *pci_addr, | ||
173 | unsigned char *type1) | ||
174 | { | ||
175 | u8 bus = pbus->number; | ||
176 | unsigned long addr; | ||
177 | |||
178 | DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x," | ||
179 | " pci_addr=0x%p, type1=0x%p)\n", | ||
180 | bus, devfn, hose->index, where, pci_addr, type1)); | ||
181 | |||
182 | /* Type 1 configuration cycle for *ALL* busses. */ | ||
183 | *type1 = 1; | ||
184 | |||
185 | if (!pbus->parent) /* No parent means peer PCI bus. */ | ||
186 | bus = 0; | ||
187 | addr = (bus << 16) | (devfn << 8) | (where); | ||
188 | addr <<= 5; /* swizzle for SPARSE */ | ||
189 | addr |= hose->config_space_base; | ||
190 | |||
191 | *pci_addr = addr; | ||
192 | DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
/*
 * pci_ops.read for MCPCIA SPARSE config space: the low address bits
 * encode the transfer length, the chip returns lane-positioned data,
 * and the requested bytes are extracted with extbl/extwl.
 */
static int
mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	struct pci_controller *hose = bus->sysdata;
	unsigned long addr, w;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr |= (size - 1) * 8;		/* transfer-length code */
	w = conf_read(addr, type1, hose);
	switch (size) {
	case 1:
		*value = __kernel_extbl(w, where & 3);	/* pick the byte */
		break;
	case 2:
		*value = __kernel_extwl(w, where & 3);	/* pick the word */
		break;
	case 4:
		*value = w;
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
222 | |||
/*
 * pci_ops.write for MCPCIA SPARSE config space: position the value in
 * its byte lanes with insql, encode the transfer length in the low
 * address bits, and issue the write via conf_write.
 */
static int
mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	struct pci_controller *hose = bus->sysdata;
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr |= (size - 1) * 8;		/* transfer-length code */
	value = __kernel_insql(value, where & 3);	/* lane-position */
	conf_write(addr, value, type1, hose);
	return PCIBIOS_SUCCESSFUL;
}
239 | |||
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops mcpcia_pci_ops =
{
	.read =		mcpcia_read_config,
	.write =	mcpcia_write_config,
};
245 | |||
/*
 * Invalidate the MCPCIA scatter-gather TLB after IOMMU pte updates.
 * wmb() orders the pte writes ahead of the invalidate write.
 */
void
mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0;
	mb();
}
253 | |||
/*
 * Probe for the presence of hose `h' by reading its PCI revision
 * register.  A read from an absent hose machine-checks, so the
 * per-cpu mcheck_* state is armed first (expected == 2 marks this as
 * a probe) and checked afterwards; a taken mcheck means "absent".
 * Returns nonzero iff the hose answered as a host bridge.
 */
static int __init
mcpcia_probe_hose(int h)
{
	int cpu = smp_processor_id();
	int mid = MCPCIA_HOSE2MID(h);
	unsigned int pci_rev;

	/* Gotta be REAL careful.  If hose is absent, we get an mcheck.  */

	mb();
	mb();
	draina();
	wrmces(7);

	mcheck_expected(cpu) = 2;	/* indicates probing */
	mcheck_taken(cpu) = 0;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access the bus revision word. */
	pci_rev = *(vuip)MCPCIA_REV(mid);

	mb();
	mb();  /* magic */
	if (mcheck_taken(cpu)) {
		/* The probe read machine-checked: hose is absent. */
		mcheck_taken(cpu) = 0;
		pci_rev = 0xffffffff;
		mb();
	}
	mcheck_expected(cpu) = 0;
	mb();

	/* High half of the revision word carries the class code. */
	return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST;
}
288 | |||
/*
 * Allocate and populate a pci_controller for hose `h': sparse/dense
 * MMIO and I/O bases, config-space base, and the I/O, MEM and HAE_MEM
 * resource ranges.  Hose 0 additionally becomes the ISA hose.
 * NOTE(review): alloc_pci_controller()/alloc_resource() results are
 * used unchecked -- presumably they cannot fail this early in boot;
 * confirm before reusing this pattern.
 */
static void __init
mcpcia_new_hose(int h)
{
	struct pci_controller *hose;
	struct resource *io, *mem, *hae_mem;
	int mid = MCPCIA_HOSE2MID(h);

	hose = alloc_pci_controller();
	if (h == 0)
		pci_isa_hose = hose;
	io = alloc_resource();
	mem = alloc_resource();
	hae_mem = alloc_resource();

	hose->io_space = io;
	/* Only the HAE-reachable sub-window is exposed as mem_space. */
	hose->mem_space = hae_mem;
	hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR;
	hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR;
	hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR;
	hose->dense_io_base = 0;
	hose->config_space_base = MCPCIA_CONF(mid);
	hose->index = h;

	io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS;
	io->end = io->start + 0xffff;
	io->name = pci_io_names[h];
	io->flags = IORESOURCE_IO;

	mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS;
	mem->end = mem->start + 0xffffffff;
	mem->name = pci_mem_names[h];
	mem->flags = IORESOURCE_MEM;

	/* hae_mem is the bottom MCPCIA_MEM_MASK-sized slice of mem. */
	hae_mem->start = mem->start;
	hae_mem->end = mem->start + MCPCIA_MEM_MASK;
	hae_mem->name = pci_hae0_name;
	hae_mem->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, io) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", h);
	if (request_resource(&iomem_resource, mem) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", h);
	if (request_resource(mem, hae_mem) < 0)
		printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h);
}
334 | |||
/*
 * Acknowledge and clear all latched error bits in CAP_ERR of IOD
 * `mid' (write-one-to-clear).  The trailing read forces the posted
 * write out to the chip before we continue.
 */
static void
mcpcia_pci_clr_err(int mid)
{
	*(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff;   /* Clear them all.  */
	mb();
	*(vuip)MCPCIA_CAP_ERR(mid);		   /* Re-read for force write.  */
}
343 | |||
/*
 * Per-hose hardware initialization: clear latched errors, enable
 * master/target-abort reporting, program the three DMA windows,
 * flush the SG TLB, and zero HBASE/HAE so extended addressing starts
 * from zero.
 */
static void __init
mcpcia_startup_hose(struct pci_controller *hose)
{
	int mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int tmp;

	mcpcia_pci_clr_err(mid);

	/*
	 * Set up error reporting.
	 */
	tmp = *(vuip)MCPCIA_CAP_ERR(mid);
	tmp |= 0x0006;		/* master/target abort */
	*(vuip)MCPCIA_CAP_ERR(mid) = tmp;
	mb();
	tmp = *(vuip)MCPCIA_CAP_ERR(mid);	/* read back to flush */

	/*
	 * Set up the PCI->physical memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
	 * Window 2 is direct access 2GB at 2GB
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = iommu_arena_new(hose, 0x40000000,
				       size_for_memory(0x40000000), 0);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	/* SG windows get |3, the direct window gets |1 -- presumably
	   enable + scatter/gather bits; confirm against MCPCIA docs. */
	*(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3;
	*(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8;

	*(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3;
	*(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000;
	*(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8;

	*(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1;
	*(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000;
	*(vuip)MCPCIA_T2_BASE(mid) = 0;

	/* Window 3 is left disabled. */
	*(vuip)MCPCIA_W3_BASE(mid) = 0x0;

	mcpcia_pci_tbi(hose, 0, -1);

	*(vuip)MCPCIA_HBASE(mid) = 0x0;
	mb();

	*(vuip)MCPCIA_HAE_MEM(mid) = 0U;
	mb();
	*(vuip)MCPCIA_HAE_MEM(mid);		/* read it back. */
	*(vuip)MCPCIA_HAE_IO(mid) = 0;
	mb();
	*(vuip)MCPCIA_HAE_IO(mid);		/* read it back. */
}
401 | |||
/*
 * Early MCPCIA setup.  Only hose 0 is created here; probing the
 * others can machine-check and must wait until mcheck handling is
 * ready (mcpcia_init_hoses, called from init_IRQ).
 */
void __init
mcpcia_init_arch(void)
{
	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* Allocate hose 0.  That's the one that all the ISA junk hangs
	   off of, from which we'll be registering stuff here in a bit.
	   Other hose detection is done in mcpcia_init_hoses, which is
	   called from init_IRQ.  */

	mcpcia_new_hose(0);
}
415 | |||
/* This is called from init_IRQ, since we cannot take interrupts
   before then.  Which means we cannot do this in init_arch.  */

/*
 * Detect all present hoses, create a pci_controller for each new one
 * (hose 0 already exists from mcpcia_init_arch), then run the
 * per-hose hardware setup over the global hose list.
 */
void __init
mcpcia_init_hoses(void)
{
	struct pci_controller *hose;
	int hose_count;
	int h;

	/* First, find how many hoses we have. */
	hose_count = 0;
	for (h = 0; h < MCPCIA_MAX_HOSES; ++h) {
		if (mcpcia_probe_hose(h)) {
			if (h != 0)
				mcpcia_new_hose(h);
			hose_count++;
		}
	}

	printk("mcpcia_init_hoses: found %d hoses\n", hose_count);

	/* Now do init for each hose. */
	for (hose = hose_head; hose; hose = hose->next)
		mcpcia_startup_hose(hose);
}
442 | |||
/*
 * Dump the EV5 processor-specific portion of an uncorrectable
 * machine-check logout frame to the console.
 */
static void
mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout)
{
	struct el_common_EV5_uncorrectable_mcheck *frame;
	int i;

	frame = &logout->procdata;

	/* Print PAL fields */
	for (i = 0; i < 24; i += 2) {
		printk(" paltmp[%d-%d] = %16lx %16lx\n",
		       i, i+1, frame->paltemp[i], frame->paltemp[i+1]);
	}
	for (i = 0; i < 8; i += 2) {
		printk(" shadow[%d-%d] = %16lx %16lx\n",
		       i, i+1, frame->shadow[i],
		       frame->shadow[i+1]);
	}
	printk(" Addr of excepting instruction = %16lx\n",
	       frame->exc_addr);
	printk(" Summary of arithmetic traps = %16lx\n",
	       frame->exc_sum);
	printk(" Exception mask = %16lx\n",
	       frame->exc_mask);
	printk(" Base address for PALcode = %16lx\n",
	       frame->pal_base);
	printk(" Interrupt Status Reg = %16lx\n",
	       frame->isr);
	printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n",
	       frame->icsr);
	/* Bit 11 of ic_perr_stat selects data vs. tag parity error. */
	printk(" I-CACHE Reg %s parity error = %16lx\n",
	       (frame->ic_perr_stat & 0x800L) ?
	       "Data" : "Tag",
	       frame->ic_perr_stat);
	printk(" D-CACHE error Reg = %16lx\n",
	       frame->dc_perr_stat);
	if (frame->dc_perr_stat & 0x2) {
		switch (frame->dc_perr_stat & 0x03c) {
		case 8:
			printk(" Data error in bank 1\n");
			break;
		case 4:
			printk(" Data error in bank 0\n");
			break;
		case 20:
			printk(" Tag error in bank 1\n");
			break;
		case 10:
			printk(" Tag error in bank 0\n");
			break;
		}
	}
	printk(" Effective VA = %16lx\n",
	       frame->va);
	printk(" Reason for D-stream = %16lx\n",
	       frame->mm_stat);
	printk(" EV5 SCache address = %16lx\n",
	       frame->sc_addr);
	printk(" EV5 SCache TAG/Data parity = %16lx\n",
	       frame->sc_stat);
	printk(" EV5 BC_TAG_ADDR = %16lx\n",
	       frame->bc_tag_addr);
	printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n",
	       frame->ei_addr);
	printk(" Fill Syndrome = %16lx\n",
	       frame->fill_syndrome);
	printk(" EI_STAT reg = %16lx\n",
	       frame->ei_stat);
	printk(" LD_LOCK = %16lx\n",
	       frame->ld_lock);
}
514 | |||
/*
 * Dump the per-IOD (per-hose) system-area subpackets of a
 * machine-check logout frame.  NOTE(review): assumes the frame holds
 * exactly one IOD subpacket per registered hose, in hose-list order
 * -- confirm against the logout frame layout.
 */
static void
mcpcia_print_system_area(unsigned long la_ptr)
{
	struct el_common *frame;
	struct pci_controller *hose;

	/* On-the-wire layout of one IOD register subpacket. */
	struct IOD_subpacket {
		unsigned long base;
		unsigned int whoami;
		unsigned int rsvd1;
		unsigned int pci_rev;
		unsigned int cap_ctrl;
		unsigned int hae_mem;
		unsigned int hae_io;
		unsigned int int_ctl;
		unsigned int int_reg;
		unsigned int int_mask0;
		unsigned int int_mask1;
		unsigned int mc_err0;
		unsigned int mc_err1;
		unsigned int cap_err;
		unsigned int rsvd2;
		unsigned int pci_err1;
		unsigned int mdpa_stat;
		unsigned int mdpa_syn;
		unsigned int mdpb_stat;
		unsigned int mdpb_syn;
		unsigned int rsvd3;
		unsigned int rsvd4;
		unsigned int rsvd5;
	} *iodpp;

	frame = (struct el_common *)la_ptr;
	iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset);

	for (hose = hose_head; hose; hose = hose->next, iodpp++) {

		printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n",
		       hose->index, iodpp->base);
		printk(" WHOAMI = %8x\n", iodpp->whoami);
		printk(" PCI_REV = %8x\n", iodpp->pci_rev);
		printk(" CAP_CTRL = %8x\n", iodpp->cap_ctrl);
		printk(" HAE_MEM = %8x\n", iodpp->hae_mem);
		printk(" HAE_IO = %8x\n", iodpp->hae_io);
		printk(" INT_CTL = %8x\n", iodpp->int_ctl);
		printk(" INT_REG = %8x\n", iodpp->int_reg);
		printk(" INT_MASK0 = %8x\n", iodpp->int_mask0);
		printk(" INT_MASK1 = %8x\n", iodpp->int_mask1);
		printk(" MC_ERR0 = %8x\n", iodpp->mc_err0);
		printk(" MC_ERR1 = %8x\n", iodpp->mc_err1);
		printk(" CAP_ERR = %8x\n", iodpp->cap_err);
		printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1);
		printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat);
		printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn);
		printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat);
		printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn);
	}
}
573 | |||
/*
 * MCPCIA machine-check handler.  `expected' (sampled before draining)
 * selects the cleanup: 0 = unexpected -- clear errors on every hose
 * since we cannot tell which one faulted; 1 = expected from an I/O
 * access -- clear only the mid recorded in mcheck_extra; 2 = hose
 * probing -- nothing to clear.  Unexpected mchecks whose vector is
 * not 0x620/0x630 (presumably the correctable-error vectors --
 * confirm) additionally get a full frame dump.
 */
void
mcpcia_machine_check(unsigned long vector, unsigned long la_ptr,
		     struct pt_regs * regs)
{
	struct el_common *mchk_header;		/* set but unused here */
	struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
	unsigned int cpu = smp_processor_id();
	int expected;

	mchk_header = (struct el_common *)la_ptr;
	mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
	expected = mcheck_expected(cpu);

	mb();
	mb();  /* magic */
	draina();

	switch (expected) {
	case 0:
	    {
		/* FIXME: how do we figure out which hose the
		   error was on?  */
		struct pci_controller *hose;
		for (hose = hose_head; hose; hose = hose->next)
			mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index));
		break;
	    }
	case 1:
		mcpcia_pci_clr_err(mcheck_extra(cpu));
		break;
	default:
		/* Otherwise, we're being called from mcpcia_probe_hose
		   and there's no hose to clear an error from.  */
		break;
	}

	/* Re-enable machine-check reporting. */
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, regs, "MCPCIA", expected != 0);
	if (!expected && vector != 0x620 && vector != 0x630) {
		mcpcia_print_uncorrectable(mchk_logout);
		mcpcia_print_system_area(la_ptr);
	}
}
diff --git a/arch/alpha/kernel/core_polaris.c b/arch/alpha/kernel/core_polaris.c new file mode 100644 index 000000000000..277674a500ff --- /dev/null +++ b/arch/alpha/kernel/core_polaris.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_polaris.c | ||
3 | * | ||
4 | * POLARIS chip-specific code | ||
5 | */ | ||
6 | |||
7 | #define __EXTERN_INLINE inline | ||
8 | #include <asm/io.h> | ||
9 | #include <asm/core_polaris.h> | ||
10 | #undef __EXTERN_INLINE | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/init.h> | ||
16 | |||
17 | #include <asm/ptrace.h> | ||
18 | |||
19 | #include "proto.h" | ||
20 | #include "pci_impl.h" | ||
21 | |||
22 | /* | ||
23 | * BIOS32-style PCI interface: | ||
24 | */ | ||
25 | |||
26 | #define DEBUG_CONFIG 0 | ||
27 | |||
28 | #if DEBUG_CONFIG | ||
29 | # define DBG_CFG(args) printk args | ||
30 | #else | ||
31 | # define DBG_CFG(args) | ||
32 | #endif | ||
33 | |||
34 | |||
35 | /* | ||
36 | * Given a bus, device, and function number, compute resulting | ||
37 | * configuration space address. This is fairly straightforward | ||
38 | * on POLARIS, since the chip itself generates Type 0 or Type 1 | ||
39 | * cycles automatically depending on the bus number (Bus 0 is | ||
40 | * hardwired to Type 0, all others are Type 1. Peer bridges | ||
41 | * are not supported). | ||
42 | * | ||
43 | * All types: | ||
44 | * | ||
45 | * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
46 | * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
47 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
48 | * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| | ||
49 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
50 | * | ||
51 | * 23:16 bus number (8 bits = 128 possible buses) | ||
52 | * 15:11 Device number (5 bits) | ||
53 | * 10:8 function number | ||
54 | * 7:2 register number | ||
55 | * | ||
56 | * Notes: | ||
57 | * The function number selects which function of a multi-function device | ||
58 | * (e.g., scsi and ethernet). | ||
59 | * | ||
60 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
61 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
62 | * bits. | ||
63 | */ | ||
64 | |||
65 | static int | ||
66 | mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, | ||
67 | unsigned long *pci_addr, u8 *type1) | ||
68 | { | ||
69 | u8 bus = pbus->number; | ||
70 | |||
71 | *type1 = (bus == 0) ? 0 : 1; | ||
72 | *pci_addr = (bus << 16) | (device_fn << 8) | (where) | | ||
73 | POLARIS_DENSE_CONFIG_BASE; | ||
74 | |||
75 | DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," | ||
76 | " returning address 0x%p\n" | ||
77 | bus, device_fn, where, *pci_addr)); | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
/*
 * Read `size' bytes (1, 2 or 4) of config space at `where' for device
 * `devfn' on `bus'.  POLARIS config space is dense-mapped, so the
 * value is fetched directly with a load of the matching width.
 */
static int
polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
106 | |||
107 | |||
/*
 * Write `size' bytes (1, 2 or 4) of config space at `where' for
 * device `devfn' on `bus'.  Each store is followed by mb() and a
 * read-back of the same location to push the posted write out to the
 * device before returning.
 */
static int
polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
138 | |||
/* POLARIS config-space accessors hooked into the generic PCI layer. */
struct pci_ops polaris_pci_ops =
{
	.read =		polaris_read_config,
	.write =	polaris_write_config,
};
144 | |||
/*
 * Set up the single POLARIS hose.  Only dense MMIO/I-O spaces are
 * configured (sparse bases are zero), no scatter/gather arenas are
 * created, and DMA goes through a fixed 2GB direct-map window at 2GB.
 */
void __init
polaris_init_arch(void)
{
	struct pci_controller *hose;

	/* May need to initialize error reporting (see PCICTL0/1), but
	 * for now assume that the firmware has done the right thing
	 * already.
	 */
#if 0
	printk("polaris_init_arch(): trusting firmware for setup\n");
#endif

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	hose->sparse_mem_base = 0;
	hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR;
	hose->sparse_io_base = 0;
	hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR;

	hose->sg_isa = hose->sg_pci = NULL;

	/* The I/O window is fixed at 2G @ 2G.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;
}
178 | |||
/*
 * Clear latched error bits in the POLARIS W_STATUS register
 * (write-one-to-clear); the trailing read forces the posted write to
 * the chip before we continue.
 */
static inline void
polaris_pci_clr_err(void)
{
	*(vusp)POLARIS_W_STATUS;
	/* Write 1's to settable bits to clear errors */
	*(vusp)POLARIS_W_STATUS = 0x7800;
	mb();
	*(vusp)POLARIS_W_STATUS;
}
188 | |||
/*
 * POLARIS machine-check handler: clear the chipset error state, drain
 * pending aborts, re-enable machine-check reporting (wrmces), then
 * hand off to the generic reporting code.  mcheck_expected(0) tells
 * it whether this mcheck was deliberately provoked.
 */
void
polaris_machine_check(unsigned long vector, unsigned long la_ptr,
		      struct pt_regs * regs)
{
	/* Clear the error before any reporting.  */
	mb();
	mb();
	draina();
	polaris_pci_clr_err();
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, regs, "POLARIS",
			    mcheck_expected(0));
}
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c new file mode 100644 index 000000000000..ecce09e3626a --- /dev/null +++ b/arch/alpha/kernel/core_t2.c | |||
@@ -0,0 +1,622 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_t2.c | ||
3 | * | ||
4 | * Written by Jay A Estabrook (jestabro@amt.tay1.dec.com). | ||
5 | * December 1996. | ||
6 | * | ||
7 | * based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com) | ||
8 | * | ||
9 | * Code common to all T2 core logic chips. | ||
10 | */ | ||
11 | |||
12 | #define __EXTERN_INLINE | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/core_t2.h> | ||
15 | #undef __EXTERN_INLINE | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/init.h> | ||
21 | |||
22 | #include <asm/ptrace.h> | ||
23 | #include <asm/delay.h> | ||
24 | |||
25 | #include "proto.h" | ||
26 | #include "pci_impl.h" | ||
27 | |||
28 | /* For dumping initial DMA window settings. */ | ||
29 | #define DEBUG_PRINT_INITIAL_SETTINGS 0 | ||
30 | |||
31 | /* For dumping final DMA window settings. */ | ||
32 | #define DEBUG_PRINT_FINAL_SETTINGS 0 | ||
33 | |||
34 | /* | ||
35 | * By default, we direct-map starting at 2GB, in order to allow the | ||
36 | * maximum size direct-map window (2GB) to match the maximum amount of | ||
37 | * memory (2GB) that can be present on SABLEs. But that limits the | ||
38 | * floppy to DMA only via the scatter/gather window set up for 8MB | ||
39 | * ISA DMA, since the maximum ISA DMA address is 2GB-1. | ||
40 | * | ||
41 | * For now, this seems a reasonable trade-off: even though most SABLEs | ||
42 | * have less than 1GB of memory, floppy usage/performance will not | ||
43 | * really be affected by forcing it to go via scatter/gather... | ||
44 | */ | ||
45 | #define T2_DIRECTMAP_2G 1 | ||
46 | |||
47 | #if T2_DIRECTMAP_2G | ||
48 | # define T2_DIRECTMAP_START 0x80000000UL | ||
49 | # define T2_DIRECTMAP_LENGTH 0x80000000UL | ||
50 | #else | ||
51 | # define T2_DIRECTMAP_START 0x40000000UL | ||
52 | # define T2_DIRECTMAP_LENGTH 0x40000000UL | ||
53 | #endif | ||
54 | |||
55 | /* The ISA scatter/gather window settings. */ | ||
56 | #define T2_ISA_SG_START 0x00800000UL | ||
57 | #define T2_ISA_SG_LENGTH 0x00800000UL | ||
58 | |||
59 | /* | ||
60 | * NOTE: Herein lie back-to-back mb instructions. They are magic. | ||
61 | * One plausible explanation is that the i/o controller does not properly | ||
62 | * handle the system transaction. Another involves timing. Ho hum. | ||
63 | */ | ||
64 | |||
65 | /* | ||
66 | * BIOS32-style PCI interface: | ||
67 | */ | ||
68 | |||
69 | #define DEBUG_CONFIG 0 | ||
70 | |||
71 | #if DEBUG_CONFIG | ||
72 | # define DBG(args) printk args | ||
73 | #else | ||
74 | # define DBG(args) | ||
75 | #endif | ||
76 | |||
77 | static volatile unsigned int t2_mcheck_any_expected; | ||
78 | static volatile unsigned int t2_mcheck_last_taken; | ||
79 | |||
/* Place to save the DMA Window registers as set up by SRM
   for restoration during shutdown.
   NOTE(review): declared with __attribute((common)); the reason is
   not evident from this file -- confirm before changing linkage.  */
static struct
{
	struct {
		unsigned long wbase;
		unsigned long wmask;
		unsigned long tbase;
	} window[2];
	unsigned long hae_1;
	unsigned long hae_2;
	unsigned long hae_3;
	unsigned long hae_4;
	unsigned long hbase;
} t2_saved_config __attribute((common));
95 | |||
96 | /* | ||
97 | * Given a bus, device, and function number, compute resulting | ||
98 | * configuration space address and setup the T2_HAXR2 register | ||
99 | * accordingly. It is therefore not safe to have concurrent | ||
100 | * invocations to configuration space access routines, but there | ||
101 | * really shouldn't be any need for this. | ||
102 | * | ||
103 | * Type 0: | ||
104 | * | ||
105 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
106 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
107 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
108 | * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| | ||
109 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
110 | * | ||
111 | * 31:11 Device select bit. | ||
112 | * 10:8 Function number | ||
113 | * 7:2 Register number | ||
114 | * | ||
115 | * Type 1: | ||
116 | * | ||
117 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
118 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
119 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
120 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
121 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
122 | * | ||
123 | * 31:24 reserved | ||
124 | * 23:16 bus number (8 bits = 128 possible buses) | ||
125 | * 15:11 Device number (5 bits) | ||
126 | * 10:8 function number | ||
127 | * 7:2 register number | ||
128 | * | ||
129 | * Notes: | ||
130 | * The function number selects which function of a multi-function device | ||
131 | * (e.g., SCSI and Ethernet). | ||
132 | * | ||
133 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
134 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
135 | * bits. | ||
136 | */ | ||
137 | |||
138 | static int | ||
139 | mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, | ||
140 | unsigned long *pci_addr, unsigned char *type1) | ||
141 | { | ||
142 | unsigned long addr; | ||
143 | u8 bus = pbus->number; | ||
144 | |||
145 | DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x," | ||
146 | " addr=0x%lx, type1=0x%x)\n", | ||
147 | bus, device_fn, where, pci_addr, type1)); | ||
148 | |||
149 | if (bus == 0) { | ||
150 | int device = device_fn >> 3; | ||
151 | |||
152 | /* Type 0 configuration cycle. */ | ||
153 | |||
154 | if (device > 8) { | ||
155 | DBG(("mk_conf_addr: device (%d)>20, returning -1\n", | ||
156 | device)); | ||
157 | return -1; | ||
158 | } | ||
159 | |||
160 | *type1 = 0; | ||
161 | addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where); | ||
162 | } else { | ||
163 | /* Type 1 configuration cycle. */ | ||
164 | *type1 = 1; | ||
165 | addr = (bus << 16) | (device_fn << 8) | (where); | ||
166 | } | ||
167 | *pci_addr = addr; | ||
168 | DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); | ||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * NOTE: both conf_read() and conf_write() may set HAE_3 when needing | ||
174 | * to do type1 access. This is protected by the use of spinlock IRQ | ||
175 | * primitives in the wrapper functions pci_{read,write}_config_*() | ||
176 | * defined in drivers/pci/pci.c. | ||
177 | */ | ||
/*
 * Read one longword of T2 config space at `addr'.  An access to a
 * nonexistent device machine-checks, so the per-cpu mcheck_* flags
 * are armed first and 0xffffffff is returned if a mcheck was taken.
 * Type 1 cycles temporarily modify T2_HAE_3 (callers are serialized
 * -- see the note above this function).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned int value, cpu, taken;
	unsigned long t2_cfg = 0;

	cpu = smp_processor_id();

	DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));

	/* If Type1 access, must set T2 CFG.  */
	if (type1) {
		t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
		*(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
		mb();
	}
	mb();
	draina();

	/* Arm the expected-mcheck state before touching config space. */
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	t2_mcheck_any_expected |= (1 << cpu);
	mb();

	/* Access configuration space.  */
	value = *(vuip)addr;
	mb();
	mb();  /* magic */

	/* Wait for possible mcheck. Also, this lets other CPUs clear
	   their mchecks as well, as they can reliably tell when
	   another CPU is in the midst of handling a real mcheck via
	   the "taken" function. */
	udelay(100);

	if ((taken = mcheck_taken(cpu))) {
		/* The read machine-checked: no device there. */
		mcheck_taken(cpu) = 0;
		t2_mcheck_last_taken |= (1 << cpu);
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(cpu) = 0;
	t2_mcheck_any_expected = 0;
	mb();

	/* If Type1 access, must reset T2 CFG so normal IO space ops work.  */
	if (type1) {
		*(vulp)T2_HAE_3 = t2_cfg;
		mb();
	}

	return value;
}
231 | |||
/*
 * Write one longword of T2 config space at `addr'.  Uses the same
 * expected-mcheck protocol as conf_read(): a write to a nonexistent
 * device machine-checks and is absorbed silently here.  Type 1 cycles
 * temporarily modify and restore T2_HAE_3.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned int cpu, taken;
	unsigned long t2_cfg = 0;

	cpu = smp_processor_id();

	/* If Type1 access, must set T2 CFG.  */
	if (type1) {
		t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
		*(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
		mb();
	}
	mb();
	draina();

	/* Arm the expected-mcheck state before touching config space. */
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	t2_mcheck_any_expected |= (1 << cpu);
	mb();

	/* Access configuration space.  */
	*(vuip)addr = value;
	mb();
	mb();  /* magic */

	/* Wait for possible mcheck. Also, this lets other CPUs clear
	   their mchecks as well, as they can reliably tell when
	   this CPU is in the midst of handling a real mcheck via
	   the "taken" function. */
	udelay(100);

	if ((taken = mcheck_taken(cpu))) {
		mcheck_taken(cpu) = 0;
		t2_mcheck_last_taken |= (1 << cpu);
		mb();
	}
	mcheck_expected(cpu) = 0;
	t2_mcheck_any_expected = 0;
	mb();

	/* If Type1 access, must reset T2 CFG so normal IO space ops work.  */
	if (type1) {
		*(vulp)T2_HAE_3 = t2_cfg;
		mb();
	}
}
280 | |||
281 | static int | ||
282 | t2_read_config(struct pci_bus *bus, unsigned int devfn, int where, | ||
283 | int size, u32 *value) | ||
284 | { | ||
285 | unsigned long addr, pci_addr; | ||
286 | unsigned char type1; | ||
287 | int shift; | ||
288 | long mask; | ||
289 | |||
290 | if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) | ||
291 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
292 | |||
293 | mask = (size - 1) * 8; | ||
294 | shift = (where & 3) * 8; | ||
295 | addr = (pci_addr << 5) + mask + T2_CONF; | ||
296 | *value = conf_read(addr, type1) >> (shift); | ||
297 | return PCIBIOS_SUCCESSFUL; | ||
298 | } | ||
299 | |||
300 | static int | ||
301 | t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, | ||
302 | u32 value) | ||
303 | { | ||
304 | unsigned long addr, pci_addr; | ||
305 | unsigned char type1; | ||
306 | long mask; | ||
307 | |||
308 | if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) | ||
309 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
310 | |||
311 | mask = (size - 1) * 8; | ||
312 | addr = (pci_addr << 5) + mask + T2_CONF; | ||
313 | conf_write(addr, value << ((where & 3) * 8), type1); | ||
314 | return PCIBIOS_SUCCESSFUL; | ||
315 | } | ||
316 | |||
/* T2 config-space accessors hooked into the generic PCI layer. */
struct pci_ops t2_pci_ops =
{
	.read =		t2_read_config,
	.write =	t2_write_config,
};
322 | |||
/*
 * Program T2 DMA window 1 as a direct-mapped (non-SG) window of
 * `length' bytes of PCI bus space starting at `base', translating
 * 1:1 to physical memory from 0 (TBASE1 = 0).
 */
static void __init
t2_direct_map_window1(unsigned long base, unsigned long length)
{
	unsigned long temp;

	__direct_map_base = base;
	__direct_map_size = length;

	/* WBASE: window base in the high bits, (base+length-1)>>20 in
	   the low bits -- presumably the window-end field; confirm
	   against the T2 register spec. */
	temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
	*(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
	temp = (length - 1) & 0xfff00000UL;
	*(vulp)T2_WMASK1 = temp;
	*(vulp)T2_TBASE1 = 0;

#if DEBUG_PRINT_FINAL_SETTINGS
	printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
	       __FUNCTION__,
	       *(vulp)T2_WBASE1,
	       *(vulp)T2_WMASK1,
	       *(vulp)T2_TBASE1);
#endif
}
345 | |||
/*
 * Program T2 DMA window 2 as the single scatter/gather window, backed
 * by a freshly allocated IOMMU arena, then flush the SG TLB.
 */
static void __init
t2_sg_map_window2(struct pci_controller *hose,
		  unsigned long base,
		  unsigned long length)
{
	unsigned long temp;

	/* Note we can only do 1 SG window, as the other is for direct, so
	   do an ISA SG area, especially for the floppy. */
	hose->sg_isa = iommu_arena_new(hose, base, length, 0);
	hose->sg_pci = NULL;

	temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
	*(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
	temp = (length - 1) & 0xfff00000UL;
	*(vulp)T2_WMASK2 = temp;
	/* TBASE takes the PTE array's physical address shifted right 1. */
	*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
	mb();

	t2_pci_tbi(hose, 0, -1); /* flush TLB all */

#if DEBUG_PRINT_FINAL_SETTINGS
	printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
	       __FUNCTION__,
	       *(vulp)T2_WBASE2,
	       *(vulp)T2_WMASK2,
	       *(vulp)T2_TBASE2);
#endif
}
375 | |||
376 | static void __init | ||
377 | t2_save_configuration(void) | ||
378 | { | ||
379 | #if DEBUG_PRINT_INITIAL_SETTINGS | ||
380 | printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */ | ||
381 | printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2); | ||
382 | printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3); | ||
383 | printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4); | ||
384 | printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE); | ||
385 | |||
386 | printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__, | ||
387 | *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1); | ||
388 | printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__, | ||
389 | *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2); | ||
390 | #endif | ||
391 | |||
392 | /* | ||
393 | * Save the DMA Window registers. | ||
394 | */ | ||
395 | t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1; | ||
396 | t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1; | ||
397 | t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1; | ||
398 | t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2; | ||
399 | t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2; | ||
400 | t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2; | ||
401 | |||
402 | t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */ | ||
403 | t2_saved_config.hae_2 = *(vulp)T2_HAE_2; | ||
404 | t2_saved_config.hae_3 = *(vulp)T2_HAE_3; | ||
405 | t2_saved_config.hae_4 = *(vulp)T2_HAE_4; | ||
406 | t2_saved_config.hbase = *(vulp)T2_HBASE; | ||
407 | } | ||
408 | |||
409 | void __init | ||
410 | t2_init_arch(void) | ||
411 | { | ||
412 | struct pci_controller *hose; | ||
413 | unsigned long temp; | ||
414 | unsigned int i; | ||
415 | |||
416 | for (i = 0; i < NR_CPUS; i++) { | ||
417 | mcheck_expected(i) = 0; | ||
418 | mcheck_taken(i) = 0; | ||
419 | } | ||
420 | t2_mcheck_any_expected = 0; | ||
421 | t2_mcheck_last_taken = 0; | ||
422 | |||
423 | /* Enable scatter/gather TLB use. */ | ||
424 | temp = *(vulp)T2_IOCSR; | ||
425 | if (!(temp & (0x1UL << 26))) { | ||
426 | printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n", | ||
427 | temp); | ||
428 | *(vulp)T2_IOCSR = temp | (0x1UL << 26); | ||
429 | mb(); | ||
430 | *(vulp)T2_IOCSR; /* read it back to make sure */ | ||
431 | } | ||
432 | |||
433 | t2_save_configuration(); | ||
434 | |||
435 | /* | ||
436 | * Create our single hose. | ||
437 | */ | ||
438 | pci_isa_hose = hose = alloc_pci_controller(); | ||
439 | hose->io_space = &ioport_resource; | ||
440 | hose->mem_space = &iomem_resource; | ||
441 | hose->index = 0; | ||
442 | |||
443 | hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR; | ||
444 | hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR; | ||
445 | hose->sparse_io_base = T2_IO - IDENT_ADDR; | ||
446 | hose->dense_io_base = 0; | ||
447 | |||
448 | /* | ||
449 | * Set up the PCI->physical memory translation windows. | ||
450 | * | ||
451 | * Window 1 is direct mapped. | ||
452 | * Window 2 is scatter/gather (for ISA). | ||
453 | */ | ||
454 | |||
455 | t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH); | ||
456 | |||
457 | /* Always make an ISA DMA window. */ | ||
458 | t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH); | ||
459 | |||
460 | *(vulp)T2_HBASE = 0x0; /* Disable HOLES. */ | ||
461 | |||
462 | /* Zero HAE. */ | ||
463 | *(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */ | ||
464 | *(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */ | ||
465 | *(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */ | ||
466 | |||
467 | /* | ||
468 | * We also now zero out HAE_4, the dense memory HAE, so that | ||
469 | * we need not account for its "offset" when accessing dense | ||
470 | * memory resources which we allocated in our normal way. This | ||
471 | * HAE would need to stay untouched were we to keep the SRM | ||
472 | * resource settings. | ||
473 | * | ||
474 | * Thus we can now run standard X servers on SABLE/LYNX. :-) | ||
475 | */ | ||
476 | *(vulp)T2_HAE_4 = 0; mb(); | ||
477 | } | ||
478 | |||
479 | void | ||
480 | t2_kill_arch(int mode) | ||
481 | { | ||
482 | /* | ||
483 | * Restore the DMA Window registers. | ||
484 | */ | ||
485 | *(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase; | ||
486 | *(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask; | ||
487 | *(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase; | ||
488 | *(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase; | ||
489 | *(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask; | ||
490 | *(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase; | ||
491 | mb(); | ||
492 | |||
493 | *(vulp)T2_HAE_1 = srm_hae; | ||
494 | *(vulp)T2_HAE_2 = t2_saved_config.hae_2; | ||
495 | *(vulp)T2_HAE_3 = t2_saved_config.hae_3; | ||
496 | *(vulp)T2_HAE_4 = t2_saved_config.hae_4; | ||
497 | *(vulp)T2_HBASE = t2_saved_config.hbase; | ||
498 | mb(); | ||
499 | *(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */ | ||
500 | } | ||
501 | |||
502 | void | ||
503 | t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) | ||
504 | { | ||
505 | unsigned long t2_iocsr; | ||
506 | |||
507 | t2_iocsr = *(vulp)T2_IOCSR; | ||
508 | |||
509 | /* set the TLB Clear bit */ | ||
510 | *(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28); | ||
511 | mb(); | ||
512 | *(vulp)T2_IOCSR; /* read it back to make sure */ | ||
513 | |||
514 | /* clear the TLB Clear bit */ | ||
515 | *(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28); | ||
516 | mb(); | ||
517 | *(vulp)T2_IOCSR; /* read it back to make sure */ | ||
518 | } | ||
519 | |||
520 | #define SIC_SEIC (1UL << 33) /* System Event Clear */ | ||
521 | |||
522 | static void | ||
523 | t2_clear_errors(int cpu) | ||
524 | { | ||
525 | struct sable_cpu_csr *cpu_regs; | ||
526 | |||
527 | cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu); | ||
528 | |||
529 | cpu_regs->sic &= ~SIC_SEIC; | ||
530 | |||
531 | /* Clear CPU errors. */ | ||
532 | cpu_regs->bcce |= cpu_regs->bcce; | ||
533 | cpu_regs->cbe |= cpu_regs->cbe; | ||
534 | cpu_regs->bcue |= cpu_regs->bcue; | ||
535 | cpu_regs->dter |= cpu_regs->dter; | ||
536 | |||
537 | *(vulp)T2_CERR1 |= *(vulp)T2_CERR1; | ||
538 | *(vulp)T2_PERR1 |= *(vulp)T2_PERR1; | ||
539 | |||
540 | mb(); | ||
541 | mb(); /* magic */ | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * SABLE seems to have a "broadcast" style machine check, in that all | ||
546 | * CPUs receive it. And, the issuing CPU, in the case of PCI Config | ||
547 | * space read/write faults, will also receive a second mcheck, upon | ||
548 | * lowering IPL during completion processing in pci_read_config_byte() | ||
549 | * et al. | ||
550 | * | ||
551 | * Hence all the taken/expected/any_expected/last_taken stuff... | ||
552 | */ | ||
553 | void | ||
554 | t2_machine_check(unsigned long vector, unsigned long la_ptr, | ||
555 | struct pt_regs * regs) | ||
556 | { | ||
557 | int cpu = smp_processor_id(); | ||
558 | #ifdef CONFIG_VERBOSE_MCHECK | ||
559 | struct el_common *mchk_header = (struct el_common *)la_ptr; | ||
560 | #endif | ||
561 | |||
562 | /* Clear the error before any reporting. */ | ||
563 | mb(); | ||
564 | mb(); /* magic */ | ||
565 | draina(); | ||
566 | t2_clear_errors(cpu); | ||
567 | |||
568 | /* This should not actually be done until the logout frame is | ||
569 | examined, but, since we don't do that, go on and do this... */ | ||
570 | wrmces(0x7); | ||
571 | mb(); | ||
572 | |||
573 | /* Now, do testing for the anomalous conditions. */ | ||
574 | if (!mcheck_expected(cpu) && t2_mcheck_any_expected) { | ||
575 | /* | ||
576 | * FUNKY: Received mcheck on a CPU and not | ||
577 | * expecting it, but another CPU is expecting one. | ||
578 | * | ||
579 | * Just dismiss it for now on this CPU... | ||
580 | */ | ||
581 | #ifdef CONFIG_VERBOSE_MCHECK | ||
582 | if (alpha_verbose_mcheck > 1) { | ||
583 | printk("t2_machine_check(cpu%d): any_expected 0x%x -" | ||
584 | " (assumed) spurious -" | ||
585 | " code 0x%x\n", cpu, t2_mcheck_any_expected, | ||
586 | (unsigned int)mchk_header->code); | ||
587 | } | ||
588 | #endif | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) { | ||
593 | if (t2_mcheck_last_taken & (1 << cpu)) { | ||
594 | #ifdef CONFIG_VERBOSE_MCHECK | ||
595 | if (alpha_verbose_mcheck > 1) { | ||
596 | printk("t2_machine_check(cpu%d): last_taken 0x%x - " | ||
597 | "unexpected mcheck - code 0x%x\n", | ||
598 | cpu, t2_mcheck_last_taken, | ||
599 | (unsigned int)mchk_header->code); | ||
600 | } | ||
601 | #endif | ||
602 | t2_mcheck_last_taken = 0; | ||
603 | mb(); | ||
604 | return; | ||
605 | } else { | ||
606 | t2_mcheck_last_taken = 0; | ||
607 | mb(); | ||
608 | } | ||
609 | } | ||
610 | |||
611 | #ifdef CONFIG_VERBOSE_MCHECK | ||
612 | if (alpha_verbose_mcheck > 1) { | ||
613 | printk("%s t2_mcheck(cpu%d): last_taken 0x%x - " | ||
614 | "any_expected 0x%x - code 0x%x\n", | ||
615 | (mcheck_expected(cpu) ? "EX" : "UN"), cpu, | ||
616 | t2_mcheck_last_taken, t2_mcheck_any_expected, | ||
617 | (unsigned int)mchk_header->code); | ||
618 | } | ||
619 | #endif | ||
620 | |||
621 | process_mcheck_info(vector, la_ptr, regs, "T2", mcheck_expected(cpu)); | ||
622 | } | ||
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c new file mode 100644 index 000000000000..3662fef7db9a --- /dev/null +++ b/arch/alpha/kernel/core_titan.c | |||
@@ -0,0 +1,806 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_titan.c | ||
3 | * | ||
4 | * Code common to all TITAN core logic chips. | ||
5 | */ | ||
6 | |||
7 | #define __EXTERN_INLINE inline | ||
8 | #include <asm/io.h> | ||
9 | #include <asm/core_titan.h> | ||
10 | #undef __EXTERN_INLINE | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/smp.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | |||
25 | #include "proto.h" | ||
26 | #include "pci_impl.h" | ||
27 | |||
28 | /* Save Titan configuration data as the console had it set up. */ | ||
29 | |||
30 | struct | ||
31 | { | ||
32 | unsigned long wsba[4]; | ||
33 | unsigned long wsm[4]; | ||
34 | unsigned long tba[4]; | ||
35 | } saved_config[4] __attribute__((common)); | ||
36 | |||
37 | /* | ||
38 | * BIOS32-style PCI interface: | ||
39 | */ | ||
40 | |||
41 | #define DEBUG_CONFIG 0 | ||
42 | |||
43 | #if DEBUG_CONFIG | ||
44 | # define DBG_CFG(args) printk args | ||
45 | #else | ||
46 | # define DBG_CFG(args) | ||
47 | #endif | ||
48 | |||
49 | |||
50 | /* | ||
51 | * Routines to access TIG registers. | ||
52 | */ | ||
53 | static inline volatile unsigned long * | ||
54 | mk_tig_addr(int offset) | ||
55 | { | ||
56 | return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6)); | ||
57 | } | ||
58 | |||
59 | static inline u8 | ||
60 | titan_read_tig(int offset, u8 value) | ||
61 | { | ||
62 | volatile unsigned long *tig_addr = mk_tig_addr(offset); | ||
63 | return (u8)(*tig_addr & 0xff); | ||
64 | } | ||
65 | |||
66 | static inline void | ||
67 | titan_write_tig(int offset, u8 value) | ||
68 | { | ||
69 | volatile unsigned long *tig_addr = mk_tig_addr(offset); | ||
70 | *tig_addr = (unsigned long)value; | ||
71 | } | ||
72 | |||
73 | |||
74 | /* | ||
75 | * Given a bus, device, and function number, compute resulting | ||
76 | * configuration space address | ||
77 | * accordingly. It is therefore not safe to have concurrent | ||
78 | * invocations to configuration space access routines, but there | ||
79 | * really shouldn't be any need for this. | ||
80 | * | ||
81 | * Note that all config space accesses use Type 1 address format. | ||
82 | * | ||
83 | * Note also that type 1 is determined by non-zero bus number. | ||
84 | * | ||
85 | * Type 1: | ||
86 | * | ||
87 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
88 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
89 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
90 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
91 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
92 | * | ||
93 | * 31:24 reserved | ||
94 | * 23:16 bus number (8 bits = 128 possible buses) | ||
95 | * 15:11 Device number (5 bits) | ||
96 | * 10:8 function number | ||
97 | * 7:2 register number | ||
98 | * | ||
99 | * Notes: | ||
100 | * The function number selects which function of a multi-function device | ||
101 | * (e.g., SCSI and Ethernet). | ||
102 | * | ||
103 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
104 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
105 | * bits. | ||
106 | */ | ||
107 | |||
108 | static int | ||
109 | mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, | ||
110 | unsigned long *pci_addr, unsigned char *type1) | ||
111 | { | ||
112 | struct pci_controller *hose = pbus->sysdata; | ||
113 | unsigned long addr; | ||
114 | u8 bus = pbus->number; | ||
115 | |||
116 | DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " | ||
117 | "pci_addr=0x%p, type1=0x%p)\n", | ||
118 | bus, device_fn, where, pci_addr, type1)); | ||
119 | |||
120 | if (!pbus->parent) /* No parent means peer PCI bus. */ | ||
121 | bus = 0; | ||
122 | *type1 = (bus != 0); | ||
123 | |||
124 | addr = (bus << 16) | (device_fn << 8) | where; | ||
125 | addr |= hose->config_space_base; | ||
126 | |||
127 | *pci_addr = addr; | ||
128 | DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int | ||
133 | titan_read_config(struct pci_bus *bus, unsigned int devfn, int where, | ||
134 | int size, u32 *value) | ||
135 | { | ||
136 | unsigned long addr; | ||
137 | unsigned char type1; | ||
138 | |||
139 | if (mk_conf_addr(bus, devfn, where, &addr, &type1)) | ||
140 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
141 | |||
142 | switch (size) { | ||
143 | case 1: | ||
144 | *value = __kernel_ldbu(*(vucp)addr); | ||
145 | break; | ||
146 | case 2: | ||
147 | *value = __kernel_ldwu(*(vusp)addr); | ||
148 | break; | ||
149 | case 4: | ||
150 | *value = *(vuip)addr; | ||
151 | break; | ||
152 | } | ||
153 | |||
154 | return PCIBIOS_SUCCESSFUL; | ||
155 | } | ||
156 | |||
157 | static int | ||
158 | titan_write_config(struct pci_bus *bus, unsigned int devfn, int where, | ||
159 | int size, u32 value) | ||
160 | { | ||
161 | unsigned long addr; | ||
162 | unsigned char type1; | ||
163 | |||
164 | if (mk_conf_addr(bus, devfn, where, &addr, &type1)) | ||
165 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
166 | |||
167 | switch (size) { | ||
168 | case 1: | ||
169 | __kernel_stb(value, *(vucp)addr); | ||
170 | mb(); | ||
171 | __kernel_ldbu(*(vucp)addr); | ||
172 | break; | ||
173 | case 2: | ||
174 | __kernel_stw(value, *(vusp)addr); | ||
175 | mb(); | ||
176 | __kernel_ldwu(*(vusp)addr); | ||
177 | break; | ||
178 | case 4: | ||
179 | *(vuip)addr = value; | ||
180 | mb(); | ||
181 | *(vuip)addr; | ||
182 | break; | ||
183 | } | ||
184 | |||
185 | return PCIBIOS_SUCCESSFUL; | ||
186 | } | ||
187 | |||
188 | struct pci_ops titan_pci_ops = | ||
189 | { | ||
190 | .read = titan_read_config, | ||
191 | .write = titan_write_config, | ||
192 | }; | ||
193 | |||
194 | |||
195 | void | ||
196 | titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) | ||
197 | { | ||
198 | titan_pachip *pachip = | ||
199 | (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0; | ||
200 | titan_pachip_port *port; | ||
201 | volatile unsigned long *csr; | ||
202 | unsigned long value; | ||
203 | |||
204 | /* Get the right hose. */ | ||
205 | port = &pachip->g_port; | ||
206 | if (hose->index & 2) | ||
207 | port = &pachip->a_port; | ||
208 | |||
209 | /* We can invalidate up to 8 tlb entries in a go. The flush | ||
210 | matches against <31:16> in the pci address. | ||
211 | Note that gtlbi* and atlbi* are in the same place in the g_port | ||
212 | and a_port, respectively, so the g_port offset can be used | ||
213 | even if hose is an a_port */ | ||
214 | csr = &port->port_specific.g.gtlbia.csr; | ||
215 | if (((start ^ end) & 0xffff0000) == 0) | ||
216 | csr = &port->port_specific.g.gtlbiv.csr; | ||
217 | |||
218 | /* For TBIA, it doesn't matter what value we write. For TBI, | ||
219 | it's the shifted tag bits. */ | ||
220 | value = (start & 0xffff0000) >> 12; | ||
221 | |||
222 | wmb(); | ||
223 | *csr = value; | ||
224 | mb(); | ||
225 | *csr; | ||
226 | } | ||
227 | |||
228 | static int | ||
229 | titan_query_agp(titan_pachip_port *port) | ||
230 | { | ||
231 | union TPAchipPCTL pctl; | ||
232 | |||
233 | /* set up APCTL */ | ||
234 | pctl.pctl_q_whole = port->pctl.csr; | ||
235 | |||
236 | return pctl.pctl_r_bits.apctl_v_agp_present; | ||
237 | |||
238 | } | ||
239 | |||
240 | static void __init | ||
241 | titan_init_one_pachip_port(titan_pachip_port *port, int index) | ||
242 | { | ||
243 | struct pci_controller *hose; | ||
244 | |||
245 | hose = alloc_pci_controller(); | ||
246 | if (index == 0) | ||
247 | pci_isa_hose = hose; | ||
248 | hose->io_space = alloc_resource(); | ||
249 | hose->mem_space = alloc_resource(); | ||
250 | |||
251 | /* | ||
252 | * This is for userland consumption. The 40-bit PIO bias that we | ||
253 | * use in the kernel through KSEG doesn't work in the page table | ||
254 | * based user mappings. (43-bit KSEG sign extends the physical | ||
255 | * address from bit 40 to hit the I/O bit - mapped addresses don't). | ||
256 | * So make sure we get the 43-bit PIO bias. | ||
257 | */ | ||
258 | hose->sparse_mem_base = 0; | ||
259 | hose->sparse_io_base = 0; | ||
260 | hose->dense_mem_base | ||
261 | = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL; | ||
262 | hose->dense_io_base | ||
263 | = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL; | ||
264 | |||
265 | hose->config_space_base = TITAN_CONF(index); | ||
266 | hose->index = index; | ||
267 | |||
268 | hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS; | ||
269 | hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1; | ||
270 | hose->io_space->name = pci_io_names[index]; | ||
271 | hose->io_space->flags = IORESOURCE_IO; | ||
272 | |||
273 | hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS; | ||
274 | hose->mem_space->end = hose->mem_space->start + 0xffffffff; | ||
275 | hose->mem_space->name = pci_mem_names[index]; | ||
276 | hose->mem_space->flags = IORESOURCE_MEM; | ||
277 | |||
278 | if (request_resource(&ioport_resource, hose->io_space) < 0) | ||
279 | printk(KERN_ERR "Failed to request IO on hose %d\n", index); | ||
280 | if (request_resource(&iomem_resource, hose->mem_space) < 0) | ||
281 | printk(KERN_ERR "Failed to request MEM on hose %d\n", index); | ||
282 | |||
283 | /* | ||
284 | * Save the existing PCI window translations. SRM will | ||
285 | * need them when we go to reboot. | ||
286 | */ | ||
287 | saved_config[index].wsba[0] = port->wsba[0].csr; | ||
288 | saved_config[index].wsm[0] = port->wsm[0].csr; | ||
289 | saved_config[index].tba[0] = port->tba[0].csr; | ||
290 | |||
291 | saved_config[index].wsba[1] = port->wsba[1].csr; | ||
292 | saved_config[index].wsm[1] = port->wsm[1].csr; | ||
293 | saved_config[index].tba[1] = port->tba[1].csr; | ||
294 | |||
295 | saved_config[index].wsba[2] = port->wsba[2].csr; | ||
296 | saved_config[index].wsm[2] = port->wsm[2].csr; | ||
297 | saved_config[index].tba[2] = port->tba[2].csr; | ||
298 | |||
299 | saved_config[index].wsba[3] = port->wsba[3].csr; | ||
300 | saved_config[index].wsm[3] = port->wsm[3].csr; | ||
301 | saved_config[index].tba[3] = port->tba[3].csr; | ||
302 | |||
303 | /* | ||
304 | * Set up the PCI to main memory translation windows. | ||
305 | * | ||
306 | * Note: Window 3 on Titan is Scatter-Gather ONLY. | ||
307 | * | ||
308 | * Window 0 is scatter-gather 8MB at 8MB (for isa) | ||
309 | * Window 1 is direct access 1GB at 2GB | ||
310 | * Window 2 is scatter-gather 1GB at 3GB | ||
311 | */ | ||
312 | hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); | ||
313 | hose->sg_isa->align_entry = 8; /* 64KB for ISA */ | ||
314 | |||
315 | hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0); | ||
316 | hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ | ||
317 | |||
318 | port->wsba[0].csr = hose->sg_isa->dma_base | 3; | ||
319 | port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; | ||
320 | port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); | ||
321 | |||
322 | port->wsba[1].csr = __direct_map_base | 1; | ||
323 | port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000; | ||
324 | port->tba[1].csr = 0; | ||
325 | |||
326 | port->wsba[2].csr = hose->sg_pci->dma_base | 3; | ||
327 | port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000; | ||
328 | port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); | ||
329 | |||
330 | port->wsba[3].csr = 0; | ||
331 | |||
332 | /* Enable the Monster Window to make DAC pci64 possible. */ | ||
333 | port->pctl.csr |= pctl_m_mwin; | ||
334 | |||
335 | /* | ||
336 | * If it's an AGP port, initialize agplastwr. | ||
337 | */ | ||
338 | if (titan_query_agp(port)) | ||
339 | port->port_specific.a.agplastwr.csr = __direct_map_base; | ||
340 | |||
341 | titan_pci_tbi(hose, 0, -1); | ||
342 | } | ||
343 | |||
344 | static void __init | ||
345 | titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1) | ||
346 | { | ||
347 | int pchip1_present = TITAN_cchip->csc.csr & 1L<<14; | ||
348 | |||
349 | /* Init the ports in hose order... */ | ||
350 | titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */ | ||
351 | if (pchip1_present) | ||
352 | titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */ | ||
353 | titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */ | ||
354 | if (pchip1_present) | ||
355 | titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */ | ||
356 | } | ||
357 | |||
358 | static void __init | ||
359 | titan_init_vga_hose(void) | ||
360 | { | ||
361 | #ifdef CONFIG_VGA_HOSE | ||
362 | u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); | ||
363 | |||
364 | if (pu64[7] == 3) { /* TERM_TYPE == graphics */ | ||
365 | struct pci_controller *hose; | ||
366 | int h = (pu64[30] >> 24) & 0xff; /* console hose # */ | ||
367 | |||
368 | /* | ||
369 | * Our hose numbering matches the console's, so just find | ||
370 | * the right one... | ||
371 | */ | ||
372 | for (hose = hose_head; hose; hose = hose->next) { | ||
373 | if (hose->index == h) break; | ||
374 | } | ||
375 | |||
376 | if (hose) { | ||
377 | printk("Console graphics on hose %d\n", hose->index); | ||
378 | pci_vga_hose = hose; | ||
379 | } | ||
380 | } | ||
381 | #endif /* CONFIG_VGA_HOSE */ | ||
382 | } | ||
383 | |||
384 | void __init | ||
385 | titan_init_arch(void) | ||
386 | { | ||
387 | #if 0 | ||
388 | printk("%s: titan_init_arch()\n", __FUNCTION__); | ||
389 | printk("%s: CChip registers:\n", __FUNCTION__); | ||
390 | printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr); | ||
391 | printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr); | ||
392 | printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr); | ||
393 | printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr); | ||
394 | printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr); | ||
395 | printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr); | ||
396 | printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr); | ||
397 | printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr); | ||
398 | |||
399 | printk("%s: DChip registers:\n", __FUNCTION__); | ||
400 | printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr); | ||
401 | printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr); | ||
402 | printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr); | ||
403 | #endif | ||
404 | |||
405 | boot_cpuid = __hard_smp_processor_id(); | ||
406 | |||
407 | /* With multiple PCI busses, we play with I/O as physical addrs. */ | ||
408 | ioport_resource.end = ~0UL; | ||
409 | |||
410 | /* PCI DMA Direct Mapping is 1GB at 2GB. */ | ||
411 | __direct_map_base = 0x80000000; | ||
412 | __direct_map_size = 0x40000000; | ||
413 | |||
414 | /* Init the PA chip(s). */ | ||
415 | titan_init_pachips(TITAN_pachip0, TITAN_pachip1); | ||
416 | |||
417 | /* Check for graphic console location (if any). */ | ||
418 | titan_init_vga_hose(); | ||
419 | } | ||
420 | |||
421 | static void | ||
422 | titan_kill_one_pachip_port(titan_pachip_port *port, int index) | ||
423 | { | ||
424 | port->wsba[0].csr = saved_config[index].wsba[0]; | ||
425 | port->wsm[0].csr = saved_config[index].wsm[0]; | ||
426 | port->tba[0].csr = saved_config[index].tba[0]; | ||
427 | |||
428 | port->wsba[1].csr = saved_config[index].wsba[1]; | ||
429 | port->wsm[1].csr = saved_config[index].wsm[1]; | ||
430 | port->tba[1].csr = saved_config[index].tba[1]; | ||
431 | |||
432 | port->wsba[2].csr = saved_config[index].wsba[2]; | ||
433 | port->wsm[2].csr = saved_config[index].wsm[2]; | ||
434 | port->tba[2].csr = saved_config[index].tba[2]; | ||
435 | |||
436 | port->wsba[3].csr = saved_config[index].wsba[3]; | ||
437 | port->wsm[3].csr = saved_config[index].wsm[3]; | ||
438 | port->tba[3].csr = saved_config[index].tba[3]; | ||
439 | } | ||
440 | |||
441 | static void | ||
442 | titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1) | ||
443 | { | ||
444 | int pchip1_present = TITAN_cchip->csc.csr & 1L<<14; | ||
445 | |||
446 | if (pchip1_present) { | ||
447 | titan_kill_one_pachip_port(&pachip1->g_port, 1); | ||
448 | titan_kill_one_pachip_port(&pachip1->a_port, 3); | ||
449 | } | ||
450 | titan_kill_one_pachip_port(&pachip0->g_port, 0); | ||
451 | titan_kill_one_pachip_port(&pachip0->a_port, 2); | ||
452 | } | ||
453 | |||
454 | void | ||
455 | titan_kill_arch(int mode) | ||
456 | { | ||
457 | titan_kill_pachips(TITAN_pachip0, TITAN_pachip1); | ||
458 | } | ||
459 | |||
460 | |||
461 | /* | ||
462 | * IO map support. | ||
463 | */ | ||
464 | |||
465 | void __iomem * | ||
466 | titan_ioremap(unsigned long addr, unsigned long size) | ||
467 | { | ||
468 | int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT; | ||
469 | unsigned long baddr = addr & ~TITAN_HOSE_MASK; | ||
470 | unsigned long last = baddr + size - 1; | ||
471 | struct pci_controller *hose; | ||
472 | struct vm_struct *area; | ||
473 | unsigned long vaddr; | ||
474 | unsigned long *ptes; | ||
475 | unsigned long pfn; | ||
476 | |||
477 | /* | ||
478 | * Adjust the addr. | ||
479 | */ | ||
480 | #ifdef CONFIG_VGA_HOSE | ||
481 | if (pci_vga_hose && __titan_is_mem_vga(addr)) { | ||
482 | h = pci_vga_hose->index; | ||
483 | addr += pci_vga_hose->mem_space->start; | ||
484 | } | ||
485 | #endif | ||
486 | |||
487 | /* | ||
488 | * Find the hose. | ||
489 | */ | ||
490 | for (hose = hose_head; hose; hose = hose->next) | ||
491 | if (hose->index == h) | ||
492 | break; | ||
493 | if (!hose) | ||
494 | return NULL; | ||
495 | |||
496 | /* | ||
497 | * Is it direct-mapped? | ||
498 | */ | ||
499 | if ((baddr >= __direct_map_base) && | ||
500 | ((baddr + size - 1) < __direct_map_base + __direct_map_size)) { | ||
501 | vaddr = addr - __direct_map_base + TITAN_MEM_BIAS; | ||
502 | return (void __iomem *) vaddr; | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Check the scatter-gather arena. | ||
507 | */ | ||
508 | if (hose->sg_pci && | ||
509 | baddr >= (unsigned long)hose->sg_pci->dma_base && | ||
510 | last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){ | ||
511 | |||
512 | /* | ||
513 | * Adjust the limits (mappings must be page aligned) | ||
514 | */ | ||
515 | baddr -= hose->sg_pci->dma_base; | ||
516 | last -= hose->sg_pci->dma_base; | ||
517 | baddr &= PAGE_MASK; | ||
518 | size = PAGE_ALIGN(last) - baddr; | ||
519 | |||
520 | /* | ||
521 | * Map it | ||
522 | */ | ||
523 | area = get_vm_area(size, VM_IOREMAP); | ||
524 | if (!area) | ||
525 | return NULL; | ||
526 | |||
527 | ptes = hose->sg_pci->ptes; | ||
528 | for (vaddr = (unsigned long)area->addr; | ||
529 | baddr <= last; | ||
530 | baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { | ||
531 | pfn = ptes[baddr >> PAGE_SHIFT]; | ||
532 | if (!(pfn & 1)) { | ||
533 | printk("ioremap failed... pte not valid...\n"); | ||
534 | vfree(area->addr); | ||
535 | return NULL; | ||
536 | } | ||
537 | pfn >>= 1; /* make it a true pfn */ | ||
538 | |||
539 | if (__alpha_remap_area_pages(vaddr, | ||
540 | pfn << PAGE_SHIFT, | ||
541 | PAGE_SIZE, 0)) { | ||
542 | printk("FAILED to map...\n"); | ||
543 | vfree(area->addr); | ||
544 | return NULL; | ||
545 | } | ||
546 | } | ||
547 | |||
548 | flush_tlb_all(); | ||
549 | |||
550 | vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); | ||
551 | return (void __iomem *) vaddr; | ||
552 | } | ||
553 | |||
554 | return NULL; | ||
555 | } | ||
556 | |||
557 | void | ||
558 | titan_iounmap(volatile void __iomem *xaddr) | ||
559 | { | ||
560 | unsigned long addr = (unsigned long) xaddr; | ||
561 | if (addr >= VMALLOC_START) | ||
562 | vfree((void *)(PAGE_MASK & addr)); | ||
563 | } | ||
564 | |||
565 | int | ||
566 | titan_is_mmio(const volatile void __iomem *xaddr) | ||
567 | { | ||
568 | unsigned long addr = (unsigned long) xaddr; | ||
569 | |||
570 | if (addr >= VMALLOC_START) | ||
571 | return 1; | ||
572 | else | ||
573 | return (addr & 0x100000000UL) == 0; | ||
574 | } | ||
575 | |||
576 | #ifndef CONFIG_ALPHA_GENERIC | ||
577 | EXPORT_SYMBOL(titan_ioremap); | ||
578 | EXPORT_SYMBOL(titan_iounmap); | ||
579 | EXPORT_SYMBOL(titan_is_mmio); | ||
580 | #endif | ||
581 | |||
582 | /* | ||
583 | * AGP GART Support. | ||
584 | */ | ||
585 | #include <linux/agp_backend.h> | ||
586 | #include <asm/agp_backend.h> | ||
587 | #include <linux/slab.h> | ||
588 | #include <linux/delay.h> | ||
589 | |||
590 | struct titan_agp_aperture { | ||
591 | struct pci_iommu_arena *arena; | ||
592 | long pg_start; | ||
593 | long pg_count; | ||
594 | }; | ||
595 | |||
596 | static int | ||
597 | titan_agp_setup(alpha_agp_info *agp) | ||
598 | { | ||
599 | struct titan_agp_aperture *aper; | ||
600 | |||
601 | if (!alpha_agpgart_size) | ||
602 | return -ENOMEM; | ||
603 | |||
604 | aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL); | ||
605 | if (aper == NULL) | ||
606 | return -ENOMEM; | ||
607 | |||
608 | aper->arena = agp->hose->sg_pci; | ||
609 | aper->pg_count = alpha_agpgart_size / PAGE_SIZE; | ||
610 | aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, | ||
611 | aper->pg_count - 1); | ||
612 | if (aper->pg_start < 0) { | ||
613 | printk(KERN_ERR "Failed to reserve AGP memory\n"); | ||
614 | kfree(aper); | ||
615 | return -ENOMEM; | ||
616 | } | ||
617 | |||
618 | agp->aperture.bus_base = | ||
619 | aper->arena->dma_base + aper->pg_start * PAGE_SIZE; | ||
620 | agp->aperture.size = aper->pg_count * PAGE_SIZE; | ||
621 | agp->aperture.sysdata = aper; | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | static void | ||
627 | titan_agp_cleanup(alpha_agp_info *agp) | ||
628 | { | ||
629 | struct titan_agp_aperture *aper = agp->aperture.sysdata; | ||
630 | int status; | ||
631 | |||
632 | status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); | ||
633 | if (status == -EBUSY) { | ||
634 | printk(KERN_WARNING | ||
635 | "Attempted to release bound AGP memory - unbinding\n"); | ||
636 | iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); | ||
637 | status = iommu_release(aper->arena, aper->pg_start, | ||
638 | aper->pg_count); | ||
639 | } | ||
640 | if (status < 0) | ||
641 | printk(KERN_ERR "Failed to release AGP memory\n"); | ||
642 | |||
643 | kfree(aper); | ||
644 | kfree(agp); | ||
645 | } | ||
646 | |||
/*
 * Program the Titan pachip AGP port control register (PCTL) from the
 * negotiated mode: side-band addressing, transfer rate, request-queue
 * depths, and the master AGP enable bit.  Read-modify-write of a
 * live chipset CSR -- statement order matters.
 */
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;	/* snapshot current CSR */

	/* Side-Band Addressing? */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;	/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user. */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it. */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec). */
	udelay(100);

	return 0;
}
688 | |||
689 | static int | ||
690 | titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) | ||
691 | { | ||
692 | struct titan_agp_aperture *aper = agp->aperture.sysdata; | ||
693 | return iommu_bind(aper->arena, aper->pg_start + pg_start, | ||
694 | mem->page_count, mem->memory); | ||
695 | } | ||
696 | |||
697 | static int | ||
698 | titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) | ||
699 | { | ||
700 | struct titan_agp_aperture *aper = agp->aperture.sysdata; | ||
701 | return iommu_unbind(aper->arena, aper->pg_start + pg_start, | ||
702 | mem->page_count); | ||
703 | } | ||
704 | |||
705 | static unsigned long | ||
706 | titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr) | ||
707 | { | ||
708 | struct titan_agp_aperture *aper = agp->aperture.sysdata; | ||
709 | unsigned long baddr = addr - aper->arena->dma_base; | ||
710 | unsigned long pte; | ||
711 | |||
712 | if (addr < agp->aperture.bus_base || | ||
713 | addr >= agp->aperture.bus_base + agp->aperture.size) { | ||
714 | printk("%s: addr out of range\n", __FUNCTION__); | ||
715 | return -EINVAL; | ||
716 | } | ||
717 | |||
718 | pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; | ||
719 | if (!(pte & 1)) { | ||
720 | printk("%s: pte not valid\n", __FUNCTION__); | ||
721 | return -EINVAL; | ||
722 | } | ||
723 | |||
724 | return (pte >> 1) << PAGE_SHIFT; | ||
725 | } | ||
726 | |||
/* Chipset hooks handed to the generic alpha AGP backend. */
struct alpha_agp_ops titan_agp_ops =
{
	.setup		= titan_agp_setup,
	.cleanup	= titan_agp_cleanup,
	.configure	= titan_agp_configure,
	.bind		= titan_agp_bind_memory,
	.unbind		= titan_agp_unbind_memory,
	.translate	= titan_agp_translate
};
736 | |||
737 | alpha_agp_info * | ||
738 | titan_agp_info(void) | ||
739 | { | ||
740 | alpha_agp_info *agp; | ||
741 | struct pci_controller *hose; | ||
742 | titan_pachip_port *port; | ||
743 | int hosenum = -1; | ||
744 | union TPAchipPCTL pctl; | ||
745 | |||
746 | /* | ||
747 | * Find the AGP port. | ||
748 | */ | ||
749 | port = &TITAN_pachip0->a_port; | ||
750 | if (titan_query_agp(port)) | ||
751 | hosenum = 2; | ||
752 | if (hosenum < 0 && | ||
753 | titan_query_agp(port = &TITAN_pachip1->a_port)) | ||
754 | hosenum = 3; | ||
755 | |||
756 | /* | ||
757 | * Find the hose the port is on. | ||
758 | */ | ||
759 | for (hose = hose_head; hose; hose = hose->next) | ||
760 | if (hose->index == hosenum) | ||
761 | break; | ||
762 | |||
763 | if (!hose || !hose->sg_pci) | ||
764 | return NULL; | ||
765 | |||
766 | /* | ||
767 | * Allocate the info structure. | ||
768 | */ | ||
769 | agp = kmalloc(sizeof(*agp), GFP_KERNEL); | ||
770 | |||
771 | /* | ||
772 | * Fill it in. | ||
773 | */ | ||
774 | agp->hose = hose; | ||
775 | agp->private = port; | ||
776 | agp->ops = &titan_agp_ops; | ||
777 | |||
778 | /* | ||
779 | * Aperture - not configured until ops.setup(). | ||
780 | * | ||
781 | * FIXME - should we go ahead and allocate it here? | ||
782 | */ | ||
783 | agp->aperture.bus_base = 0; | ||
784 | agp->aperture.size = 0; | ||
785 | agp->aperture.sysdata = NULL; | ||
786 | |||
787 | /* | ||
788 | * Capabilities. | ||
789 | */ | ||
790 | agp->capability.lw = 0; | ||
791 | agp->capability.bits.rate = 3; /* 2x, 1x */ | ||
792 | agp->capability.bits.sba = 1; | ||
793 | agp->capability.bits.rq = 7; /* 8 - 1 */ | ||
794 | |||
795 | /* | ||
796 | * Mode. | ||
797 | */ | ||
798 | pctl.pctl_q_whole = port->pctl.csr; | ||
799 | agp->mode.lw = 0; | ||
800 | agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate; | ||
801 | agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en; | ||
802 | agp->mode.bits.rq = 7; /* RQ Depth? */ | ||
803 | agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en; | ||
804 | |||
805 | return agp; | ||
806 | } | ||
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c new file mode 100644 index 000000000000..8aa305bd6a2c --- /dev/null +++ b/arch/alpha/kernel/core_tsunami.c | |||
@@ -0,0 +1,459 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_tsunami.c | ||
3 | * | ||
4 | * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com). | ||
5 | * | ||
6 | * Code common to all TSUNAMI core logic chips. | ||
7 | */ | ||
8 | |||
9 | #define __EXTERN_INLINE inline | ||
10 | #include <asm/io.h> | ||
11 | #include <asm/core_tsunami.h> | ||
12 | #undef __EXTERN_INLINE | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/smp.h> | ||
22 | |||
23 | #include "proto.h" | ||
24 | #include "pci_impl.h" | ||
25 | |||
/* Save Tsunami configuration data as the console had it set up. */

/* One entry per pchip: the console-programmed PCI window registers
   (base, mask, translated base for windows 0-3), restored by
   tsunami_kill_one_pchip() before handing control back to SRM. */
struct
{
	unsigned long wsba[4];	/* window space base address CSRs */
	unsigned long wsm[4];	/* window space mask CSRs */
	unsigned long tba[4];	/* translated base address CSRs */
} saved_config[2] __attribute__((common));
34 | |||
35 | /* | ||
36 | * NOTE: Herein lie back-to-back mb instructions. They are magic. | ||
37 | * One plausible explanation is that the I/O controller does not properly | ||
38 | * handle the system transaction. Another involves timing. Ho hum. | ||
39 | */ | ||
40 | |||
41 | /* | ||
42 | * BIOS32-style PCI interface: | ||
43 | */ | ||
44 | |||
45 | #define DEBUG_CONFIG 0 | ||
46 | |||
47 | #if DEBUG_CONFIG | ||
48 | # define DBG_CFG(args) printk args | ||
49 | #else | ||
50 | # define DBG_CFG(args) | ||
51 | #endif | ||
52 | |||
53 | |||
54 | /* | ||
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.  It is therefore not safe to have
 * concurrent invocations of configuration space access routines, but
 * there
59 | * really shouldn't be any need for this. | ||
60 | * | ||
61 | * Note that all config space accesses use Type 1 address format. | ||
62 | * | ||
63 | * Note also that type 1 is determined by non-zero bus number. | ||
64 | * | ||
65 | * Type 1: | ||
66 | * | ||
67 | * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 | ||
68 | * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 | ||
69 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
70 | * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| | ||
71 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
72 | * | ||
73 | * 31:24 reserved | ||
74 | * 23:16 bus number (8 bits = 128 possible buses) | ||
75 | * 15:11 Device number (5 bits) | ||
76 | * 10:8 function number | ||
77 | * 7:2 register number | ||
78 | * | ||
79 | * Notes: | ||
80 | * The function number selects which function of a multi-function device | ||
81 | * (e.g., SCSI and Ethernet). | ||
82 | * | ||
83 | * The register selects a DWORD (32 bit) register offset. Hence it | ||
84 | * doesn't get shifted by 2 bits as we want to "drop" the bottom two | ||
85 | * bits. | ||
86 | */ | ||
87 | |||
88 | static int | ||
89 | mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, | ||
90 | unsigned long *pci_addr, unsigned char *type1) | ||
91 | { | ||
92 | struct pci_controller *hose = pbus->sysdata; | ||
93 | unsigned long addr; | ||
94 | u8 bus = pbus->number; | ||
95 | |||
96 | DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " | ||
97 | "pci_addr=0x%p, type1=0x%p)\n", | ||
98 | bus, device_fn, where, pci_addr, type1)); | ||
99 | |||
100 | if (!pbus->parent) /* No parent means peer PCI bus. */ | ||
101 | bus = 0; | ||
102 | *type1 = (bus != 0); | ||
103 | |||
104 | addr = (bus << 16) | (device_fn << 8) | where; | ||
105 | addr |= hose->config_space_base; | ||
106 | |||
107 | *pci_addr = addr; | ||
108 | DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
/*
 * Read 1, 2 or 4 bytes of PCI configuration space.  The address from
 * mk_conf_addr() maps directly into the pchip's config region, so a
 * plain load of the right width performs the config cycle.
 */
static int
tsunami_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);	/* zero-extended byte load */
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);	/* zero-extended word load */
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
136 | |||
/*
 * Write 1, 2 or 4 bytes of PCI configuration space.  Each store is
 * followed by mb() and a read-back of the same location to push the
 * write out to the bus before returning (see the "magic" note at the
 * top of this file).
 */
static int
tsunami_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);	/* read-back flushes the write */
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
167 | |||
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops tsunami_pci_ops =
{
	.read =		tsunami_read_config,
	.write =	tsunami_write_config,
};
173 | |||
/*
 * Invalidate the pchip's DMA translation buffer for [start, end).
 * Uses the targeted-invalidate CSR when the range fits one tag,
 * otherwise invalidates everything.
 */
void
tsunami_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0;
	volatile unsigned long *csr;
	unsigned long value;

	/* We can invalidate up to 8 tlb entries in a go.  The flush
	   matches against <31:16> in the pci address.  */
	csr = &pchip->tlbia.csr;
	if (((start ^ end) & 0xffff0000) == 0)
		csr = &pchip->tlbiv.csr;

	/* For TBIA, it doesn't matter what value we write.  For TBI,
	   it's the shifted tag bits.  */
	value = (start & 0xffff0000) >> 12;

	*csr = value;
	mb();
	*csr;	/* read-back forces the invalidate to complete */
}
195 | |||
196 | #ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI | ||
/*
 * Probe-read *vaddr with machine checks expected: raise IPL just
 * below the machine-check level, mark the mcheck as expected so the
 * handler swallows it, do the read, then drain and check whether a
 * machine check was actually taken.  Returns nonzero if the read
 * did NOT fault (i.e. the location responds).
 */
static long __init
tsunami_probe_read(volatile unsigned long *vaddr)
{
	long dont_care, probe_result;
	int cpu = smp_processor_id();
	int s = swpipl(IPL_MCHECK - 1);

	mcheck_taken(cpu) = 0;
	mcheck_expected(cpu) = 1;
	mb();
	dont_care = *vaddr;	/* the probe access itself */
	draina();		/* drain pending aborts before inspecting */
	mcheck_expected(cpu) = 0;
	probe_result = !mcheck_taken(cpu);
	mcheck_taken(cpu) = 0;
	setipl(s);		/* restore original IPL */

	printk("dont_care == 0x%lx\n", dont_care);

	return probe_result;
}
218 | |||
/*
 * Probe-write *vaddr: clear the cchip's latched NXM (non-existent
 * memory) status, perform the write, then see whether an NXM error
 * was recorded.  On success the original contents are restored.
 * Returns nonzero if the write did NOT hit non-existent memory.
 */
static long __init
tsunami_probe_write(volatile unsigned long *vaddr)
{
	long true_contents, probe_result = 1;

	TSUNAMI_cchip->misc.csr |= (1L << 28); /* clear NXM... */
	true_contents = *vaddr;
	*vaddr = 0;			/* the probe write */
	draina();
	if (TSUNAMI_cchip->misc.csr & (1L << 28)) {
		int source = (TSUNAMI_cchip->misc.csr >> 29) & 7;
		TSUNAMI_cchip->misc.csr |= (1L << 28); /* ...and unlock NXS. */
		probe_result = 0;
		printk("tsunami_probe_write: unit %d at 0x%016lx\n", source,
		       (unsigned long)vaddr);
	}
	if (probe_result)
		*vaddr = true_contents;	/* put the old value back */
	return probe_result;
}
239 | #else | ||
240 | #define tsunami_probe_read(ADDR) 1 | ||
241 | #endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */ | ||
242 | |||
243 | #define FN __FUNCTION__ | ||
244 | |||
/*
 * Probe (NXM-safely) and initialize one pchip: allocate its
 * pci_controller, claim I/O and MEM resources, save the console's
 * PCI window registers for restoration at reboot, then program the
 * ISA scatter-gather, PCI scatter-gather and direct-map DMA windows
 * and enable the monster window.  Register write order is deliberate.
 */
static void __init
tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
{
	struct pci_controller *hose;

	/* Absent pchip: the probe read machine-checks; bail out. */
	if (tsunami_probe_read(&pchip->pctl.csr) == 0)
		return;

	hose = alloc_pci_controller();
	if (index == 0)
		pci_isa_hose = hose;
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/* This is for userland consumption.  For some reason, the 40-bit
	   PIO bias that we use in the kernel through KSEG didn't work for
	   the page table based user mappings.  So make sure we get the
	   43-bit PIO bias.  */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (TSUNAMI_MEM(index) & 0xffffffffffL) | 0x80000000000L;
	hose->dense_io_base
	  = (TSUNAMI_IO(index) & 0xffffffffffL) | 0x80000000000L;

	hose->config_space_base = TSUNAMI_CONF(index);
	hose->index = index;

	hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS;
	hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[index];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[index];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

	/*
	 * Save the existing PCI window translations.  SRM will
	 * need them when we go to reboot.
	 */

	saved_config[index].wsba[0] = pchip->wsba[0].csr;
	saved_config[index].wsm[0] = pchip->wsm[0].csr;
	saved_config[index].tba[0] = pchip->tba[0].csr;

	saved_config[index].wsba[1] = pchip->wsba[1].csr;
	saved_config[index].wsm[1] = pchip->wsm[1].csr;
	saved_config[index].tba[1] = pchip->tba[1].csr;

	saved_config[index].wsba[2] = pchip->wsba[2].csr;
	saved_config[index].wsm[2] = pchip->wsm[2].csr;
	saved_config[index].tba[2] = pchip->tba[2].csr;

	saved_config[index].wsba[3] = pchip->wsba[3].csr;
	saved_config[index].wsm[3] = pchip->wsm[3].csr;
	saved_config[index].tba[3] = pchip->tba[3].csr;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 is scatter-gather only
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is scatter-gather (up to) 1GB at 1GB
	 * Window 2 is direct access 2GB at 2GB
	 *
	 * NOTE: we need the align_entry settings for Acer devices on ES40,
	 * specifically floppy and IDE when memory is larger than 2GB.
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	/* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */
	hose->sg_isa->align_entry = 4;

	hose->sg_pci = iommu_arena_new(hose, 0x40000000,
				       size_for_memory(0x40000000), 0);
	hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	/* Window base | enable(1) | SG(2); mask selects window size. */
	pchip->wsba[0].csr = hose->sg_isa->dma_base | 3;
	pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
	pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);

	pchip->wsba[1].csr = hose->sg_pci->dma_base | 3;
	pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000;
	pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);

	/* Direct-mapped window: enable only, no SG. */
	pchip->wsba[2].csr = 0x80000000 | 1;
	pchip->wsm[2].csr = (0x80000000 - 1) & 0xfff00000;
	pchip->tba[2].csr = 0;

	pchip->wsba[3].csr = 0;	/* window 3 unused */

	/* Enable the Monster Window to make DAC pci64 possible. */
	pchip->pctl.csr |= pctl_m_mwin;

	/* Flush the whole DMA TLB after reprogramming the windows. */
	tsunami_pci_tbi(hose, 0, -1);
}
351 | |||
352 | void __init | ||
353 | tsunami_init_arch(void) | ||
354 | { | ||
355 | #ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI | ||
356 | unsigned long tmp; | ||
357 | |||
358 | /* Ho hum.. init_arch is called before init_IRQ, but we need to be | ||
359 | able to handle machine checks. So install the handler now. */ | ||
360 | wrent(entInt, 0); | ||
361 | |||
362 | /* NXMs just don't matter to Tsunami--unless they make it | ||
363 | choke completely. */ | ||
364 | tmp = (unsigned long)(TSUNAMI_cchip - 1); | ||
365 | printk("%s: probing bogus address: 0x%016lx\n", FN, bogus_addr); | ||
366 | printk("\tprobe %s\n", | ||
367 | tsunami_probe_write((unsigned long *)bogus_addr) | ||
368 | ? "succeeded" : "failed"); | ||
369 | #endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */ | ||
370 | |||
371 | #if 0 | ||
372 | printk("%s: CChip registers:\n", FN); | ||
373 | printk("%s: CSR_CSC 0x%lx\n", FN, TSUNAMI_cchip->csc.csr); | ||
374 | printk("%s: CSR_MTR 0x%lx\n", FN, TSUNAMI_cchip.mtr.csr); | ||
375 | printk("%s: CSR_MISC 0x%lx\n", FN, TSUNAMI_cchip->misc.csr); | ||
376 | printk("%s: CSR_DIM0 0x%lx\n", FN, TSUNAMI_cchip->dim0.csr); | ||
377 | printk("%s: CSR_DIM1 0x%lx\n", FN, TSUNAMI_cchip->dim1.csr); | ||
378 | printk("%s: CSR_DIR0 0x%lx\n", FN, TSUNAMI_cchip->dir0.csr); | ||
379 | printk("%s: CSR_DIR1 0x%lx\n", FN, TSUNAMI_cchip->dir1.csr); | ||
380 | printk("%s: CSR_DRIR 0x%lx\n", FN, TSUNAMI_cchip->drir.csr); | ||
381 | |||
382 | printk("%s: DChip registers:\n"); | ||
383 | printk("%s: CSR_DSC 0x%lx\n", FN, TSUNAMI_dchip->dsc.csr); | ||
384 | printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr); | ||
385 | printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr); | ||
386 | #endif | ||
387 | /* With multiple PCI busses, we play with I/O as physical addrs. */ | ||
388 | ioport_resource.end = ~0UL; | ||
389 | |||
390 | /* Find how many hoses we have, and initialize them. TSUNAMI | ||
391 | and TYPHOON can have 2, but might only have 1 (DS10). */ | ||
392 | |||
393 | tsunami_init_one_pchip(TSUNAMI_pchip0, 0); | ||
394 | if (TSUNAMI_cchip->csc.csr & 1L<<14) | ||
395 | tsunami_init_one_pchip(TSUNAMI_pchip1, 1); | ||
396 | } | ||
397 | |||
398 | static void | ||
399 | tsunami_kill_one_pchip(tsunami_pchip *pchip, int index) | ||
400 | { | ||
401 | pchip->wsba[0].csr = saved_config[index].wsba[0]; | ||
402 | pchip->wsm[0].csr = saved_config[index].wsm[0]; | ||
403 | pchip->tba[0].csr = saved_config[index].tba[0]; | ||
404 | |||
405 | pchip->wsba[1].csr = saved_config[index].wsba[1]; | ||
406 | pchip->wsm[1].csr = saved_config[index].wsm[1]; | ||
407 | pchip->tba[1].csr = saved_config[index].tba[1]; | ||
408 | |||
409 | pchip->wsba[2].csr = saved_config[index].wsba[2]; | ||
410 | pchip->wsm[2].csr = saved_config[index].wsm[2]; | ||
411 | pchip->tba[2].csr = saved_config[index].tba[2]; | ||
412 | |||
413 | pchip->wsba[3].csr = saved_config[index].wsba[3]; | ||
414 | pchip->wsm[3].csr = saved_config[index].wsm[3]; | ||
415 | pchip->tba[3].csr = saved_config[index].tba[3]; | ||
416 | } | ||
417 | |||
418 | void | ||
419 | tsunami_kill_arch(int mode) | ||
420 | { | ||
421 | tsunami_kill_one_pchip(TSUNAMI_pchip0, 0); | ||
422 | if (TSUNAMI_cchip->csc.csr & 1L<<14) | ||
423 | tsunami_kill_one_pchip(TSUNAMI_pchip1, 1); | ||
424 | } | ||
425 | |||
/*
 * Clear one pchip's latched PCI error state: read PERROR, write
 * 0x040 to it, then read it back after mb().  NOTE(review): the
 * leading/trailing reads and the 0x040 value follow the original
 * code; exact PERROR bit semantics should be confirmed against the
 * 21272 chipset documentation.
 */
static inline void
tsunami_pci_clr_err_1(tsunami_pchip *pchip)
{
	pchip->perror.csr;
	pchip->perror.csr = 0x040;
	mb();
	pchip->perror.csr;
}
434 | |||
/* Clear PCI error state on every present pchip. */
static inline void
tsunami_pci_clr_err(void)
{
	tsunami_pci_clr_err_1(TSUNAMI_pchip0);

	/* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */
	if (TSUNAMI_cchip->csc.csr & 1L<<14)
		tsunami_pci_clr_err_1(TSUNAMI_pchip1);
}
444 | |||
/*
 * Machine-check handler: drain outstanding transactions, clear the
 * chipset's PCI error state and the CPU machine-check flags, then
 * hand off to the common reporting code (which suppresses the report
 * if this mcheck was an expected probe).
 */
void
tsunami_machine_check(unsigned long vector, unsigned long la_ptr,
		      struct pt_regs * regs)
{
	/* Clear error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	tsunami_pci_clr_err();
	wrmces(0x7);	/* reset machine check pending flag */
	mb();

	process_mcheck_info(vector, la_ptr, regs, "TSUNAMI",
			    mcheck_expected(smp_processor_id()));
}
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c new file mode 100644 index 000000000000..2b767a1bad96 --- /dev/null +++ b/arch/alpha/kernel/core_wildfire.c | |||
@@ -0,0 +1,658 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/core_wildfire.c | ||
3 | * | ||
4 | * Wildfire support. | ||
5 | * | ||
6 | * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | ||
7 | */ | ||
8 | |||
9 | #define __EXTERN_INLINE inline | ||
10 | #include <asm/io.h> | ||
11 | #include <asm/core_wildfire.h> | ||
12 | #undef __EXTERN_INLINE | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <asm/ptrace.h> | ||
20 | #include <asm/smp.h> | ||
21 | |||
22 | #include "proto.h" | ||
23 | #include "pci_impl.h" | ||
24 | |||
25 | #define DEBUG_CONFIG 0 | ||
26 | #define DEBUG_DUMP_REGS 0 | ||
27 | #define DEBUG_DUMP_CONFIG 1 | ||
28 | |||
29 | #if DEBUG_CONFIG | ||
30 | # define DBG_CFG(args) printk args | ||
31 | #else | ||
32 | # define DBG_CFG(args) | ||
33 | #endif | ||
34 | |||
35 | #if DEBUG_DUMP_REGS | ||
36 | static void wildfire_dump_pci_regs(int qbbno, int hoseno); | ||
37 | static void wildfire_dump_pca_regs(int qbbno, int pcano); | ||
38 | static void wildfire_dump_qsa_regs(int qbbno); | ||
39 | static void wildfire_dump_qsd_regs(int qbbno); | ||
40 | static void wildfire_dump_iop_regs(int qbbno); | ||
41 | static void wildfire_dump_gp_regs(int qbbno); | ||
42 | #endif | ||
43 | #if DEBUG_DUMP_CONFIG | ||
44 | static void wildfire_dump_hardware_config(void); | ||
45 | #endif | ||
46 | |||
/* Maps between hard (physical) and soft (logical) QBB numbers;
   entries still holding QBB_MAP_EMPTY are absent. */
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY	0xff

/* Presence masks filled in by wildfire_hardware_probe(); the qbb/gp/
   hs/iop masks carry one bit per QBB, while the ior/pca/cpu/mem masks
   pack four bits per QBB (shifted by soft_qbb << 2). */
unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;
60 | |||
/*
 * Initialize one hose of a Wildfire QBB/PCA pair: allocate the
 * pci_controller, claim its I/O and MEM resources, then program the
 * four PCI-to-memory DMA windows (two scatter-gather, two direct)
 * and flush the DMA TLB.  Window register write order is deliberate.
 */
void __init
wildfire_init_hose(int qbbno, int hoseno)
{
	struct pci_controller *hose;
	wildfire_pci *pci;

	hose = alloc_pci_controller();
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/* This is for userland consumption. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
	hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

	hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
	hose->index = (qbbno << 3) + hoseno;	/* globally unique hose id */

	hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
	hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[hoseno];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[hoseno];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
		       qbbno, hoseno);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
		       qbbno, hoseno);

#if DEBUG_DUMP_REGS
	wildfire_dump_pci_regs(qbbno, hoseno);
#endif

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 is scatter-gather only
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is direct access 1GB at 2GB
	 * Window 3 is scatter-gather 128MB at 3GB
	 * ??? We ought to scale window 3 memory.
	 *
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);

	pci = WILDFIRE_pci(qbbno, hoseno);

	/* Window base | enable(1) | SG(2); mask selects window size. */
	pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
	pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
	pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

	pci->pci_window[1].wbase.csr = 0x40000000 | 1;
	pci->pci_window[1].wmask.csr = (0x40000000 -1) & 0xfff00000;
	pci->pci_window[1].tbase.csr = 0;

	pci->pci_window[2].wbase.csr = 0x80000000 | 1;
	pci->pci_window[2].wmask.csr = (0x40000000 -1) & 0xfff00000;
	pci->pci_window[2].tbase.csr = 0x40000000;

	pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
	pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
	pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

	wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}
136 | |||
137 | void __init | ||
138 | wildfire_init_pca(int qbbno, int pcano) | ||
139 | { | ||
140 | |||
141 | /* Test for PCA existence first. */ | ||
142 | if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) | ||
143 | return; | ||
144 | |||
145 | #if DEBUG_DUMP_REGS | ||
146 | wildfire_dump_pca_regs(qbbno, pcano); | ||
147 | #endif | ||
148 | |||
149 | /* Do both hoses of the PCA. */ | ||
150 | wildfire_init_hose(qbbno, (pcano << 1) + 0); | ||
151 | wildfire_init_hose(qbbno, (pcano << 1) + 1); | ||
152 | } | ||
153 | |||
154 | void __init | ||
155 | wildfire_init_qbb(int qbbno) | ||
156 | { | ||
157 | int pcano; | ||
158 | |||
159 | /* Test for QBB existence first. */ | ||
160 | if (!WILDFIRE_QBB_EXISTS(qbbno)) | ||
161 | return; | ||
162 | |||
163 | #if DEBUG_DUMP_REGS | ||
164 | wildfire_dump_qsa_regs(qbbno); | ||
165 | wildfire_dump_qsd_regs(qbbno); | ||
166 | wildfire_dump_iop_regs(qbbno); | ||
167 | wildfire_dump_gp_regs(qbbno); | ||
168 | #endif | ||
169 | |||
170 | /* Init all PCAs here. */ | ||
171 | for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) { | ||
172 | wildfire_init_pca(qbbno, pcano); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | void __init | ||
177 | wildfire_hardware_probe(void) | ||
178 | { | ||
179 | unsigned long temp; | ||
180 | unsigned int hard_qbb, soft_qbb; | ||
181 | wildfire_fast_qsd *fast = WILDFIRE_fast_qsd(); | ||
182 | wildfire_qsd *qsd; | ||
183 | wildfire_qsa *qsa; | ||
184 | wildfire_iop *iop; | ||
185 | wildfire_gp *gp; | ||
186 | wildfire_ne *ne; | ||
187 | wildfire_fe *fe; | ||
188 | int i; | ||
189 | |||
190 | temp = fast->qsd_whami.csr; | ||
191 | #if 0 | ||
192 | printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp); | ||
193 | #endif | ||
194 | |||
195 | hard_qbb = (temp >> 8) & 7; | ||
196 | soft_qbb = (temp >> 4) & 7; | ||
197 | |||
198 | /* Init the HW configuration variables. */ | ||
199 | wildfire_hard_qbb_mask = (1 << hard_qbb); | ||
200 | wildfire_soft_qbb_mask = (1 << soft_qbb); | ||
201 | |||
202 | wildfire_gp_mask = 0; | ||
203 | wildfire_hs_mask = 0; | ||
204 | wildfire_iop_mask = 0; | ||
205 | wildfire_ior_mask = 0; | ||
206 | wildfire_pca_mask = 0; | ||
207 | |||
208 | wildfire_cpu_mask = 0; | ||
209 | wildfire_mem_mask = 0; | ||
210 | |||
211 | memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); | ||
212 | memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); | ||
213 | |||
214 | /* First, determine which QBBs are present. */ | ||
215 | qsa = WILDFIRE_qsa(soft_qbb); | ||
216 | |||
217 | temp = qsa->qsa_qbb_id.csr; | ||
218 | #if 0 | ||
219 | printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp); | ||
220 | #endif | ||
221 | |||
222 | if (temp & 0x40) /* Is there an HS? */ | ||
223 | wildfire_hs_mask = 1; | ||
224 | |||
225 | if (temp & 0x20) { /* Is there a GP? */ | ||
226 | gp = WILDFIRE_gp(soft_qbb); | ||
227 | temp = 0; | ||
228 | for (i = 0; i < 4; i++) { | ||
229 | temp |= gp->gpa_qbb_map[i].csr << (i * 8); | ||
230 | #if 0 | ||
231 | printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n", | ||
232 | i, gp, temp); | ||
233 | #endif | ||
234 | } | ||
235 | |||
236 | for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) { | ||
237 | if (temp & 8) { /* Is there a QBB? */ | ||
238 | soft_qbb = temp & 7; | ||
239 | wildfire_hard_qbb_mask |= (1 << hard_qbb); | ||
240 | wildfire_soft_qbb_mask |= (1 << soft_qbb); | ||
241 | } | ||
242 | temp >>= 4; | ||
243 | } | ||
244 | wildfire_gp_mask = wildfire_soft_qbb_mask; | ||
245 | } | ||
246 | |||
247 | /* Next determine each QBBs resources. */ | ||
248 | for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) { | ||
249 | if (WILDFIRE_QBB_EXISTS(soft_qbb)) { | ||
250 | qsd = WILDFIRE_qsd(soft_qbb); | ||
251 | temp = qsd->qsd_whami.csr; | ||
252 | #if 0 | ||
253 | printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp); | ||
254 | #endif | ||
255 | hard_qbb = (temp >> 8) & 7; | ||
256 | wildfire_hard_qbb_map[hard_qbb] = soft_qbb; | ||
257 | wildfire_soft_qbb_map[soft_qbb] = hard_qbb; | ||
258 | |||
259 | qsa = WILDFIRE_qsa(soft_qbb); | ||
260 | temp = qsa->qsa_qbb_pop[0].csr; | ||
261 | #if 0 | ||
262 | printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp); | ||
263 | #endif | ||
264 | wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2); | ||
265 | wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); | ||
266 | |||
267 | temp = qsa->qsa_qbb_pop[1].csr; | ||
268 | #if 0 | ||
269 | printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp); | ||
270 | #endif | ||
271 | wildfire_iop_mask |= (1 << soft_qbb); | ||
272 | wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); | ||
273 | |||
274 | temp = qsa->qsa_qbb_id.csr; | ||
275 | #if 0 | ||
276 | printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp); | ||
277 | #endif | ||
278 | if (temp & 0x20) | ||
279 | wildfire_gp_mask |= (1 << soft_qbb); | ||
280 | |||
281 | /* Probe for PCA existence here. */ | ||
282 | for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) { | ||
283 | iop = WILDFIRE_iop(soft_qbb); | ||
284 | ne = WILDFIRE_ne(soft_qbb, i); | ||
285 | fe = WILDFIRE_fe(soft_qbb, i); | ||
286 | |||
287 | if ((iop->iop_hose[i].init.csr & 1) == 1 && | ||
288 | ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) && | ||
289 | ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL)) | ||
290 | { | ||
291 | wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i); | ||
292 | } | ||
293 | } | ||
294 | |||
295 | } | ||
296 | } | ||
297 | #if DEBUG_DUMP_CONFIG | ||
298 | wildfire_dump_hardware_config(); | ||
299 | #endif | ||
300 | } | ||
301 | |||
/*
 * wildfire_init_arch - one-time Wildfire platform initialization.
 *
 * Opens the ioport resource range to the full address space (with
 * multiple PCI buses, I/O ports are handled as physical addresses),
 * probes the hardware configuration, runs per-QBB init for every QBB
 * index, and establishes the direct-mapped PCI DMA window.
 */
void __init
wildfire_init_arch(void)
{
	int qbbno;

	/* With multiple PCI buses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;


	/* Probe the hardware for info about configuration. */
	wildfire_hardware_probe();

	/* Now init all the found QBBs. */
	/* NOTE(review): wildfire_init_qbb() is called for every index;
	   presumably it checks QBB existence itself — not visible here. */
	for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
		wildfire_init_qbb(qbbno);
	}

	/* Normal direct PCI DMA mapping. */
	__direct_map_base = 0x40000000UL;	/* window base: 1GB */
	__direct_map_size = 0x80000000UL;	/* window size: 2GB */
}
323 | |||
/*
 * wildfire_machine_check - platform machine-check handler.
 *
 * Drains outstanding accesses, re-enables machine checks with
 * wrmces(0x7), and passes the logout area to the generic reporting
 * code.  PCI error registers are not cleared yet (see FIXME below).
 */
void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr,
		       struct pt_regs * regs)
{
	mb();
	mb();  /* magic */
	draina();
	/* FIXME: clear pci errors */
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, regs, "WILDFIRE",
			    mcheck_expected(smp_processor_id()));
}
338 | |||
/*
 * wildfire_kill_arch - halt/reboot/poweroff hook.
 * Intentionally empty: no Wildfire-specific shutdown work is needed.
 */
void
wildfire_kill_arch(int mode)
{
}
343 | |||
/*
 * wildfire_pci_tbi - invalidate the PCI translation buffer of a hose.
 *
 * The hose index encodes the QBB number (index >> 3) and the hose
 * within that QBB (index & 7).  Reading the flush-TLB CSR performs the
 * invalidation; the start/end range arguments are unused, so the whole
 * TLB is flushed on every call.
 */
void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	int qbbno = hose->index >> 3;
	int hoseno = hose->index & 7;
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

	mb();
	pci->pci_flush_tlb.csr; /* reading does the trick */
}
354 | |||
/*
 * mk_conf_addr - build the address used for a PCI configuration cycle.
 *
 * The address is hose->config_space_base ORed with the standard
 * bus<<16 | devfn<<8 | where encoding.  A bus with no parent is a
 * peer (root) bus and is forced to bus 0; *type1 records whether a
 * type 1 (behind-a-bridge) cycle is required.  Always returns 0
 * (success).
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}
378 | |||
/*
 * wildfire_read_config - pci_ops.read: 1/2/4-byte PCI config read.
 *
 * Maps (bus, devfn, where) to a config-space address via mk_conf_addr
 * and performs a volatile load of the requested width, zero-extending
 * byte and word reads.  Sizes other than 1/2/4 leave *value untouched.
 */
static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);	/* zero-extended u8 */
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);	/* zero-extended u16 */
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
403 | |||
/*
 * wildfire_write_config - pci_ops.write: 1/2/4-byte PCI config write.
 *
 * Performs a volatile store of the requested width, then a memory
 * barrier and a read-back of the same location — presumably to force
 * the posted write out to the bus before returning (TODO confirm
 * against chipset docs).  Sizes other than 1/2/4 are silently ignored.
 */
static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);	/* read back */
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);	/* read back */
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;			/* read back */
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
434 | |||
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops wildfire_pci_ops =
{
	.read =		wildfire_read_config,
	.write =	wildfire_write_config,
};
440 | |||
441 | |||
442 | /* | ||
443 | * NUMA Support | ||
444 | */ | ||
/*
 * wildfire_pa_to_nid - map a physical address to its home node id.
 *
 * Each node owns a 64GB (2^36 byte) slice of the physical address
 * space, so the node id is just the address divided by 64GB.
 */
int wildfire_pa_to_nid(unsigned long pa)
{
	unsigned long nid = pa >> 36;	/* 2^36 bytes per node */

	return (int)nid;
}
449 | |||
/*
 * wildfire_cpuid_to_nid - map a CPU id to its node id.
 *
 * Four CPUs live on each node, so strip the low two bits.
 */
int wildfire_cpuid_to_nid(int cpuid)
{
	int nid = cpuid >> 2;	/* assume 4 CPUs per node */

	return nid;
}
455 | |||
/*
 * wildfire_node_mem_start - first physical address owned by a node.
 *
 * Nodes carve the physical address space into fixed 64GB slices, so
 * node N starts at N * 2^36.  The shift is arithmetically identical
 * to multiplying by 64GB (both are mod 2^64).
 */
unsigned long wildfire_node_mem_start(int nid)
{
	/* 64GB == 1 << 36 bytes per node */
	return (unsigned long)nid << 36;
}
461 | |||
/*
 * wildfire_node_mem_size - size of a node's memory slice.
 *
 * Every node gets the same fixed 64GB slice, regardless of which
 * node is asked about (nid is accepted for interface symmetry).
 */
unsigned long wildfire_node_mem_size(int nid)
{
	/* 64GB per node, expressed as a power of two */
	return 1UL << 36;
}
467 | |||
468 | #if DEBUG_DUMP_REGS | ||
469 | |||
/*
 * wildfire_dump_pci_regs - debug aid (compiled under DEBUG_DUMP_REGS):
 * log the control/error CSRs and the four DMA window register triples
 * of one PCI hose at KERN_ERR.
 */
static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
	int i;

	printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);

	printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
	       pci->pci_io_addr_ext.csr);
	printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
	printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
	printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
	printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
	printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
	printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

	printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);
	for (i = 0; i < 4; i++) {
		/* wbase / wmask / tbase per window */
		printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
		       pci->pci_window[i].wbase.csr,
		       pci->pci_window[i].wmask.csr,
		       pci->pci_window[i].tbase.csr);
	}
	printk(KERN_ERR "\n");
}
498 | |||
/*
 * wildfire_dump_pca_regs - debug aid (compiled under DEBUG_DUMP_REGS):
 * log the identity/error/interrupt CSRs and the four interrupt
 * target/enable pairs of one PCA at KERN_ERR.
 */
static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
	wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
	int i;

	printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);

	printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
	printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
	printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
	printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
	printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
	       pca->pca_stdio_edge_level.csr);

	printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
		       pca->pca_int[i].target.csr,
		       pca->pca_int[i].enable.csr);
	}

	printk(KERN_ERR "\n");
}
525 | |||
526 | static void __init | ||
527 | wildfire_dump_qsa_regs(int qbbno) | ||
528 | { | ||
529 | wildfire_qsa *qsa = WILDFIRE_qsa(qbbno); | ||
530 | int i; | ||
531 | |||
532 | printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa); | ||
533 | |||
534 | printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr); | ||
535 | printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr); | ||
536 | printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr); | ||
537 | |||
538 | for (i = 0; i < 5; i++) | ||
539 | printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n", | ||
540 | i, qsa->qsa_config[i].csr); | ||
541 | |||
542 | for (i = 0; i < 2; i++) | ||
543 | printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n", | ||
544 | i, qsa->qsa_qbb_pop[0].csr); | ||
545 | |||
546 | printk(KERN_ERR "\n"); | ||
547 | } | ||
548 | |||
549 | static void __init | ||
550 | wildfire_dump_qsd_regs(int qbbno) | ||
551 | { | ||
552 | wildfire_qsd *qsd = WILDFIRE_qsd(qbbno); | ||
553 | |||
554 | printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd); | ||
555 | |||
556 | printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr); | ||
557 | printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr); | ||
558 | printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n", | ||
559 | qsd->qsd_port_present.csr); | ||
560 | printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n", | ||
561 | qsd->qsd_port_active.csr); | ||
562 | printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n", | ||
563 | qsd->qsd_fault_ena.csr); | ||
564 | printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n", | ||
565 | qsd->qsd_cpu_int_ena.csr); | ||
566 | printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n", | ||
567 | qsd->qsd_mem_config.csr); | ||
568 | printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n", | ||
569 | qsd->qsd_err_sum.csr); | ||
570 | |||
571 | printk(KERN_ERR "\n"); | ||
572 | } | ||
573 | |||
/*
 * wildfire_dump_iop_regs - debug aid (compiled under DEBUG_DUMP_REGS):
 * log the IOP config/credit CSRs, the four hose init registers, and
 * the four device-interrupt target registers of one QBB at KERN_ERR.
 */
static void __init
wildfire_dump_iop_regs(int qbbno)
{
	wildfire_iop *iop = WILDFIRE_iop(qbbno);
	int i;

	printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

	printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
	printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
	printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
	       iop->iop_switch_credits.csr);
	printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
	       iop->iop_hose_credits.csr);

	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
		       i, iop->iop_hose[i].init.csr);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
		       i, iop->iop_dev_int[i].target.csr);

	printk(KERN_ERR "\n");
}
598 | |||
/*
 * wildfire_dump_gp_regs - debug aid (compiled under DEBUG_DUMP_REGS):
 * log the GP (global port) CSRs of one QBB at KERN_ERR, including the
 * four QBB map registers.
 */
static void __init
wildfire_dump_gp_regs(int qbbno)
{
	wildfire_gp *gp = WILDFIRE_gp(qbbno);
	int i;

	printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
		       i, gp->gpa_qbb_map[i].csr);

	printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
	       gp->gpa_mem_pop_map.csr);
	printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
	printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
	printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
	printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
	printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

	printk(KERN_ERR "\n");
}
620 | #endif /* DUMP_REGS */ | ||
621 | |||
622 | #if DEBUG_DUMP_CONFIG | ||
/*
 * wildfire_dump_hardware_config - debug aid (compiled under
 * DEBUG_DUMP_CONFIG): print every mask and map filled in by
 * wildfire_hardware_probe().  Empty map slots (QBB_MAP_EMPTY) are
 * shown as "---".
 */
static void __init
wildfire_dump_hardware_config(void)
{
	int i;

	printk(KERN_ERR "Probed Hardware Configuration\n");

	printk(KERN_ERR " hard_qbb_mask:  0x%16lx\n", wildfire_hard_qbb_mask);
	printk(KERN_ERR " soft_qbb_mask:  0x%16lx\n", wildfire_soft_qbb_mask);

	printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
	printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
	printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
	printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
	printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

	printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
	printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

	printk(" hard_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
	    if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
		printk("--- ");
	    else
		printk("%3d ", wildfire_hard_qbb_map[i]);
	printk("\n");

	printk(" soft_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
	    if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
		printk("--- ");
	    else
		printk("%3d ", wildfire_soft_qbb_map[i]);
	printk("\n");
}
658 | #endif /* DUMP_CONFIG */ | ||
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S new file mode 100644 index 000000000000..f0927ee53f29 --- /dev/null +++ b/arch/alpha/kernel/entry.S | |||
@@ -0,0 +1,957 @@ | |||
1 | /* | ||
2 | * arch/alpha/kernel/entry.S | ||
3 | * | ||
4 | * Kernel entry-points. | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <asm/asm_offsets.h> | ||
9 | #include <asm/thread_info.h> | ||
10 | #include <asm/pal.h> | ||
11 | #include <asm/errno.h> | ||
12 | #include <asm/unistd.h> | ||
13 | |||
	.text
	.set noat			/* $28 (at) is stored/loaded explicitly below */

/* Stack offsets. */
#define SP_OFF			184	/* bytes reserved by SAVE_ALL for the pt-regs frame */
#define SWITCH_STACK_SIZE	320	/* bytes reserved by do_switch_stack */
20 | |||
21 | /* | ||
22 | * This defines the normal kernel pt-regs layout. | ||
23 | * | ||
24 | * regs 9-15 preserved by C code | ||
25 | * regs 16-18 saved by PAL-code | ||
26 | * regs 29-30 saved and set up by PAL-code | ||
27 | * JRP - Save regs 16-18 in a special area of the stack, so that | ||
28 | * the palcode-provided values are available to the signal handler. | ||
29 | */ | ||
30 | |||
/*
 * SAVE_ALL: allocate SP_OFF bytes and save the scratch integer state
 * ($0-$8, $19-$28) into the frame.  The PAL-saved a0-a2 ($16-$18) are
 * also stored (slots 160/168/176) so signal handlers can see them.
 * Interleaved with the stores, $2 is used to fetch the cached HAE
 * value (alpha_mv + HAE_CACHE) into slot 152.
 */
#define SAVE_ALL			\
	subq	$sp, SP_OFF, $sp;	\
	stq	$0, 0($sp);		\
	stq	$1, 8($sp);		\
	stq	$2, 16($sp);		\
	stq	$3, 24($sp);		\
	stq	$4, 32($sp);		\
	stq	$28, 144($sp);		\
	lda	$2, alpha_mv;		\
	stq	$5, 40($sp);		\
	stq	$6, 48($sp);		\
	stq	$7, 56($sp);		\
	stq	$8, 64($sp);		\
	stq	$19, 72($sp);		\
	stq	$20, 80($sp);		\
	stq	$21, 88($sp);		\
	ldq	$2, HAE_CACHE($2);	\
	stq	$22, 96($sp);		\
	stq	$23, 104($sp);		\
	stq	$24, 112($sp);		\
	stq	$25, 120($sp);		\
	stq	$26, 128($sp);		\
	stq	$27, 136($sp);		\
	stq	$2, 152($sp);		\
	stq	$16, 160($sp);		\
	stq	$17, 168($sp);		\
	stq	$18, 176($sp)
58 | |||
/*
 * RESTORE_ALL: inverse of SAVE_ALL.  If the HAE value saved in the
 * frame (slot 152) differs from the current alpha_mv.hae_cache, the
 * saved value is written back to both the cache and the HAE register,
 * and $0/$1 (clobbered by that comparison path) are reloaded before
 * the remaining registers are restored and the frame is popped.
 */
#define RESTORE_ALL			\
	lda	$19, alpha_mv;		\
	ldq	$0, 0($sp);		\
	ldq	$1, 8($sp);		\
	ldq	$2, 16($sp);		\
	ldq	$3, 24($sp);		\
	ldq	$21, 152($sp);		\
	ldq	$20, HAE_CACHE($19);	\
	ldq	$4, 32($sp);		\
	ldq	$5, 40($sp);		\
	ldq	$6, 48($sp);		\
	ldq	$7, 56($sp);		\
	subq	$20, $21, $20;		\
	ldq	$8, 64($sp);		\
	beq	$20, 99f;		\
	ldq	$20, HAE_REG($19);	\
	stq	$21, HAE_CACHE($19);	\
	stq	$21, 0($20);		\
	ldq	$0, 0($sp);		\
	ldq	$1, 8($sp);		\
99:;					\
	ldq	$19, 72($sp);		\
	ldq	$20, 80($sp);		\
	ldq	$21, 88($sp);		\
	ldq	$22, 96($sp);		\
	ldq	$23, 104($sp);		\
	ldq	$24, 112($sp);		\
	ldq	$25, 120($sp);		\
	ldq	$26, 128($sp);		\
	ldq	$27, 136($sp);		\
	ldq	$28, 144($sp);		\
	addq	$sp, SP_OFF, $sp
91 | |||
92 | /* | ||
93 | * Non-syscall kernel entry points. | ||
94 | */ | ||
95 | |||
/*
 * entInt: interrupt entry.  Save the pt-regs frame, compute the
 * current thread_info pointer in $8 (stack pointer with the low 14
 * bits cleared), pass the frame as the fourth argument in $19, and
 * tail-call do_entInt with $26 preset so it "returns" directly into
 * ret_from_sys_call.
 */
	.align 4
	.globl entInt
	.ent entInt
entInt:
	SAVE_ALL
	lda	$8, 0x3fff		/* thread_info alignment mask */
	lda	$26, ret_from_sys_call	/* return path for do_entInt */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $19		/* arg: struct pt_regs * */
	jsr	$31, do_entInt
	.end entInt
107 | |||
/*
 * entArith: arithmetic trap entry.  Same frame setup as entInt, with
 * the pt-regs pointer passed in $18 (third argument) and control
 * handed to do_entArith; $26 is preset to ret_from_sys_call.
 */
	.align 4
	.globl entArith
	.ent entArith
entArith:
	SAVE_ALL
	lda	$8, 0x3fff		/* thread_info alignment mask */
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $18		/* arg: struct pt_regs * */
	jsr	$31, do_entArith
	.end entArith
119 | |||
/*
 * entMM: memory-management fault entry.  In addition to the pt-regs
 * frame, the callee-saved registers $9-$15 are spilled below it so the
 * exception-fixup code reached from do_page_fault may modify them;
 * they are reloaded afterwards and the path rejoins ret_from_sys_call.
 * $19 carries the pt-regs pointer (above the $9-$15 spill area).
 */
	.align 4
	.globl entMM
	.ent entMM
entMM:
	SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them.  */
	subq	$sp, 56, $sp
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	addq	$sp, 56, $19		/* arg: struct pt_regs * */
/* handle the fault */
	lda	$8, 0x3fff
	bic	$sp, $8, $8		/* $8 = current thread_info */
	jsr	$26, do_page_fault
/* reload the registers after the exception code played. */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	addq	$sp, 56, $sp
/* finish up the syscall as normal.  */
	br	ret_from_sys_call
	.end entMM
151 | |||
/*
 * entIF: instruction-fault entry (breakpoints, illegal ops, FP traps).
 * Same frame setup as entInt, with the pt-regs pointer in $17 (second
 * argument) and control handed to do_entIF; $26 is preset to
 * ret_from_sys_call.
 */
	.align 4
	.globl entIF
	.ent entIF
entIF:
	SAVE_ALL
	lda	$8, 0x3fff		/* thread_info alignment mask */
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $17		/* arg: struct pt_regs * */
	jsr	$31, do_entIF
	.end entIF
163 | |||
/*
 * entUna: unaligned-access trap entry.  A private 256-byte frame is
 * built holding essentially the whole integer register file.  The
 * saved PS (at 256($sp) after the frame is allocated) is tested for
 * the user-mode bit (8): user faults branch to entUnaUser before the
 * full spill completes; kernel faults fall through, call do_entUna,
 * restore everything, and return via PAL_rti.  $16-$18 are PAL-saved
 * and therefore not spilled here (slots 128-144 of the layout).
 */
	.align 4
	.globl entUna
	.ent entUna
entUna:
	lda	$sp, -256($sp)
	stq	$0, 0($sp)
	ldq	$0, 256($sp)	/* get PS */
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	and	$0, 8, $0		/* user mode? */
	stq	$4, 32($sp)
	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$9, 72($sp)
	stq	$10, 80($sp)
	stq	$11, 88($sp)
	stq	$12, 96($sp)
	stq	$13, 104($sp)
	stq	$14, 112($sp)
	stq	$15, 120($sp)
	/* 16-18 PAL-saved */
	stq	$19, 152($sp)
	stq	$20, 160($sp)
	stq	$21, 168($sp)
	stq	$22, 176($sp)
	stq	$23, 184($sp)
	stq	$24, 192($sp)
	stq	$25, 200($sp)
	stq	$26, 208($sp)
	stq	$27, 216($sp)
	stq	$28, 224($sp)
	stq	$gp, 232($sp)
	lda	$8, 0x3fff
	stq	$31, 248($sp)
	bic	$sp, $8, $8		/* $8 = current thread_info */
	jsr	$26, do_entUna
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	ldq	$8, 64($sp)
	ldq	$9, 72($sp)
	ldq	$10, 80($sp)
	ldq	$11, 88($sp)
	ldq	$12, 96($sp)
	ldq	$13, 104($sp)
	ldq	$14, 112($sp)
	ldq	$15, 120($sp)
	/* 16-18 PAL-saved */
	ldq	$19, 152($sp)
	ldq	$20, 160($sp)
	ldq	$21, 168($sp)
	ldq	$22, 176($sp)
	ldq	$23, 184($sp)
	ldq	$24, 192($sp)
	ldq	$25, 200($sp)
	ldq	$26, 208($sp)
	ldq	$27, 216($sp)
	ldq	$28, 224($sp)
	ldq	$gp, 232($sp)
	lda	$sp, 256($sp)
	call_pal PAL_rti
	.end entUna
235 | |||
/*
 * entUnaUser: user-mode continuation of entUna.  Undo entUna's partial
 * spill (restore $0, pop the 256-byte frame), then build a normal
 * kernel pt-regs frame plus a $9-$15 spill area, call do_entUnaUser
 * with the pt-regs pointer in $19, and return via ret_from_sys_call.
 */
	.align 4
	.ent entUnaUser
entUnaUser:
	ldq	$0, 0($sp)	/* restore original $0 */
	lda	$sp, 256($sp)	/* pop entUna's stack frame */
	SAVE_ALL		/* setup normal kernel stack */
	lda	$sp, -56($sp)
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	lda	$8, 0x3fff
	addq	$sp, 56, $19	/* arg: struct pt_regs * */
	bic	$sp, $8, $8	/* $8 = current thread_info */
	jsr	$26, do_entUnaUser
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	lda	$sp, 56($sp)
	br	ret_from_sys_call
	.end entUnaUser
264 | |||
/*
 * entDbg: debugger trap entry.  Same frame setup as entInt, with the
 * pt-regs pointer in $16 (first argument) and control handed to
 * do_entDbg; $26 is preset to ret_from_sys_call.
 */
	.align 4
	.globl entDbg
	.ent entDbg
entDbg:
	SAVE_ALL
	lda	$8, 0x3fff		/* thread_info alignment mask */
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $16		/* arg: struct pt_regs * */
	jsr	$31, do_entDbg
	.end entDbg
276 | |||
277 | /* | ||
278 | * The system call entry point is special. Most importantly, it looks | ||
279 | * like a function call to userspace as far as clobbered registers. We | ||
280 | * do preserve the argument registers (for syscall restarts) and $26 | ||
281 | * (for leaf syscall functions). | ||
282 | * | ||
283 | * So much for theory. We don't take advantage of this yet. | ||
284 | * | ||
285 | * Note that a0-a2 are not saved by PALcode as with the other entry points. | ||
286 | */ | ||
287 | |||
/*
 * entSys: syscall dispatch.  After SAVE_ALL, a0-a2 ($16-$18) are
 * stored into the frame by hand (PALcode does not save them on this
 * entry), the syscall number in $0 is bounds-checked against
 * NR_SYSCALLS and used to index sys_call_table ($27 falls back to
 * sys_ni_syscall/alpha_ni_syscall when out of range), and the low bit
 * of TI_FLAGS diverts traced tasks to strace.  On return, a negative
 * $0 goes through $syscall_error (which sets v0 = -errno and a3 = 1,
 * unless the syscall zeroed its saved number in the frame to mark the
 * value as legitimate); success stores the result with a3 = 0.
 *
 * ret_from_sys_call / restore_all: raise IPL to 7 so TI_FLAGS cannot
 * change between sampling and the rti, divert to work_pending if any
 * work bits are set, otherwise restore the frame and PAL_rti.  Kernel-
 * mode returns (PS bit 3 clear) skip the work check entirely.
 */
	.align 4
	.globl entSys
	.globl ret_from_sys_call
	.ent entSys
entSys:
	SAVE_ALL
	lda	$8, 0x3fff
	bic	$sp, $8, $8		/* $8 = current thread_info */
	lda	$4, NR_SYSCALLS($31)
	stq	$16, SP_OFF+24($sp)
	lda	$5, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $4, $4		/* $4 = syscall nr in range? */
	ldl	$3, TI_FLAGS($8)
	stq	$17, SP_OFF+32($sp)
	s8addq	$0, $5, $5		/* $5 = &sys_call_table[nr] */
	stq	$18, SP_OFF+40($sp)
	blbs	$3, strace		/* low flag bit set -> being traced */
	beq	$4, 1f
	ldq	$27, 0($5)
1:	jsr	$26, ($27), alpha_ni_syscall
	ldgp	$gp, 0($26)
	blt	$0, $syscall_error	/* the call failed */
	stq	$0, 0($sp)
	stq	$31, 72($sp)		/* a3=0 => no error */

	.align 4
ret_from_sys_call:
	cmovne	$26, 0, $19		/* $19 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)
	and	$0, 8, $0		/* returning to user mode? */
	beq	$0, restore_all
ret_from_reschedule:
	/* Make sure need_resched and sigpending don't change between
	   sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl		/* block interrupts (IPL 7) */
	ldl	$5, TI_FLAGS($8)
	and	$5, _TIF_WORK_MASK, $2
	bne	$5, work_pending
restore_all:
	RESTORE_ALL
	call_pal PAL_rti

	.align 3
$syscall_error:
	/*
	 * Some system calls (e.g., ptrace) can return arbitrary
	 * values which might normally be mistaken as error numbers.
	 * Those functions must zero $0 (v0) directly in the stack
	 * frame to indicate that a negative return value wasn't an
	 * error number..
	 */
	ldq	$19, 0($sp)	/* old syscall nr (zero if success) */
	beq	$19, $ret_success

	ldq	$20, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call

$ret_success:
	stq	$0, 0($sp)
	stq	$31, 72($sp)	/* a3=0 => no error */
	br	ret_from_sys_call
	.end entSys
357 | |||
358 | /* | ||
359 | * Do all cleanup when returning from all interrupts and system calls. | ||
360 | * | ||
361 | * Arguments: | ||
362 | * $5: TI_FLAGS. | ||
363 | * $8: current. | ||
364 | * $19: The old syscall number, or zero if this is not a return | ||
365 | * from a syscall that errored and is possibly restartable. | ||
366 | * $20: Error indication. | ||
367 | */ | ||
368 | |||
/*
 * work_pending: entered from ret_from_sys_call with IPL raised.
 * Loops on schedule() while _TIF_NEED_RESCHED is set (preserving the
 * restart info $19/$20 across each call), re-raising IPL and
 * re-sampling TI_FLAGS after every pass; any remaining work falls
 * through to signal delivery via do_notify_resume, after which the
 * path returns through restore_all.
 */
	.align 4
	.ent work_pending
work_pending:
	and	$5, _TIF_NEED_RESCHED, $2
	beq	$2, $work_notifysig

$work_resched:
	subq	$sp, 16, $sp
	stq	$19, 0($sp)	/* save syscall nr */
	stq	$20, 8($sp)	/* and error indication (a3) */
	jsr	$26, schedule
	ldq	$19, 0($sp)
	ldq	$20, 8($sp)
	addq	$sp, 16, $sp
	/* Make sure need_resched and sigpending don't change between
	   sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl	/* block interrupts again (IPL 7) */
	ldl	$5, TI_FLAGS($8)
	and	$5, _TIF_WORK_MASK, $2
	beq	$2, restore_all
	and	$5, _TIF_NEED_RESCHED, $2
	bne	$2, $work_resched

$work_notifysig:
	mov	$sp, $17
	br	$1, do_switch_stack	/* full user context for signal setup */
	mov	$5, $21
	mov	$sp, $18
	mov	$31, $16
	jsr	$26, do_notify_resume
	bsr	$1, undo_switch_stack
	br	restore_all
	.end work_pending
403 | |||
404 | /* | ||
405 | * PTRACE syscall handler | ||
406 | */ | ||
407 | |||
/*
 * strace: syscall path for traced tasks.  Calls syscall_trace (with a
 * full switch stack saved) before and after the real syscall, reloads
 * the syscall number and arguments from the frame between the two
 * tracer stops, and applies the same error/a3 convention as the
 * untraced path.  $9/$10 temporarily hold the restart info across the
 * second syscall_trace call (they are callee-saved).
 */
	.align 4
	.ent strace
strace:
	/* set up signal stack, call syscall_trace */
	bsr	$1, do_switch_stack
	jsr	$26, syscall_trace
	bsr	$1, undo_switch_stack

	/* get the system call number and the arguments back.. */
	ldq	$0, 0($sp)
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* get the system call pointer.. */
	lda	$1, NR_SYSCALLS($31)
	lda	$2, sys_call_table
	lda	$27, alpha_ni_syscall
	cmpult	$0, $1, $1	/* in range? */
	s8addq	$0, $2, $2	/* $2 = &sys_call_table[nr] */
	beq	$1, 1f
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
	stq	$31, 72($sp)		/* a3=0 => no error */
$strace_success:
	stq	$0, 0($sp)		/* save return value */

	bsr	$1, do_switch_stack
	jsr	$26, syscall_trace
	bsr	$1, undo_switch_stack
	br	$31, ret_from_sys_call

	.align 3
$strace_error:
	ldq	$19, 0($sp)	/* old syscall nr (zero if success) */
	beq	$19, $strace_success
	ldq	$20, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	bsr	$1, do_switch_stack
	mov	$19, $9		/* save old syscall number */
	mov	$20, $10	/* save old a3 */
	jsr	$26, syscall_trace
	mov	$9, $19
	mov	$10, $20
	bsr	$1, undo_switch_stack

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
	.end strace
469 | |||
470 | /* | ||
471 | * Save and restore the switch stack -- aka the balance of the user context. | ||
472 | */ | ||
473 | |||
/*
 * do_switch_stack: save the balance of the user context — the callee-
 * saved integer registers $9-$15, the return address $26, all FP
 * registers, and the FPCR (stored in the $f31 slot) — in a
 * SWITCH_STACK_SIZE frame.  Called with the return address in $1
 * (br/bsr $1, ...), not $26, since $26 itself is being saved.
 */
	.align 4
	.ent do_switch_stack
do_switch_stack:
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	stt	$f0, 64($sp)
	stt	$f1, 72($sp)
	stt	$f2, 80($sp)
	stt	$f3, 88($sp)
	stt	$f4, 96($sp)
	stt	$f5, 104($sp)
	stt	$f6, 112($sp)
	stt	$f7, 120($sp)
	stt	$f8, 128($sp)
	stt	$f9, 136($sp)
	stt	$f10, 144($sp)
	stt	$f11, 152($sp)
	stt	$f12, 160($sp)
	stt	$f13, 168($sp)
	stt	$f14, 176($sp)
	stt	$f15, 184($sp)
	stt	$f16, 192($sp)
	stt	$f17, 200($sp)
	stt	$f18, 208($sp)
	stt	$f19, 216($sp)
	stt	$f20, 224($sp)
	stt	$f21, 232($sp)
	stt	$f22, 240($sp)
	stt	$f23, 248($sp)
	stt	$f24, 256($sp)
	stt	$f25, 264($sp)
	stt	$f26, 272($sp)
	stt	$f27, 280($sp)
	mf_fpcr	$f0		# get fpcr
	stt	$f28, 288($sp)
	stt	$f29, 296($sp)
	stt	$f30, 304($sp)
	stt	$f0, 312($sp)	# save fpcr in slot of $f31
	ldt	$f0, 64($sp)	# don't let "do_switch_stack" change fp state.
	ret	$31, ($1), 1
	.end do_switch_stack
522 | |||
/*
 * undo_switch_stack: inverse of do_switch_stack — reload $9-$15, $26,
 * the FP registers, and reinstall the FPCR saved in the $f31 slot,
 * then pop the SWITCH_STACK_SIZE frame.  Returns through $1, matching
 * the do_switch_stack calling convention.
 */
	.align 4
	.ent undo_switch_stack
undo_switch_stack:
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	ldt	$f30, 312($sp)	# get saved fpcr
	ldt	$f0, 64($sp)
	ldt	$f1, 72($sp)
	ldt	$f2, 80($sp)
	ldt	$f3, 88($sp)
	mt_fpcr	$f30		# install saved fpcr
	ldt	$f4, 96($sp)
	ldt	$f5, 104($sp)
	ldt	$f6, 112($sp)
	ldt	$f7, 120($sp)
	ldt	$f8, 128($sp)
	ldt	$f9, 136($sp)
	ldt	$f10, 144($sp)
	ldt	$f11, 152($sp)
	ldt	$f12, 160($sp)
	ldt	$f13, 168($sp)
	ldt	$f14, 176($sp)
	ldt	$f15, 184($sp)
	ldt	$f16, 192($sp)
	ldt	$f17, 200($sp)
	ldt	$f18, 208($sp)
	ldt	$f19, 216($sp)
	ldt	$f20, 224($sp)
	ldt	$f21, 232($sp)
	ldt	$f22, 240($sp)
	ldt	$f23, 248($sp)
	ldt	$f24, 256($sp)
	ldt	$f25, 264($sp)
	ldt	$f26, 272($sp)
	ldt	$f27, 280($sp)
	ldt	$f28, 288($sp)
	ldt	$f29, 296($sp)
	ldt	$f30, 304($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.end undo_switch_stack
570 | |||
571 | /* | ||
572 | * The meat of the context switch code. | ||
573 | */ | ||
574 | |||
/*
 * alpha_switch_to -- perform the actual context switch.
 *
 * In:  $16 = argument for PAL_swpctx (new context; presumably the
 *            physical address of the new HWPCB -- confirm against the
 *            Alpha PALcode spec), $17 = 'prev' task pointer.
 * Out: $0  = 'prev' task pointer (for the scheduler's switch_to protocol).
 *
 * Saves the old thread's callee-saved state on a switch stack, swaps
 * hardware context via PALcode, then unwinds the new thread's switch
 * stack and recomputes the per-thread pointer in $8.
 */
	.align	4
	.globl	alpha_switch_to
	.ent	alpha_switch_to
alpha_switch_to:
	.prologue 0
	bsr	$1, do_switch_stack	# push old thread's state
	call_pal PAL_swpctx		# switch hardware context
	lda	$8, 0x3fff		# 16KB kernel-stack mask
	bsr	$1, undo_switch_stack	# pop new thread's state
	bic	$sp, $8, $8		# $8 = current thread_info (sp & ~0x3fff)
	mov	$17, $0			# return 'prev'
	ret
	.end alpha_switch_to
588 | |||
589 | /* | ||
590 | * New processes begin life here. | ||
591 | */ | ||
592 | |||
/*
 * ret_from_fork -- first code executed by a newly created process.
 *
 * Arranges for schedule_tail(prev) to return straight into the normal
 * system-call exit path.
 */
	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	lda	$26, ret_from_sys_call	# make schedule_tail "return" there
	mov	$17, $16		# arg0 = prev task
	jmp	$31, schedule_tail	# tail call
	.end ret_from_fork
601 | |||
602 | /* | ||
603 | * kernel_thread(fn, arg, clone_flags) | ||
604 | */ | ||
/*
 * kernel_thread(fn, arg, clone_flags)
 *
 * In:  $16 = fn, $17 = arg, $18 = clone_flags.
 *
 * Builds a fake system-call frame on the stack (we cannot take a real
 * syscall trap from kernel mode) and calls sys_clone with CLONE_VM set.
 * The child "returns" from the fake syscall to the code right after the
 * "br $1, 2f" below, calls fn(arg), and exits.
 */
	.align	4
	.globl	kernel_thread
	.ent	kernel_thread
kernel_thread:
	/* We can be called from a module. */
	ldgp	$gp, 0($27)
	.prologue 1
	subq	$sp, SP_OFF+6*8, $sp	# room for the fake pt_regs frame
	br	$1, 2f		/* load start address into $1 */

	/* We've now "returned" from a fake system call. */
	unop
	blt	$0, 1f		/* error? clone failed */
	ldi	$1, 0x3fff
	beq	$20, 1f		/* parent or child? */

	bic	$sp, $1, $8	/* in child: recompute thread_info */
	jsr	$26, ($27)	# call fn($16 = arg, set up in the frame)
	ldgp	$gp, 0($26)
	mov	$0, $16		# exit code = fn's return value
	mov	$31, $26
	jmp	$31, sys_exit

1:	ret			/* in parent: $0 is clone's return value */

	.align 4
2:	/* Fake a system call stack frame, as we can't do system calls
	   from kernel space.  Note that we store FN and ARG as they
	   need to be set up in the child for the call.  Also store $8
	   and $26 for use in the parent.  */
	stq	$31, SP_OFF($sp)	/* ps */
	stq	$1, SP_OFF+8($sp)	/* pc: resume after the br above */
	stq	$gp, SP_OFF+16($sp)	/* gp */
	stq	$16, 136($sp)		/* $27; FN for child */
	stq	$17, SP_OFF+24($sp)	/* $16; ARG for child */
	stq	$8, 64($sp)		/* $8 */
	stq	$26, 128($sp)		/* $26 */
	/* Avoid the HAE being gratuitously wrong, to avoid restoring it. */
	ldq	$2, alpha_mv+HAE_CACHE
	stq	$2, 152($sp)		/* HAE */

	/* Shuffle FLAGS to the front; add CLONE_VM. */
	ldi	$1, CLONE_VM|CLONE_UNTRACED
	or	$18, $1, $16
	bsr	$26, sys_clone

	/* We don't actually care for a3 success widgetry in the kernel.
	   Not for positive errno values. */
	stq	$0, 0($sp)		/* $0 */
	br	restore_all		# unwind through the syscall exit code
	.end kernel_thread
656 | |||
657 | /* | ||
658 | * execve(path, argv, envp) | ||
659 | */ | ||
/*
 * execve(path, argv, envp) -- in-kernel exec helper.
 *
 * In:  $16 = path, $17 = argv, $18 = envp.
 *
 * Zeroes a temporary pt_regs on the local stack, runs do_execve on it,
 * and on success memmove()s that frame to the top of the kernel stack
 * before exiting to userland via ret_from_sys_call.  On failure the
 * frame is discarded and the error is returned in $0.
 */
	.align	4
	.globl	execve
	.ent	execve
execve:
	/* We can be called from a module. */
	ldgp	$gp, 0($27)
	lda	$sp, -(32+SIZEOF_PT_REGS+8)($sp)
	.frame	$sp, 32+SIZEOF_PT_REGS+8, $26, 0
	stq	$26, 0($sp)	# save ra and the three arguments
	stq	$16, 8($sp)
	stq	$17, 16($sp)
	stq	$18, 24($sp)
	.prologue 1

	lda	$16, 32($sp)		# memset(&tmp_regs, 0, SIZEOF_PT_REGS)
	lda	$17, 0
	lda	$18, SIZEOF_PT_REGS
	bsr	$26, memset !samegp

	/* Avoid the HAE being gratuitously wrong, which would cause us
	   to do the whole turn off interrupts thing and restore it. */
	ldq	$2, alpha_mv+HAE_CACHE
	stq	$2, 152+32($sp)		# HAE slot of the temporary frame

	ldq	$16, 8($sp)		# reload path/argv/envp
	ldq	$17, 16($sp)
	ldq	$18, 24($sp)
	lda	$19, 32($sp)		# &tmp_regs
	bsr	$26, do_execve !samegp

	ldq	$26, 0($sp)
	bne	$0, 1f /* error! */

	/* Move the temporary pt_regs struct from its current location
	   to the top of the kernel stack frame.  See copy_thread for
	   details for a normal process. */
	lda	$16, 0x4000 - SIZEOF_PT_REGS($8)
	lda	$17, 32($sp)
	lda	$18, SIZEOF_PT_REGS
	bsr	$26, memmove !samegp

	/* Take that over as our new stack frame and visit userland! */
	lda	$sp, 0x4000 - SIZEOF_PT_REGS($8)
	br	$31, ret_from_sys_call

1:	lda	$sp, 32+SIZEOF_PT_REGS+8($sp)	# discard frame, return error
	ret
	.end execve
708 | |||
709 | |||
710 | /* | ||
711 | * Special system calls. Most of these are special in that they either | ||
712 | * have to play switch_stack games or in some way use the pt_regs struct. | ||
713 | */ | ||
/*
 * sys_fork -- fork(2) entry: alpha_clone(SIGCHLD, 0, 0, 0, 0, regs).
 * $21 is captured before the switch stack is pushed, so it points at
 * the syscall pt_regs frame.
 */
	.align	4
	.globl	sys_fork
	.ent	sys_fork
sys_fork:
	.prologue 0
	mov	$sp, $21		# regs pointer = 6th arg
	bsr	$1, do_switch_stack	# save full register state
	bis	$31, SIGCHLD, $16	# clone_flags = SIGCHLD (plain fork)
	mov	$31, $17		# usp = 0 (reuse parent's)
	mov	$31, $18
	mov	$31, $19
	mov	$31, $20
	jsr	$26, alpha_clone
	bsr	$1, undo_switch_stack
	ret
	.end sys_fork
730 | |||
/*
 * sys_clone -- clone(2) entry: user-supplied args pass straight
 * through in $16-$20; $21 = pt_regs (captured before the switch
 * stack is pushed).
 */
	.align	4
	.globl	sys_clone
	.ent	sys_clone
sys_clone:
	.prologue 0
	mov	$sp, $21		# regs pointer = 6th arg
	bsr	$1, do_switch_stack
	/* $16, $17, $18, $19, $20 come from the user. */
	jsr	$26, alpha_clone
	bsr	$1, undo_switch_stack
	ret
	.end sys_clone
743 | |||
/*
 * sys_vfork -- vfork(2) entry: alpha_vfork(regs).
 */
	.align	4
	.globl	sys_vfork
	.ent	sys_vfork
sys_vfork:
	.prologue 0
	mov	$sp, $16		# regs pointer = only arg
	bsr	$1, do_switch_stack
	jsr	$26, alpha_vfork
	bsr	$1, undo_switch_stack
	ret
	.end sys_vfork
755 | |||
/*
 * sys_sigreturn -- return from a signal handler.
 *
 * Reserves a switch_stack below pt_regs and calls
 * do_sigreturn(frame, regs, sw); do_sigreturn fills the switch stack
 * from the signal context (presumably -- confirm in signal.c), which
 * undo_switch_stack then loads before exiting via ret_from_sys_call.
 */
	.align	4
	.globl	sys_sigreturn
	.ent	sys_sigreturn
sys_sigreturn:
	.prologue 0
	mov	$sp, $17			# $17 = pt_regs
	lda	$18, -SWITCH_STACK_SIZE($sp)	# $18 = switch_stack to fill
	lda	$sp, -SWITCH_STACK_SIZE($sp)	# reserve it
	jsr	$26, do_sigreturn
	br	$1, undo_switch_stack		# reload restored state
	br	ret_from_sys_call
	.end sys_sigreturn
768 | |||
/*
 * sys_rt_sigreturn -- return from an rt signal handler.
 * Identical structure to sys_sigreturn; see the comments there.
 */
	.align	4
	.globl	sys_rt_sigreturn
	.ent	sys_rt_sigreturn
sys_rt_sigreturn:
	.prologue 0
	mov	$sp, $17			# $17 = pt_regs
	lda	$18, -SWITCH_STACK_SIZE($sp)	# $18 = switch_stack to fill
	lda	$sp, -SWITCH_STACK_SIZE($sp)	# reserve it
	jsr	$26, do_rt_sigreturn
	br	$1, undo_switch_stack		# reload restored state
	br	ret_from_sys_call
	.end sys_rt_sigreturn
781 | |||
/*
 * sys_sigsuspend -- sigsuspend(2) entry.
 *
 * Pushes a switch stack so the signal code can see/modify full register
 * state, then calls do_sigsuspend(mask, regs, sw).  The switch stack is
 * simply discarded afterwards (not unwound), the handler having already
 * consumed it.
 */
	.align	4
	.globl	sys_sigsuspend
	.ent	sys_sigsuspend
sys_sigsuspend:
	.prologue 0
	mov	$sp, $17		# $17 = pt_regs
	br	$1, do_switch_stack	# push state; "returns" to next insn
	mov	$sp, $18		# $18 = switch_stack
	subq	$sp, 16, $sp
	stq	$26, 0($sp)		# save ra across the call
	jsr	$26, do_sigsuspend
	ldq	$26, 0($sp)
	lda	$sp, SWITCH_STACK_SIZE+16($sp)	# drop scratch + switch stack
	ret
	.end sys_sigsuspend
797 | |||
/*
 * sys_rt_sigsuspend -- rt_sigsuspend(2) entry.
 * Same shape as sys_sigsuspend, with an extra user argument, so the
 * regs/switch-stack pointers shift to $18/$19.
 */
	.align	4
	.globl	sys_rt_sigsuspend
	.ent	sys_rt_sigsuspend
sys_rt_sigsuspend:
	.prologue 0
	mov	$sp, $18		# $18 = pt_regs
	br	$1, do_switch_stack	# push state; "returns" to next insn
	mov	$sp, $19		# $19 = switch_stack
	subq	$sp, 16, $sp
	stq	$26, 0($sp)		# save ra across the call
	jsr	$26, do_rt_sigsuspend
	ldq	$26, 0($sp)
	lda	$sp, SWITCH_STACK_SIZE+16($sp)	# drop scratch + switch stack
	ret
	.end sys_rt_sigsuspend
813 | |||
/*
 * sys_sethae -- store the new HAE value into the pt_regs HAE slot
 * (offset 152; see the frame layout in kernel_thread above) so it is
 * written to hardware on return to user mode.
 */
	.align	4
	.globl	sys_sethae
	.ent	sys_sethae
sys_sethae:
	.prologue 0
	stq	$16, 152($sp)	# pt_regs->hae = $16
	ret
	.end sys_sethae
822 | |||
/*
 * osf_getpriority -- OSF/1-compatible getpriority.
 *
 * Wraps sys_getpriority and converts its biased result (prio + 20)
 * back to the unbiased value the OSF ABI expects.
 */
	.align	4
	.globl	osf_getpriority
	.ent	osf_getpriority
osf_getpriority:
	lda	$sp, -16($sp)
	stq	$26, 0($sp)		# save ra
	.prologue 0

	jsr	$26, sys_getpriority

	ldq	$26, 0($sp)
	blt	$0, 1f			# negative -> error, pass through

	/* Return value is the unbiased priority, i.e. 20 - prio.
	   This does result in negative return values, so signal
	   no error by writing into the R0 slot. */
	lda	$1, 20
	stq	$31, 16($sp)		# clear the frame's $0 slot: "no error"
	subl	$1, $0, $0		# $0 = 20 - prio
	unop

1:	lda	$sp, 16($sp)
	ret
	.end osf_getpriority
847 | |||
/*
 * sys_getxuid -- return uid in $0 and euid via the pt_regs $20 slot
 * (offset 80), the OSF dual-value syscall convention.
 */
	.align	4
	.globl	sys_getxuid
	.ent	sys_getxuid
sys_getxuid:
	.prologue 0
	ldq	$2, TI_TASK($8)		# $2 = current task ($8 = thread_info)
	ldl	$0, TASK_UID($2)	# primary return: real uid
	ldl	$1, TASK_EUID($2)
	stq	$1, 80($sp)		# secondary return ($20 slot): euid
	ret
	.end sys_getxuid
859 | |||
/*
 * sys_getxgid -- return gid in $0 and egid via the pt_regs $20 slot
 * (offset 80), the OSF dual-value syscall convention.
 */
	.align	4
	.globl	sys_getxgid
	.ent	sys_getxgid
sys_getxgid:
	.prologue 0
	ldq	$2, TI_TASK($8)		# $2 = current task ($8 = thread_info)
	ldl	$0, TASK_GID($2)	# primary return: real gid
	ldl	$1, TASK_EGID($2)
	stq	$1, 80($sp)		# secondary return ($20 slot): egid
	ret
	.end sys_getxgid
871 | |||
/*
 * sys_getxpid -- return current tgid in $0 and the parent's tgid via
 * the pt_regs $20 slot (offset 80).
 */
	.align	4
	.globl	sys_getxpid
	.ent	sys_getxpid
sys_getxpid:
	.prologue 0
	ldq	$2, TI_TASK($8)		# $2 = current task

	/* See linux/kernel/timer.c sys_getppid for discussion
	   about this loop.  On SMP, real_parent may be re-pointed
	   concurrently; re-read until two reads agree. */
	ldq	$3, TASK_REAL_PARENT($2)
1:	ldl	$1, TASK_TGID($3)
#ifdef CONFIG_SMP
	mov	$3, $4
	mb				# order the re-read after the load
	ldq	$3, TASK_REAL_PARENT($2)
	cmpeq	$3, $4, $4		# same parent both times?
	beq	$4, 1b			# no -- retry
#endif
	stq	$1, 80($sp)		# secondary return: parent's tgid
	ldl	$0, TASK_TGID($2)	# primary return: own tgid
	ret
	.end sys_getxpid
894 | |||
/*
 * sys_pipe -- OSF-style pipe(2): on success the two fds come back in
 * $0 and the pt_regs $20 slot rather than through a user pointer.
 */
	.align	4
	.globl	sys_pipe
	.ent	sys_pipe
sys_pipe:
	lda	$sp, -16($sp)
	stq	$26, 0($sp)		# save ra; fds land at 8($sp)
	.prologue 0

	lda	$16, 8($sp)		# &fd[0] for do_pipe
	jsr	$26, do_pipe

	ldq	$26, 0($sp)
	bne	$0, 1f			# error: return it unchanged

	/* The return values are in $0 and $20. */
	ldl	$1, 12($sp)		# fd[1]
	ldl	$0, 8($sp)		# fd[0] -> primary return

	stq	$1, 80+16($sp)		# fd[1] -> pt_regs $20 slot
1:	lda	$sp, 16($sp)
	ret
	.end sys_pipe
917 | |||
/*
 * sys_ptrace -- pass pt_regs as a hidden 5th argument and tail-call
 * the C implementation.
 */
	.align	4
	.globl	sys_ptrace
	.ent	sys_ptrace
sys_ptrace:
	.prologue 0
	mov	$sp, $20		# $20 = pt_regs
	jmp	$31, do_sys_ptrace	# tail call; returns to our caller
	.end sys_ptrace
926 | |||
/*
 * sys_execve -- pass pt_regs as a hidden 4th argument and tail-call
 * the C implementation.
 */
	.align	4
	.globl	sys_execve
	.ent	sys_execve
sys_execve:
	.prologue 0
	mov	$sp, $19		# $19 = pt_regs
	jmp	$31, do_sys_execve	# tail call; returns to our caller
	.end sys_execve
935 | |||
/*
 * osf_sigprocmask -- pass pt_regs as a hidden 3rd argument and
 * tail-call the C implementation.
 */
	.align	4
	.globl	osf_sigprocmask
	.ent	osf_sigprocmask
osf_sigprocmask:
	.prologue 0
	mov	$sp, $18		# $18 = pt_regs
	jmp	$31, do_osf_sigprocmask	# tail call; returns to our caller
	.end osf_sigprocmask
944 | |||
/*
 * alpha_ni_syscall -- handler for unimplemented syscall numbers.
 * Returns -ENOSYS in $0 and also stores it in the pt_regs $0 slot.
 */
	.align	4
	.globl	alpha_ni_syscall
	.ent	alpha_ni_syscall
alpha_ni_syscall:
	.prologue 0
	/* Special because it also implements overflow handling via
	   syscall number 0.  And if you recall, zero is a special
	   trigger for "not an error".  Store large non-zero there. */
	lda	$0, -ENOSYS
	unop
	stq	$0, 0($sp)	# pt_regs $0 slot
	ret
	.end alpha_ni_syscall
diff --git a/arch/alpha/kernel/err_common.c b/arch/alpha/kernel/err_common.c new file mode 100644 index 000000000000..687580b16b41 --- /dev/null +++ b/arch/alpha/kernel/err_common.c | |||
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/err_common.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | * Error handling code supporting Alpha systems | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/sched.h> | ||
12 | |||
13 | #include <asm/io.h> | ||
14 | #include <asm/hwrpb.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/err_common.h> | ||
17 | |||
18 | #include "err_impl.h" | ||
19 | #include "proto.h" | ||
20 | |||
/*
 * err_print_prefix -- error handling print routines should prefix
 * all prints with this.  Defaults to KERN_NOTICE; callers such as
 * ev6_machine_check() temporarily swap in a different level (e.g.
 * KERN_CRIT) around a report and restore it afterwards.
 */
char *err_print_prefix = KERN_NOTICE;
26 | |||
27 | |||
28 | /* | ||
29 | * Generic | ||
30 | */ | ||
31 | void | ||
32 | mchk_dump_mem(void *data, size_t length, char **annotation) | ||
33 | { | ||
34 | unsigned long *ldata = data; | ||
35 | size_t i; | ||
36 | |||
37 | for (i = 0; (i * sizeof(*ldata)) < length; i++) { | ||
38 | if (annotation && !annotation[i]) | ||
39 | annotation = NULL; | ||
40 | printk("%s %08x: %016lx %s\n", | ||
41 | err_print_prefix, | ||
42 | (unsigned)(i * sizeof(*ldata)), ldata[i], | ||
43 | annotation ? annotation[i] : ""); | ||
44 | } | ||
45 | } | ||
46 | |||
/*
 * mchk_dump_logout_frame - print a generic machine-check logout frame.
 * @mchk_header: common logout frame header.
 *
 * Prints the header fields, then hex-dumps the processor-specific
 * region (between proc_offset and sys_offset) and the system-specific
 * region (from sys_offset to the end of the frame).
 */
void
mchk_dump_logout_frame(struct el_common *mchk_header)
{
	printk("%s  -- Frame Header --\n"
	       "    Frame Size:   %d (0x%x) bytes\n"
	       "    Flags:        %s%s\n"
	       "    MCHK Code:    0x%x\n"
	       "    Frame Rev:    %d\n"
	       "    Proc Offset:  0x%08x\n"
	       "    Sys Offset:   0x%08x\n"
	       "  -- Processor Region --\n",
	       err_print_prefix,
	       mchk_header->size, mchk_header->size,
	       mchk_header->retry ? "RETRY " : "",
	       mchk_header->err2 ? "SECOND_ERR " : "",
	       mchk_header->code,
	       mchk_header->frame_rev,
	       mchk_header->proc_offset,
	       mchk_header->sys_offset);

	/* Processor region: [proc_offset, sys_offset). */
	mchk_dump_mem((void *)
		      ((unsigned long)mchk_header + mchk_header->proc_offset),
		      mchk_header->sys_offset - mchk_header->proc_offset,
		      NULL);

	/* System region: [sys_offset, size). */
	printk("%s  -- System Region --\n", err_print_prefix);
	mchk_dump_mem((void *)
		      ((unsigned long)mchk_header + mchk_header->sys_offset),
		      mchk_header->size - mchk_header->sys_offset,
		      NULL);
	printk("%s  -- End of Frame --\n", err_print_prefix);
}
79 | |||
80 | |||
81 | /* | ||
82 | * Console Data Log | ||
83 | */ | ||
84 | /* Data */ | ||
85 | static struct el_subpacket_handler *subpacket_handler_list = NULL; | ||
86 | static struct el_subpacket_annotation *subpacket_annotation_list = NULL; | ||
87 | |||
/*
 * el_process_header_subpacket - handle a HEADER-class subpacket.
 * @header: subpacket of class EL_CLASS__HEADER.
 *
 * Decodes the frame type, prints its name and timestamp, processes the
 * subpackets it announces, and returns a pointer to the next header
 * (this header plus its own length plus the announced frame length).
 * Returns NULL on an unexpected or unknown header.
 */
static struct el_subpacket *
el_process_header_subpacket(struct el_subpacket *header)
{
	union el_timestamp timestamp;
	char *name = "UNKNOWN EVENT";
	int packet_count = 0;
	int length = 0;

	if (header->class != EL_CLASS__HEADER) {
		printk("%s** Unexpected header CLASS %d TYPE %d, aborting\n",
		       err_print_prefix,
		       header->class, header->type);
		return NULL;
	}

	/* Each header type stores its length/count/timestamp in a
	   different member of the by_type union. */
	switch(header->type) {
	case EL_TYPE__HEADER__SYSTEM_ERROR_FRAME:
		name = "SYSTEM ERROR";
		length = header->by_type.sys_err.frame_length;
		packet_count =
			header->by_type.sys_err.frame_packet_count;
		timestamp.as_int = 0;	/* no timestamp for this type */
		break;
	case EL_TYPE__HEADER__SYSTEM_EVENT_FRAME:
		name = "SYSTEM EVENT";
		length = header->by_type.sys_event.frame_length;
		packet_count =
			header->by_type.sys_event.frame_packet_count;
		timestamp = header->by_type.sys_event.timestamp;
		break;
	case EL_TYPE__HEADER__HALT_FRAME:
		name = "ERROR HALT";
		length = header->by_type.err_halt.frame_length;
		packet_count =
			header->by_type.err_halt.frame_packet_count;
		timestamp = header->by_type.err_halt.timestamp;
		break;
	case EL_TYPE__HEADER__LOGOUT_FRAME:
		name = "LOGOUT FRAME";
		length = header->by_type.logout_header.frame_length;
		packet_count = 1;	/* a logout frame is one packet */
		timestamp.as_int = 0;	/* no timestamp for this type */
		break;
	default: /* Unknown */
		printk("%s** Unknown header - CLASS %d TYPE %d, aborting\n",
		       err_print_prefix,
		       header->class, header->type);
		return NULL;
	}

	printk("%s*** %s:\n"
	       "  CLASS %d, TYPE %d\n",
	       err_print_prefix,
	       name,
	       header->class, header->type);
	el_print_timestamp(&timestamp);

	/*
	 * Process the subpackets
	 */
	el_process_subpackets(header, packet_count);

	/* return the next header */
	header = (struct el_subpacket *)
		((unsigned long)header + header->length + length);
	return header;
}
155 | |||
156 | static struct el_subpacket * | ||
157 | el_process_subpacket_reg(struct el_subpacket *header) | ||
158 | { | ||
159 | struct el_subpacket *next = NULL; | ||
160 | struct el_subpacket_handler *h = subpacket_handler_list; | ||
161 | |||
162 | for (; h && h->class != header->class; h = h->next); | ||
163 | if (h) next = h->handler(header); | ||
164 | |||
165 | return next; | ||
166 | } | ||
167 | |||
168 | void | ||
169 | el_print_timestamp(union el_timestamp *timestamp) | ||
170 | { | ||
171 | if (timestamp->as_int) | ||
172 | printk("%s TIMESTAMP: %d/%d/%02d %d:%02d:%0d\n", | ||
173 | err_print_prefix, | ||
174 | timestamp->b.month, timestamp->b.day, | ||
175 | timestamp->b.year, timestamp->b.hour, | ||
176 | timestamp->b.minute, timestamp->b.second); | ||
177 | } | ||
178 | |||
179 | void | ||
180 | el_process_subpackets(struct el_subpacket *header, int packet_count) | ||
181 | { | ||
182 | struct el_subpacket *subpacket; | ||
183 | int i; | ||
184 | |||
185 | subpacket = (struct el_subpacket *) | ||
186 | ((unsigned long)header + header->length); | ||
187 | |||
188 | for (i = 0; subpacket && i < packet_count; i++) { | ||
189 | printk("%sPROCESSING SUBPACKET %d\n", err_print_prefix, i); | ||
190 | subpacket = el_process_subpacket(subpacket); | ||
191 | } | ||
192 | } | ||
193 | |||
/*
 * el_process_subpacket - process one subpacket by class.
 * @header: the subpacket to process.
 *
 * Returns a pointer to the next subpacket, or NULL when there is
 * nothing more to do (termination packet, unknown class, or a header
 * processor failure).
 */
struct el_subpacket *
el_process_subpacket(struct el_subpacket *header)
{
	struct el_subpacket *next = NULL;

	switch(header->class) {
	case EL_CLASS__TERMINATION:
		/* Termination packet, there are no more */
		break;
	case EL_CLASS__HEADER:
		next = el_process_header_subpacket(header);
		break;
	default:
		/* Try the dynamically registered handlers. */
		if (NULL == (next = el_process_subpacket_reg(header))) {
			printk("%s** Unexpected header CLASS %d TYPE %d"
			       " -- aborting.\n",
			       err_print_prefix,
			       header->class, header->type);
		}
		break;
	}

	return next;
}
218 | |||
/*
 * el_annotate_subpacket - dump a subpacket with per-quadword labels.
 * @header: the subpacket to dump.
 *
 * Looks up a registered annotation matching this subpacket's class,
 * type, and revision; if found, prints its description and passes its
 * label array to mchk_dump_mem(), otherwise dumps unlabelled.
 */
void
el_annotate_subpacket(struct el_subpacket *header)
{
	struct el_subpacket_annotation *a;
	char **annotation = NULL;

	for (a = subpacket_annotation_list; a; a = a->next) {
		if (a->class == header->class &&
		    a->type == header->type &&
		    a->revision == header->revision) {
			/*
			 * We found the annotation
			 */
			annotation = a->annotation;
			printk("%s  %s\n", err_print_prefix, a->description);
			break;
		}
	}

	mchk_dump_mem(header, header->length, annotation);
}
240 | |||
/*
 * cdl_process_console_data_log - replay one CPU's console data log.
 * @cpu:  CPU index (for messages).
 * @pcpu: that CPU's HWRPB per-CPU slot, holding the log's physical
 *        address in console_data_log_pa.
 *
 * Walks the subpacket chain the console left behind from a previous
 * boot, then zeroes console_data_log_pa to tell the console the
 * error(s) may be cleared on restart.
 */
static void __init
cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu)
{
	/* IDENT_ADDR | pa: map the physical log address into the
	   kernel's direct-mapped (KSEG) address space. */
	struct el_subpacket *header = (struct el_subpacket *)
		(IDENT_ADDR | pcpu->console_data_log_pa);
	int err;

	printk("%s******* CONSOLE DATA LOG FOR CPU %d. *******\n"
	       "*** Error(s) were logged on a previous boot\n",
	       err_print_prefix, cpu);

	for (err = 0; header && (header->class != EL_CLASS__TERMINATION); err++)
		header = el_process_subpacket(header);

	/* let the console know it's ok to clear the error(s) at restart */
	pcpu->console_data_log_pa = 0;

	printk("%s*** %d total error(s) logged\n"
	       "**** END OF CONSOLE DATA LOG FOR CPU %d ****\n",
	       err_print_prefix, err, cpu);
}
262 | |||
/*
 * cdl_check_console_data_log - scan all CPUs for console data logs.
 *
 * Walks the HWRPB per-CPU slots and processes any CPU whose
 * console_data_log_pa is non-zero (i.e. the console logged errors on a
 * previous boot).  Called once at init time.
 */
void __init
cdl_check_console_data_log(void)
{
	struct percpu_struct *pcpu;
	unsigned long cpu;

	for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) {
		/* Per-CPU slots are laid out consecutively after
		   processor_offset, each processor_size bytes. */
		pcpu = (struct percpu_struct *)
			((unsigned long)hwrpb + hwrpb->processor_offset
			 + cpu * hwrpb->processor_size);
		if (pcpu->console_data_log_pa)
			cdl_process_console_data_log(cpu, pcpu);
	}

}
278 | |||
279 | int __init | ||
280 | cdl_register_subpacket_annotation(struct el_subpacket_annotation *new) | ||
281 | { | ||
282 | struct el_subpacket_annotation *a = subpacket_annotation_list; | ||
283 | |||
284 | if (a == NULL) subpacket_annotation_list = new; | ||
285 | else { | ||
286 | for (; a->next != NULL; a = a->next) { | ||
287 | if ((a->class == new->class && a->type == new->type) || | ||
288 | a == new) { | ||
289 | printk("Attempted to re-register " | ||
290 | "subpacket annotation\n"); | ||
291 | return -EINVAL; | ||
292 | } | ||
293 | } | ||
294 | a->next = new; | ||
295 | } | ||
296 | new->next = NULL; | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | int __init | ||
302 | cdl_register_subpacket_handler(struct el_subpacket_handler *new) | ||
303 | { | ||
304 | struct el_subpacket_handler *h = subpacket_handler_list; | ||
305 | |||
306 | if (h == NULL) subpacket_handler_list = new; | ||
307 | else { | ||
308 | for (; h->next != NULL; h = h->next) { | ||
309 | if (h->class == new->class || h == new) { | ||
310 | printk("Attempted to re-register " | ||
311 | "subpacket handler\n"); | ||
312 | return -EINVAL; | ||
313 | } | ||
314 | } | ||
315 | h->next = new; | ||
316 | } | ||
317 | new->next = NULL; | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | |||
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c new file mode 100644 index 000000000000..64f59f2fcf5c --- /dev/null +++ b/arch/alpha/kernel/err_ev6.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/err_ev6.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | * Error handling code supporting Alpha systems | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/sched.h> | ||
12 | |||
13 | #include <asm/io.h> | ||
14 | #include <asm/hwrpb.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/err_common.h> | ||
17 | #include <asm/err_ev6.h> | ||
18 | |||
19 | #include "err_impl.h" | ||
20 | #include "proto.h" | ||
21 | |||
22 | static int | ||
23 | ev6_parse_ibox(u64 i_stat, int print) | ||
24 | { | ||
25 | int status = MCHK_DISPOSITION_REPORT; | ||
26 | |||
27 | #define EV6__I_STAT__PAR (1UL << 29) | ||
28 | #define EV6__I_STAT__ERRMASK (EV6__I_STAT__PAR) | ||
29 | |||
30 | if (!(i_stat & EV6__I_STAT__ERRMASK)) | ||
31 | return MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
32 | |||
33 | if (!print) | ||
34 | return status; | ||
35 | |||
36 | if (i_stat & EV6__I_STAT__PAR) | ||
37 | printk("%s Icache parity error\n", err_print_prefix); | ||
38 | |||
39 | return status; | ||
40 | } | ||
41 | |||
/*
 * ev6_parse_mbox - decode EV6 Mbox/Dcache error state.
 * @mm_stat: MM_STAT register value from the logout frame.
 * @d_stat:  DC_STAT register value.
 * @c_stat:  C_STAT register value (used only to distinguish a real
 *           load ECC error from a speculative one).
 * @print:   non-zero to print the decoded error(s).
 *
 * Returns MCHK_DISPOSITION_UNKNOWN_ERROR when no known error bit is
 * set in either register, otherwise MCHK_DISPOSITION_REPORT.
 */
static int
ev6_parse_mbox(u64 mm_stat, u64 d_stat, u64 c_stat, int print)
{
	int status = MCHK_DISPOSITION_REPORT;

#define EV6__MM_STAT__DC_TAG_PERR	(1UL << 10)
#define EV6__MM_STAT__ERRMASK		(EV6__MM_STAT__DC_TAG_PERR)
#define EV6__D_STAT__TPERR_P0		(1UL << 0)
#define EV6__D_STAT__TPERR_P1		(1UL << 1)
#define EV6__D_STAT__ECC_ERR_ST		(1UL << 2)
#define EV6__D_STAT__ECC_ERR_LD		(1UL << 3)
#define EV6__D_STAT__SEO		(1UL << 4)
#define EV6__D_STAT__ERRMASK		(EV6__D_STAT__TPERR_P0 |	\
					 EV6__D_STAT__TPERR_P1 |	\
					 EV6__D_STAT__ECC_ERR_ST |	\
					 EV6__D_STAT__ECC_ERR_LD |	\
					 EV6__D_STAT__SEO)

	if (!(d_stat & EV6__D_STAT__ERRMASK) &&
	    !(mm_stat & EV6__MM_STAT__ERRMASK))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

	if (!print)
		return status;

	/* Report every error bit that is set, not just the first. */
	if (mm_stat & EV6__MM_STAT__DC_TAG_PERR)
		printk("%s Dcache tag parity error on probe\n",
		       err_print_prefix);
	if (d_stat & EV6__D_STAT__TPERR_P0)
		printk("%s Dcache tag parity error - pipe 0\n",
		       err_print_prefix);
	if (d_stat & EV6__D_STAT__TPERR_P1)
		printk("%s Dcache tag parity error - pipe 1\n",
		       err_print_prefix);
	if (d_stat & EV6__D_STAT__ECC_ERR_ST)
		printk("%s ECC error occurred on a store\n",
		       err_print_prefix);
	if (d_stat & EV6__D_STAT__ECC_ERR_LD)
		/* Zero C_STAT means the load was speculative. */
		printk("%s ECC error occurred on a %s load\n",
		       err_print_prefix,
		       c_stat ? "" : "speculative ");
	if (d_stat & EV6__D_STAT__SEO)
		printk("%s Dcache second error\n", err_print_prefix);

	return status;
}
88 | |||
/*
 * ev6_parse_cbox - decode EV6 Cbox (cache/system interface) errors.
 * @c_addr: faulting address (C_ADDR).
 * @c1_syn: lower ECC syndrome (DC1_SYNDROME).
 * @c2_syn: upper ECC syndrome (DC0_SYNDROME).
 * @c_stat: C_STAT register value; encodes source, stream and
 *          single/double-bit in the fields extracted below.
 * @c_sts:  C_STS block-status bits (shared/dirty/valid/parity).
 * @print:  non-zero to print the decoded error.
 *
 * Returns MCHK_DISPOSITION_UNKNOWN_ERROR when no Cbox error is
 * encoded, otherwise MCHK_DISPOSITION_REPORT.
 */
static int
ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
	       u64 c_stat, u64 c_sts, int print)
{
	/* Indexed by the SOURCE field of C_STAT (0-7). */
	char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
			       "MEMORY", "BCACHE", "DCACHE",
			       "BCACHE PROBE", "BCACHE PROBE" };
	char *streamname[] = { "D", "I" };
	char *bitsname[] = { "SINGLE", "DOUBLE" };
	int status = MCHK_DISPOSITION_REPORT;
	int source = -1, stream = -1, bits = -1;

#define EV6__C_STAT__BC_PERR		(0x01)
#define EV6__C_STAT__DC_PERR		(0x02)
#define EV6__C_STAT__DSTREAM_MEM_ERR	(0x03)
#define EV6__C_STAT__DSTREAM_BC_ERR	(0x04)
#define EV6__C_STAT__DSTREAM_DC_ERR	(0x05)
#define EV6__C_STAT__PROBE_BC_ERR0	(0x06)	/* both 6 and 7 indicate... */
#define EV6__C_STAT__PROBE_BC_ERR1	(0x07)	/* ...probe bc error.	    */
#define EV6__C_STAT__ISTREAM_MEM_ERR	(0x0B)
#define EV6__C_STAT__ISTREAM_BC_ERR	(0x0C)
#define EV6__C_STAT__DSTREAM_MEM_DBL	(0x13)
#define EV6__C_STAT__DSTREAM_BC_DBL	(0x14)
#define EV6__C_STAT__ISTREAM_MEM_DBL	(0x1B)
#define EV6__C_STAT__ISTREAM_BC_DBL	(0x1C)
#define EV6__C_STAT__SOURCE_MEMORY	(0x03)
#define EV6__C_STAT__SOURCE_BCACHE	(0x04)
#define EV6__C_STAT__SOURCE__S		(0)
#define EV6__C_STAT__SOURCE__M		(0x07)
#define EV6__C_STAT__ISTREAM__S		(3)
#define EV6__C_STAT__ISTREAM__M		(0x01)
#define EV6__C_STAT__DOUBLE__S		(4)
#define EV6__C_STAT__DOUBLE__M		(0x01)
#define EV6__C_STAT__ERRMASK		(0x1F)
#define EV6__C_STS__SHARED		(1 << 0)
#define EV6__C_STS__DIRTY		(1 << 1)
#define EV6__C_STS__VALID		(1 << 2)
#define EV6__C_STS__PARITY		(1 << 3)

	if (!(c_stat & EV6__C_STAT__ERRMASK))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

	if (!print)
		return status;

	/* Decode the C_STAT bit-fields. */
	source = EXTRACT(c_stat, EV6__C_STAT__SOURCE);
	stream = EXTRACT(c_stat, EV6__C_STAT__ISTREAM);
	bits = EXTRACT(c_stat, EV6__C_STAT__DOUBLE);

	/* The parity/probe encodings are not ECC errors; suppress the
	   generic "ECC error from <source>" line by clearing source. */
	if (c_stat & EV6__C_STAT__BC_PERR) {
		printk("%s Bcache tag parity error\n", err_print_prefix);
		source = -1;
	}

	if (c_stat & EV6__C_STAT__DC_PERR) {
		printk("%s Dcache tag parity error\n", err_print_prefix);
		source = -1;
	}

	if (c_stat == EV6__C_STAT__PROBE_BC_ERR0 ||
	    c_stat == EV6__C_STAT__PROBE_BC_ERR1) {
		printk("%s Bcache single-bit error on a probe hit\n",
		       err_print_prefix);
		source = -1;
	}

	if (source != -1)
		printk("%s %s-STREAM %s-BIT ECC error from %s\n",
		       err_print_prefix,
		       streamname[stream], bitsname[bits], sourcename[source]);

	printk("%s Address: 0x%016lx\n"
	       "  Syndrome[upper.lower]: %02lx.%02lx\n",
	       err_print_prefix,
	       c_addr,
	       c2_syn, c1_syn);

	/* Block status is only meaningful for memory/Bcache sources. */
	if (source == EV6__C_STAT__SOURCE_MEMORY ||
	    source == EV6__C_STAT__SOURCE_BCACHE)
		printk("%s Block status: %s%s%s%s\n",
		       err_print_prefix,
		       (c_sts & EV6__C_STS__SHARED) ? "SHARED " : "",
		       (c_sts & EV6__C_STS__DIRTY) ? "DIRTY " : "",
		       (c_sts & EV6__C_STS__VALID) ? "VALID " : "",
		       (c_sts & EV6__C_STS__PARITY) ? "PARITY " : "");

	return status;
}
177 | |||
/*
 * ev6_register_error_handlers - register EV6 subpacket handlers.
 * Intentionally empty: EV6 has no dynamically registered handlers.
 * Kept so all CPU variants share the same init interface.
 */
void
ev6_register_error_handlers(void)
{
	/* None right now. */
}
183 | |||
/*
 * ev6_process_logout_frame - parse (and optionally print) an EV6
 * machine-check logout frame.
 * @mchk_header: frame, actually a struct el_common_EV6_mcheck.
 * @print:       non-zero to print; zero to just classify.
 *
 * Returns the OR of the dispositions from the Ibox, Mbox, and Cbox
 * parsers.  When printing and anything is to be reported, also dumps
 * key frame registers and the raw frame itself.
 */
int
ev6_process_logout_frame(struct el_common *mchk_header, int print)
{
	struct el_common_EV6_mcheck *ev6mchk =
		(struct el_common_EV6_mcheck *)mchk_header;
	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;

	/* Accumulate dispositions from each box's error state. */
	status |= ev6_parse_ibox(ev6mchk->I_STAT, print);
	status |= ev6_parse_mbox(ev6mchk->MM_STAT, ev6mchk->DC_STAT,
				 ev6mchk->C_STAT, print);
	status |= ev6_parse_cbox(ev6mchk->C_ADDR, ev6mchk->DC1_SYNDROME,
				 ev6mchk->DC0_SYNDROME, ev6mchk->C_STAT,
				 ev6mchk->C_STS, print);

	if (!print)
		return status;

	if (status != MCHK_DISPOSITION_DISMISS) {
		char *saved_err_prefix = err_print_prefix;

		/*
		 * Dump some additional information from the frame
		 */
		printk("%s EXC_ADDR: 0x%016lx IER_CM: 0x%016lx"
		       " ISUM: 0x%016lx\n"
		       " PAL_BASE: 0x%016lx I_CTL: 0x%016lx"
		       " PCTX: 0x%016lx\n",
		       err_print_prefix,
		       ev6mchk->EXC_ADDR, ev6mchk->IER_CM, ev6mchk->ISUM,
		       ev6mchk->PAL_BASE, ev6mchk->I_CTL, ev6mchk->PCTX);

		if (status == MCHK_DISPOSITION_UNKNOWN_ERROR) {
			printk("%s UNKNOWN error, frame follows:\n",
			       err_print_prefix);
		} else {
			/* had decode -- downgrade print level for frame */
			err_print_prefix = KERN_NOTICE;
		}

		mchk_dump_logout_frame(mchk_header);

		err_print_prefix = saved_err_prefix;
	}

	return status;
}
230 | |||
/*
 * ev6_machine_check - EV6 machine-check interrupt handler.
 * @vector: SCB vector (SCB_Q_PROCERR for correctable errors).
 * @la_ptr: address of the PALcode logout frame.
 * @regs:   interrupted register state.
 *
 * Classifies the logout frame; dismissable-only errors are silently
 * dropped, anything else is reported at KERN_CRIT.  Always releases
 * the logout frame back to PALcode via wrmces().
 */
void
ev6_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
{
	struct el_common *mchk_header = (struct el_common *)la_ptr;

	/*
	 * Sync the processor
	 */
	mb();
	draina();

	/*
	 * Parse the logout frame without printing first. If the only error(s)
	 * found are have a disposition of "dismiss", then just dismiss them
	 * and don't print any message
	 */
	if (ev6_process_logout_frame(mchk_header, 0) !=
	    MCHK_DISPOSITION_DISMISS) {
		char *saved_err_prefix = err_print_prefix;
		err_print_prefix = KERN_CRIT;	/* escalate for the report */

		/*
		 * Either a nondismissable error was detected or no
		 * recognized error was detected in the logout frame
		 * -- report the error in either case
		 */
		printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n",
		       err_print_prefix,
		       (vector == SCB_Q_PROCERR)?"Correctable":"Uncorrectable",
		       (unsigned int)vector, (int)smp_processor_id());

		ev6_process_logout_frame(mchk_header, 1);
		dik_show_regs(regs, NULL);

		err_print_prefix = saved_err_prefix;
	}

	/*
	 * Release the logout frame
	 */
	wrmces(0x7);
	mb();
}
274 | |||
diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c new file mode 100644 index 000000000000..bf52ba691957 --- /dev/null +++ b/arch/alpha/kernel/err_ev7.c | |||
@@ -0,0 +1,289 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/err_ev7.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | * Error handling code supporting Alpha systems | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/sched.h> | ||
12 | |||
13 | #include <asm/io.h> | ||
14 | #include <asm/hwrpb.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/err_common.h> | ||
17 | #include <asm/err_ev7.h> | ||
18 | |||
19 | #include "err_impl.h" | ||
20 | #include "proto.h" | ||
21 | |||
22 | struct ev7_lf_subpackets * | ||
23 | ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr, | ||
24 | struct ev7_lf_subpackets *lf_subpackets) | ||
25 | { | ||
26 | struct el_subpacket *subpacket; | ||
27 | int i; | ||
28 | |||
29 | /* | ||
30 | * A Marvel machine check frame is always packaged in an | ||
31 | * el_subpacket of class HEADER, type LOGOUT_FRAME. | ||
32 | */ | ||
33 | if (el_ptr->class != EL_CLASS__HEADER || | ||
34 | el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME) | ||
35 | return NULL; | ||
36 | |||
37 | /* | ||
38 | * It is a logout frame header. Look at the one subpacket. | ||
39 | */ | ||
40 | el_ptr = (struct el_subpacket *) | ||
41 | ((unsigned long)el_ptr + el_ptr->length); | ||
42 | |||
43 | /* | ||
44 | * It has to be class PAL, type LOGOUT_FRAME. | ||
45 | */ | ||
46 | if (el_ptr->class != EL_CLASS__PAL || | ||
47 | el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME) | ||
48 | return NULL; | ||
49 | |||
50 | lf_subpackets->logout = (struct ev7_pal_logout_subpacket *) | ||
51 | el_ptr->by_type.raw.data_start; | ||
52 | |||
53 | /* | ||
54 | * Process the subpackets. | ||
55 | */ | ||
56 | subpacket = (struct el_subpacket *) | ||
57 | ((unsigned long)el_ptr + el_ptr->length); | ||
58 | for (i = 0; | ||
59 | subpacket && i < lf_subpackets->logout->subpacket_count; | ||
60 | subpacket = (struct el_subpacket *) | ||
61 | ((unsigned long)subpacket + subpacket->length), i++) { | ||
62 | /* | ||
63 | * All subpackets should be class PAL. | ||
64 | */ | ||
65 | if (subpacket->class != EL_CLASS__PAL) { | ||
66 | printk("%s**UNEXPECTED SUBPACKET CLASS %d " | ||
67 | "IN LOGOUT FRAME (packet %d\n", | ||
68 | err_print_prefix, subpacket->class, i); | ||
69 | return NULL; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Remember the subpacket. | ||
74 | */ | ||
75 | switch(subpacket->type) { | ||
76 | case EL_TYPE__PAL__EV7_PROCESSOR: | ||
77 | lf_subpackets->ev7 = | ||
78 | (struct ev7_pal_processor_subpacket *) | ||
79 | subpacket->by_type.raw.data_start; | ||
80 | break; | ||
81 | |||
82 | case EL_TYPE__PAL__EV7_RBOX: | ||
83 | lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *) | ||
84 | subpacket->by_type.raw.data_start; | ||
85 | break; | ||
86 | |||
87 | case EL_TYPE__PAL__EV7_ZBOX: | ||
88 | lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *) | ||
89 | subpacket->by_type.raw.data_start; | ||
90 | break; | ||
91 | |||
92 | case EL_TYPE__PAL__EV7_IO: | ||
93 | lf_subpackets->io = (struct ev7_pal_io_subpacket *) | ||
94 | subpacket->by_type.raw.data_start; | ||
95 | break; | ||
96 | |||
97 | case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE: | ||
98 | case EL_TYPE__PAL__ENV__AIRMOVER_FAN: | ||
99 | case EL_TYPE__PAL__ENV__VOLTAGE: | ||
100 | case EL_TYPE__PAL__ENV__INTRUSION: | ||
101 | case EL_TYPE__PAL__ENV__POWER_SUPPLY: | ||
102 | case EL_TYPE__PAL__ENV__LAN: | ||
103 | case EL_TYPE__PAL__ENV__HOT_PLUG: | ||
104 | lf_subpackets->env[ev7_lf_env_index(subpacket->type)] = | ||
105 | (struct ev7_pal_environmental_subpacket *) | ||
106 | subpacket->by_type.raw.data_start; | ||
107 | break; | ||
108 | |||
109 | default: | ||
110 | /* | ||
111 | * Don't know what kind of frame this is. | ||
112 | */ | ||
113 | return NULL; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | return lf_subpackets; | ||
118 | } | ||
119 | |||
120 | void | ||
121 | ev7_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs) | ||
122 | { | ||
123 | struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr; | ||
124 | char *saved_err_prefix = err_print_prefix; | ||
125 | |||
126 | /* | ||
127 | * Sync the processor | ||
128 | */ | ||
129 | mb(); | ||
130 | draina(); | ||
131 | |||
132 | err_print_prefix = KERN_CRIT; | ||
133 | printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n", | ||
134 | err_print_prefix, | ||
135 | (vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable", | ||
136 | (unsigned int)vector, (int)smp_processor_id()); | ||
137 | el_process_subpacket(el_ptr); | ||
138 | err_print_prefix = saved_err_prefix; | ||
139 | |||
140 | /* | ||
141 | * Release the logout frame | ||
142 | */ | ||
143 | wrmces(0x7); | ||
144 | mb(); | ||
145 | } | ||
146 | |||
/*
 * Per-quadword labels for the EV7 processor subpacket, in frame order.
 * The NULL entry terminates the list.
 */
static char *el_ev7_processor_subpacket_annotation[] = {
	"Subpacket Header",	"I_STAT",	"DC_STAT",
	"C_ADDR",		"C_SYNDROME_1",	"C_SYNDROME_0",
	"C_STAT",		"C_STS",	"MM_STAT",
	"EXC_ADDR",		"IER_CM",	"ISUM",
	"PAL_BASE",		"I_CTL",	"PROCESS_CONTEXT",
	"CBOX_CTL",		"CBOX_STP_CTL",	"CBOX_ACC_CTL",
	"CBOX_LCL_SET",		"CBOX_GLB_SET",	"BBOX_CTL",
	"BBOX_ERR_STS",		"BBOX_ERR_IDX",	"CBOX_DDP_ERR_STS",
	"BBOX_DAT_RMP",		NULL
};
158 | |||
/*
 * Per-quadword labels for the EV7 ZBOX (memory controller) subpacket,
 * in frame order; NULL terminates the list.
 */
static char *el_ev7_zbox_subpacket_annotation[] = {
	"Subpacket Header",
	"ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
	"ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
	"ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR",
	"ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL",
	"ZBOX(0): reserved / DIFT_ERR_STATUS",
	"ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
	"ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
	"ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR",
	"ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL",
	"ZBOX(1): reserved / DIFT_ERR_STATUS",
	"CBOX_CTL",		"CBOX_STP_CTL",
	"ZBOX(0)_ERROR_PA",	"ZBOX(1)_ERROR_PA",
	"ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME",
	NULL
};
176 | |||
/*
 * Per-quadword labels for the EV7 RBOX (router) subpacket, in frame
 * order; NULL terminates the list.
 */
static char *el_ev7_rbox_subpacket_annotation[] = {
	"Subpacket Header",	"RBOX_CFG",	"RBOX_N_CFG",
	"RBOX_S_CFG",		"RBOX_E_CFG",	"RBOX_W_CFG",
	"RBOX_N_ERR",		"RBOX_S_ERR",	"RBOX_E_ERR",
	"RBOX_W_ERR",		"RBOX_IO_CFG",	"RBOX_IO_ERR",
	"RBOX_L_ERR",		"RBOX_WHOAMI",	"RBOX_IMASL",
	"RBOX_INTQ",		"RBOX_INT",	NULL
};
185 | |||
/*
 * Per-quadword labels for the EV7 IO (IO7) subpacket, in frame order;
 * NULL terminates the list.  PO0..PO3 are the four down-hose ports.
 */
static char *el_ev7_io_subpacket_annotation[] = {
	"Subpacket Header",	"IO_ASIC_REV",	"IO_SYS_REV",
	"IO7_UPH",		"HPI_CTL",	"CRD_CTL",
	"HEI_CTL",		"PO7_ERROR_SUM","PO7_UNCRR_SYM",
	"PO7_CRRCT_SYM",	"PO7_UGBGE_SYM","PO7_ERR_PKT0",
	"PO7_ERR_PKT1",		"reserved",	"reserved",
	"PO0_ERR_SUM",		"PO0_TLB_ERR",	"PO0_SPL_COMPLT",
	"PO0_TRANS_SUM",	"PO0_FIRST_ERR","PO0_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	"PO1_ERR_SUM",		"PO1_TLB_ERR",	"PO1_SPL_COMPLT",
	"PO1_TRANS_SUM",	"PO1_FIRST_ERR","PO1_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	"PO2_ERR_SUM",		"PO2_TLB_ERR",	"PO2_SPL_COMPLT",
	"PO2_TRANS_SUM",	"PO2_FIRST_ERR","PO2_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	"PO3_ERR_SUM",		"PO3_TLB_ERR",	"PO3_SPL_COMPLT",
	"PO3_TRANS_SUM",	"PO3_FIRST_ERR","PO3_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	NULL
};
210 | |||
/*
 * Annotation table registered for the EV7 PAL subpacket types; each
 * entry ties a (class, type, revision) triple to the label array above.
 */
static struct el_subpacket_annotation el_ev7_pal_annotations[] = {
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_PROCESSOR,
			     1,
			     "EV7 Processor Subpacket",
			     el_ev7_processor_subpacket_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_ZBOX,
			     1,
			     "EV7 ZBOX Subpacket",
			     el_ev7_zbox_subpacket_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_RBOX,
			     1,
			     "EV7 RBOX Subpacket",
			     el_ev7_rbox_subpacket_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_IO,
			     1,
			     "EV7 IO Subpacket",
			     el_ev7_io_subpacket_annotation)
};
233 | |||
/*
 * ev7_process_pal_subpacket - handler for class-PAL subpackets.
 *
 * For a LOGOUT_FRAME subpacket, prints the frame summary (LPID, RBOX
 * whami, timestamp, EXC_ADDR, HALT_CODE) and then processes the
 * contained subpackets.  Any other PAL type is just annotated.
 *
 * Returns a pointer to the subpacket following this one (computed from
 * header->length), or NULL if the header is not class PAL.
 */
static struct el_subpacket *
ev7_process_pal_subpacket(struct el_subpacket *header)
{
	struct ev7_pal_subpacket *packet;

	if (header->class != EL_CLASS__PAL) {
		printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
		       err_print_prefix,
		       header->class, header->type);
		return NULL;
	}

	packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start;

	switch(header->type) {
	case EL_TYPE__PAL__LOGOUT_FRAME:
		printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n",
		       err_print_prefix,
		       packet->by_type.logout.whami,
		       packet->by_type.logout.rbox_whami);
		el_print_timestamp(&packet->by_type.logout.timestamp);
		printk("%s  EXC_ADDR: %016lx\n"
		       "  HALT_CODE: %lx\n",
		       err_print_prefix,
		       packet->by_type.logout.exc_addr,
		       packet->by_type.logout.halt_code);
		/* Recurse into the subpackets carried by the frame. */
		el_process_subpackets(header,
				      packet->by_type.logout.subpacket_count);
		break;
	default:
		/* Unrecognized PAL subpacket -- dump it annotated. */
		printk("%s ** PAL TYPE %d SUBPACKET\n",
		       err_print_prefix,
		       header->type);
		el_annotate_subpacket(header);
		break;
	}

	return (struct el_subpacket *)((unsigned long)header + header->length);
}
273 | |||
/* Handler registered for all class-PAL subpackets on EV7 systems. */
struct el_subpacket_handler ev7_pal_subpacket_handler =
	SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
276 | |||
277 | void | ||
278 | ev7_register_error_handlers(void) | ||
279 | { | ||
280 | int i; | ||
281 | |||
282 | for(i = 0; | ||
283 | i<sizeof(el_ev7_pal_annotations)/sizeof(el_ev7_pal_annotations[1]); | ||
284 | i++) { | ||
285 | cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]); | ||
286 | } | ||
287 | cdl_register_subpacket_handler(&ev7_pal_subpacket_handler); | ||
288 | } | ||
289 | |||
diff --git a/arch/alpha/kernel/err_impl.h b/arch/alpha/kernel/err_impl.h new file mode 100644 index 000000000000..64e9b73809fa --- /dev/null +++ b/arch/alpha/kernel/err_impl.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/err_impl.h | ||
3 | * | ||
4 | * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | * Contains declarations and macros to support Alpha error handling | ||
7 | * implementations. | ||
8 | */ | ||
9 | |||
/* Forward declarations; full definitions live in <asm/err_common.h>
 * and <asm/err_ev7.h>. */
union el_timestamp;
struct el_subpacket;
struct ev7_lf_subpackets;

/*
 * One registered annotation: maps a subpacket (class, type, revision)
 * to a description and a NULL-terminated array of per-quadword labels.
 */
struct el_subpacket_annotation {
	struct el_subpacket_annotation *next;	/* registration list link */
	u16 class;
	u16 type;
	u16 revision;
	char *description;			/* human-readable name */
	char **annotation;			/* per-quadword labels */
};
#define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)}

/*
 * One registered handler for all subpackets of a given class.  The
 * handler returns a pointer to the subpacket following the one it
 * consumed.
 */
struct el_subpacket_handler {
	struct el_subpacket_handler *next;	/* registration list link */
	u16 class;
	struct el_subpacket *(*handler)(struct el_subpacket *);
};
#define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)}

/*
 * Manipulate a field from a register given its name. defines
 * for the LSB (__S - shift count) and bitmask (__M) are required
 *
 * EXTRACT(u, f) - extracts the field and places it at bit position 0
 * GEN_MASK(f) - creates an in-position mask for the field
 */
#define EXTRACT(u, f) (((u) >> f##__S) & f##__M)
#define GEN_MASK(f) ((u64)f##__M << f##__S)

/*
 * err_common.c
 */
extern char *err_print_prefix;

extern void mchk_dump_mem(void *, size_t, char **);
extern void mchk_dump_logout_frame(struct el_common *);
extern void el_print_timestamp(union el_timestamp *);
extern void el_process_subpackets(struct el_subpacket *, int);
extern struct el_subpacket *el_process_subpacket(struct el_subpacket *);
extern void el_annotate_subpacket(struct el_subpacket *);
extern void cdl_check_console_data_log(void);
extern int cdl_register_subpacket_annotation(struct el_subpacket_annotation *);
extern int cdl_register_subpacket_handler(struct el_subpacket_handler *);

/*
 * err_ev7.c
 */
extern struct ev7_lf_subpackets *
ev7_collect_logout_frame_subpackets(struct el_subpacket *,
				    struct ev7_lf_subpackets *);
extern void ev7_register_error_handlers(void);
extern void ev7_machine_check(u64, u64, struct pt_regs *);

/*
 * err_ev6.c
 */
extern void ev6_register_error_handlers(void);
extern int ev6_process_logout_frame(struct el_common *, int);
extern void ev6_machine_check(u64, u64, struct pt_regs *);

/*
 * err_marvel.c
 */
extern void marvel_machine_check(u64, u64, struct pt_regs *);
extern void marvel_register_error_handlers(void);

/*
 * err_titan.c
 */
extern int titan_process_logout_frame(struct el_common *, int);
extern void titan_machine_check(u64, u64, struct pt_regs *);
extern void titan_register_error_handlers(void);
extern int privateer_process_logout_frame(struct el_common *, int);
extern void privateer_machine_check(u64, u64, struct pt_regs *);
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c new file mode 100644 index 000000000000..70b38b1d2af3 --- /dev/null +++ b/arch/alpha/kernel/err_marvel.c | |||
@@ -0,0 +1,1159 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/err_marvel.c | ||
3 | * | ||
4 | * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/sched.h> | ||
11 | |||
12 | #include <asm/io.h> | ||
13 | #include <asm/console.h> | ||
14 | #include <asm/core_marvel.h> | ||
15 | #include <asm/hwrpb.h> | ||
16 | #include <asm/smp.h> | ||
17 | #include <asm/err_common.h> | ||
18 | #include <asm/err_ev7.h> | ||
19 | |||
20 | #include "err_impl.h" | ||
21 | #include "proto.h" | ||
22 | |||
/*
 * marvel_print_680_frame - print any environmental ("680") subpackets
 * recorded in the logout frame.
 *
 * Compiles to an empty function unless CONFIG_VERBOSE_MCHECK is set.
 */
static void
marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets)
{
#ifdef CONFIG_VERBOSE_MCHECK
	struct ev7_pal_environmental_subpacket *env;
	/* Display names for each environmental subpacket type;
	 * terminated by a { 0, NULL } sentinel. */
	struct { int type; char *name; } ev_packets[] = {
		{ EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE,
		  "Ambient Temperature" },
		{ EL_TYPE__PAL__ENV__AIRMOVER_FAN,
		  "AirMover / Fan" },
		{ EL_TYPE__PAL__ENV__VOLTAGE,
		  "Voltage" },
		{ EL_TYPE__PAL__ENV__INTRUSION,
		  "Intrusion" },
		{ EL_TYPE__PAL__ENV__POWER_SUPPLY,
		  "Power Supply" },
		{ EL_TYPE__PAL__ENV__LAN,
		  "LAN" },
		{ EL_TYPE__PAL__ENV__HOT_PLUG,
		  "Hot Plug" },
		{ 0, NULL }
	};
	int i;

	for (i = 0; ev_packets[i].type != 0; i++) {
		env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)];
		if (!env)
			continue;	/* not present in this frame */

		printk("%s**%s event (cabinet %d, drawer %d)\n",
		       err_print_prefix,
		       ev_packets[i].name,
		       env->cabinet,
		       env->drawer);
		printk("%s   Module Type: 0x%x - Unit ID 0x%x - "
		       "Condition 0x%x\n",
		       err_print_prefix,
		       env->module_type,
		       env->unit_id,
		       env->condition);
	}
#endif /* CONFIG_VERBOSE_MCHECK */
}
66 | |||
67 | static int | ||
68 | marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print) | ||
69 | { | ||
70 | int status = MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
71 | int i; | ||
72 | |||
73 | for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE); | ||
74 | i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG); | ||
75 | i++) { | ||
76 | if (lf_subpackets->env[i]) | ||
77 | status = MCHK_DISPOSITION_REPORT; | ||
78 | } | ||
79 | |||
80 | if (print) | ||
81 | marvel_print_680_frame(lf_subpackets); | ||
82 | |||
83 | return status; | ||
84 | } | ||
85 | |||
86 | #ifdef CONFIG_VERBOSE_MCHECK | ||
87 | |||
/*
 * marvel_print_err_cyc - decode an IO7 ERR_CYC field (shared layout
 * between the correctable and uncorrectable symptom registers):
 * which packet type was in error, whether the error hit the header or
 * the data portion, and in which cycle/flit.
 */
static void
marvel_print_err_cyc(u64 err_cyc)
{
	/* Indexed by the 3-bit PACKET field (0..7 after masking). */
	static char *packet_desc[] = {
		"No Error",
		"UNKNOWN",
		"1 cycle (1 or 2 flit packet)",
		"2 cycles (3 flit packet)",
		"9 cycles (18 flit packet)",
		"10 cycles (19 flit packet)",
		"UNKNOWN",
		"UNKNOWN",
		"UNKNOWN"
	};

	/* Field layout of ERR_CYC (shift __S / mask __M for EXTRACT). */
#define IO7__ERR_CYC__ODD_FLT	(1UL <<  0)
#define IO7__ERR_CYC__EVN_FLT	(1UL <<  1)
#define IO7__ERR_CYC__PACKET__S	(6)
#define IO7__ERR_CYC__PACKET__M	(0x7)
#define IO7__ERR_CYC__LOC	(1UL <<  5)
#define IO7__ERR_CYC__CYCLE__S	(2)
#define IO7__ERR_CYC__CYCLE__M	(0x7)

	printk("%s        Packet In Error: %s\n"
	       "%s        Error in %s, cycle %ld%s%s\n",
	       err_print_prefix,
	       packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
	       err_print_prefix,
	       (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER",
	       EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE),
	       (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "",
	       (err_cyc & IO7__ERR_CYC__EVN_FLT) ? " [Even Flit]": "");
}
121 | |||
/*
 * marvel_print_po7_crrct_sym - decode the PO7_CRRCT_SYM register:
 * the syndrome and error-cycle symptoms of a corrected (single-bit)
 * error.
 */
static void
marvel_print_po7_crrct_sym(u64 crrct_sym)
{
	/* Field layout of PO7_CRRCT_SYM. */
#define IO7__PO7_CRRCT_SYM__SYN__S	(0)
#define IO7__PO7_CRRCT_SYM__SYN__M	(0x7f)
#define IO7__PO7_CRRCT_SYM__ERR_CYC__S	(7)   /* ERR_CYC + ODD_FLT + EVN_FLT */
#define IO7__PO7_CRRCT_SYM__ERR_CYC__M	(0x1ff)


	printk("%s      Correctable Error Symptoms:\n"
	       "%s        Syndrome: 0x%lx\n",
	       err_print_prefix,
	       err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN));
	marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC));
}
137 | |||
138 | static void | ||
139 | marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) | ||
140 | { | ||
141 | static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" }; | ||
142 | static char *clk_decode[] = { | ||
143 | "No Error", | ||
144 | "One extra rising edge", | ||
145 | "Two extra rising edges", | ||
146 | "Lost one clock" | ||
147 | }; | ||
148 | static char *port_names[] = { "Port 0", "Port 1", | ||
149 | "Port 2", "Port 3", | ||
150 | "Unknown Port", "Unknown Port", | ||
151 | "Unknown Port", "Port 7" }; | ||
152 | int scratch, i; | ||
153 | |||
154 | #define IO7__PO7_UNCRR_SYM__SYN__S (0) | ||
155 | #define IO7__PO7_UNCRR_SYM__SYN__M (0x7f) | ||
156 | #define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */ | ||
157 | #define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... + EVN_FLT */ | ||
158 | #define IO7__PO7_UNCRR_SYM__CLK__S (16) | ||
159 | #define IO7__PO7_UNCRR_SYM__CLK__M (0xff) | ||
160 | #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24) | ||
161 | #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25) | ||
162 | #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26) | ||
163 | #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27) | ||
164 | #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28) | ||
165 | #define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29) | ||
166 | #define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30) | ||
167 | #define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31) | ||
168 | #define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32) | ||
169 | #define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff) | ||
170 | #define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40) | ||
171 | #define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff) | ||
172 | #define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48) | ||
173 | #define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff) | ||
174 | |||
175 | #define IO7__STRV_VTR__LSI__INTX__S (0) | ||
176 | #define IO7__STRV_VTR__LSI__INTX__M (0x3) | ||
177 | #define IO7__STRV_VTR__LSI__SLOT__S (2) | ||
178 | #define IO7__STRV_VTR__LSI__SLOT__M (0x7) | ||
179 | #define IO7__STRV_VTR__LSI__BUS__S (5) | ||
180 | #define IO7__STRV_VTR__LSI__BUS__M (0x3) | ||
181 | #define IO7__STRV_VTR__MSI__INTNUM__S (0) | ||
182 | #define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff) | ||
183 | #define IO7__STRV_VTR__IS_MSI (1UL << 9) | ||
184 | |||
185 | printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix); | ||
186 | uncrr_sym &= valid_mask; | ||
187 | |||
188 | if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) | ||
189 | printk("%s Syndrome: 0x%lx\n", | ||
190 | err_print_prefix, | ||
191 | EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); | ||
192 | |||
193 | if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC)) | ||
194 | marvel_print_err_cyc(EXTRACT(uncrr_sym, | ||
195 | IO7__PO7_UNCRR_SYM__ERR_CYC)); | ||
196 | |||
197 | scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK); | ||
198 | for (i = 0; i < 4; i++, scratch >>= 2) { | ||
199 | if (scratch & 0x3) | ||
200 | printk("%s Clock %s: %s\n", | ||
201 | err_print_prefix, | ||
202 | clk_names[i], clk_decode[scratch & 0x3]); | ||
203 | } | ||
204 | |||
205 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ) | ||
206 | printk("%s REQ Credit Timeout or Overflow\n", | ||
207 | err_print_prefix); | ||
208 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO) | ||
209 | printk("%s RIO Credit Timeout or Overflow\n", | ||
210 | err_print_prefix); | ||
211 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO) | ||
212 | printk("%s WIO Credit Timeout or Overflow\n", | ||
213 | err_print_prefix); | ||
214 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK) | ||
215 | printk("%s BLK Credit Timeout or Overflow\n", | ||
216 | err_print_prefix); | ||
217 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK) | ||
218 | printk("%s NBK Credit Timeout or Overflow\n", | ||
219 | err_print_prefix); | ||
220 | |||
221 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO) | ||
222 | printk("%s Read I/O Buffer Overflow\n", | ||
223 | err_print_prefix); | ||
224 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO) | ||
225 | printk("%s Write I/O Buffer Overflow\n", | ||
226 | err_print_prefix); | ||
227 | if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD) | ||
228 | printk("%s FWD Buffer Overflow\n", | ||
229 | err_print_prefix); | ||
230 | |||
231 | if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) { | ||
232 | int lost = scratch & (1UL << 4); | ||
233 | scratch &= ~lost; | ||
234 | for (i = 0; i < 8; i++, scratch >>= 1) { | ||
235 | if (!(scratch & 1)) | ||
236 | continue; | ||
237 | printk("%s Error Response sent to %s", | ||
238 | err_print_prefix, port_names[i]); | ||
239 | } | ||
240 | if (lost) | ||
241 | printk("%s Lost Error sent somewhere else\n", | ||
242 | err_print_prefix); | ||
243 | } | ||
244 | |||
245 | if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) { | ||
246 | for (i = 0; i < 8; i++, scratch >>= 1) { | ||
247 | if (!(scratch & 1)) | ||
248 | continue; | ||
249 | printk("%s Error Reported by %s", | ||
250 | err_print_prefix, port_names[i]); | ||
251 | } | ||
252 | } | ||
253 | |||
254 | if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) { | ||
255 | char starvation_message[80]; | ||
256 | |||
257 | scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR); | ||
258 | if (scratch & IO7__STRV_VTR__IS_MSI) | ||
259 | sprintf(starvation_message, | ||
260 | "MSI Interrupt 0x%x", | ||
261 | EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM)); | ||
262 | else | ||
263 | sprintf(starvation_message, | ||
264 | "LSI INT%c for Bus:Slot (%d:%d)\n", | ||
265 | 'A' + EXTRACT(scratch, | ||
266 | IO7__STRV_VTR__LSI__INTX), | ||
267 | EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS), | ||
268 | EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT)); | ||
269 | |||
270 | printk("%s Starvation Int Trigger By: %s\n", | ||
271 | err_print_prefix, starvation_message); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static void | ||
276 | marvel_print_po7_ugbge_sym(u64 ugbge_sym) | ||
277 | { | ||
278 | char opcode_str[10]; | ||
279 | |||
280 | #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6) | ||
281 | #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful) | ||
282 | #define IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40) | ||
283 | #define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff) | ||
284 | #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48) | ||
285 | #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf) | ||
286 | #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52) | ||
287 | #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff) | ||
288 | #define IO7__PO7_UGBGE_SYM__VALID (1UL << 63) | ||
289 | |||
290 | if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID)) | ||
291 | return; | ||
292 | |||
293 | switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) { | ||
294 | case 0x51: | ||
295 | sprintf(opcode_str, "Wr32"); | ||
296 | break; | ||
297 | case 0x50: | ||
298 | sprintf(opcode_str, "WrQW"); | ||
299 | break; | ||
300 | case 0x54: | ||
301 | sprintf(opcode_str, "WrIPR"); | ||
302 | break; | ||
303 | case 0xD8: | ||
304 | sprintf(opcode_str, "Victim"); | ||
305 | break; | ||
306 | case 0xC5: | ||
307 | sprintf(opcode_str, "BlkIO"); | ||
308 | break; | ||
309 | default: | ||
310 | sprintf(opcode_str, "0x%lx\n", | ||
311 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); | ||
312 | break; | ||
313 | } | ||
314 | |||
315 | printk("%s Up Hose Garbage Symptom:\n" | ||
316 | "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n", | ||
317 | err_print_prefix, | ||
318 | err_print_prefix, | ||
319 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), | ||
320 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID), | ||
321 | opcode_str); | ||
322 | |||
323 | if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) | ||
324 | printk("%s Packet Offset 0x%08lx\n", | ||
325 | err_print_prefix, | ||
326 | EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); | ||
327 | } | ||
328 | |||
329 | static void | ||
330 | marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) | ||
331 | { | ||
332 | u64 uncrr_sym_valid = 0; | ||
333 | |||
334 | #define IO7__PO7_ERRSUM__CR_SBE (1UL << 32) | ||
335 | #define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33) | ||
336 | #define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34) | ||
337 | #define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35) | ||
338 | #define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36) | ||
339 | #define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37) | ||
340 | #define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38) | ||
341 | #define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39) | ||
342 | #define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40) | ||
343 | #define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41) | ||
344 | #define IO7__PO7_ERRSUM__MAF_TO (1UL << 42) | ||
345 | #define IO7__PO7_ERRSUM__UGBGE (1UL << 43) | ||
346 | #define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44) | ||
347 | #define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45) | ||
348 | #define IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 46) | ||
349 | #define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47) | ||
350 | #define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51) | ||
351 | #define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52) | ||
352 | #define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53) | ||
353 | #define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54) | ||
354 | #define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55) | ||
355 | #define IO7__PO7_ERRSUM__HLT_INT (1UL << 56) | ||
356 | #define IO7__PO7_ERRSUM__HP_INT (1UL << 57) | ||
357 | #define IO7__PO7_ERRSUM__CRD_INT (1UL << 58) | ||
358 | #define IO7__PO7_ERRSUM__STV_INT (1UL << 59) | ||
359 | #define IO7__PO7_ERRSUM__HRD_INT (1UL << 60) | ||
360 | #define IO7__PO7_ERRSUM__BH_SUM (1UL << 61) | ||
361 | #define IO7__PO7_ERRSUM__ERR_LST (1UL << 62) | ||
362 | #define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63) | ||
363 | |||
364 | #define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \ | ||
365 | IO7__PO7_ERRSUM__CR_SBE) | ||
366 | |||
367 | /* | ||
368 | * Single bit errors aren't covered by ERR_VALID. | ||
369 | */ | ||
370 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) { | ||
371 | printk("%s %sSingle Bit Error(s) detected/corrected\n", | ||
372 | err_print_prefix, | ||
373 | (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2) | ||
374 | ? "Multiple " : ""); | ||
375 | marvel_print_po7_crrct_sym(io->po7_crrct_sym); | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Neither are the interrupt status bits | ||
380 | */ | ||
381 | if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT) | ||
382 | printk("%s Halt Interrupt posted", err_print_prefix); | ||
383 | if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) { | ||
384 | printk("%s Hot Plug Event Interrupt posted", | ||
385 | err_print_prefix); | ||
386 | uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); | ||
387 | } | ||
388 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT) | ||
389 | printk("%s Correctable Error Interrupt posted", | ||
390 | err_print_prefix); | ||
391 | if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) { | ||
392 | printk("%s Starvation Interrupt posted", err_print_prefix); | ||
393 | uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR); | ||
394 | } | ||
395 | if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) { | ||
396 | printk("%s Hard Error Interrupt posted", err_print_prefix); | ||
397 | uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * Everything else is valid only with ERR_VALID, so skip to the end | ||
402 | * (uncrr_sym check) unless ERR_VALID is set. | ||
403 | */ | ||
404 | if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID)) | ||
405 | goto check_uncrr_sym; | ||
406 | |||
407 | /* | ||
408 | * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid. | ||
409 | * For bits [29:0] to also be valid, the following bits must | ||
410 | * not be set: | ||
411 | * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM | ||
412 | * CR_ERR_RESP MAF_TO | ||
413 | */ | ||
414 | uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP); | ||
415 | if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE | | ||
416 | IO7__PO7_ERRSUM__CR_CSR_NXM | | ||
417 | IO7__PO7_ERRSUM__CR_RSP_NXM | | ||
418 | IO7__PO7_ERRSUM__CR_ERR_RESP | | ||
419 | IO7__PO7_ERRSUM__MAF_TO))) | ||
420 | uncrr_sym_valid |= 0x3ffffffful; | ||
421 | |||
422 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE) | ||
423 | printk("%s Write byte into IO7 CSR\n", err_print_prefix); | ||
424 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM) | ||
425 | printk("%s PIO to non-existent CSR\n", err_print_prefix); | ||
426 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV) | ||
427 | printk("%s Bus Requester PID (Access Violation)\n", | ||
428 | err_print_prefix); | ||
429 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM) | ||
430 | printk("%s Received NXM response from EV7\n", | ||
431 | err_print_prefix); | ||
432 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP) | ||
433 | printk("%s Received ERROR RESPONSE\n", err_print_prefix); | ||
434 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR) | ||
435 | printk("%s Clock error on data flit\n", err_print_prefix); | ||
436 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE) | ||
437 | printk("%s Double Bit Error Data Error Detected\n", | ||
438 | err_print_prefix); | ||
439 | if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG) | ||
440 | printk("%s Garbage Encoding Detected on the data\n", | ||
441 | err_print_prefix); | ||
442 | if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) { | ||
443 | printk("%s Garbage Encoding sent up hose\n", | ||
444 | err_print_prefix); | ||
445 | marvel_print_po7_ugbge_sym(io->po7_ugbge_sym); | ||
446 | } | ||
447 | if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST) | ||
448 | printk("%s Orphan response (unexpected response)\n", | ||
449 | err_print_prefix); | ||
450 | if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF) | ||
451 | printk("%s Down hose packet overflow\n", err_print_prefix); | ||
452 | if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF) | ||
453 | printk("%s Down hose credit overflow\n", err_print_prefix); | ||
454 | if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC) | ||
455 | printk("%s Unexpected or bad dealloc field\n", | ||
456 | err_print_prefix); | ||
457 | |||
458 | /* | ||
459 | * The black hole events. | ||
460 | */ | ||
461 | if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO) | ||
462 | printk("%s BLACK HOLE: Timeout for all responses\n", | ||
463 | err_print_prefix); | ||
464 | if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO) | ||
465 | printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix); | ||
466 | if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR) | ||
467 | printk("%s BLACK HOLE: Clock check on header\n", | ||
468 | err_print_prefix); | ||
469 | if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR) | ||
470 | printk("%s BLACK HOLE: Uncorrectable Error on header\n", | ||
471 | err_print_prefix); | ||
472 | if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR) | ||
473 | printk("%s BLACK HOLE: Garbage on header\n", | ||
474 | err_print_prefix); | ||
475 | if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD) | ||
476 | printk("%s BLACK HOLE: Bad EV7 command\n", | ||
477 | err_print_prefix); | ||
478 | |||
479 | if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST) | ||
480 | printk("%s Lost Error\n", err_print_prefix); | ||
481 | |||
482 | printk("%s Failing Packet:\n" | ||
483 | "%s Cycle 1: %016lx\n" | ||
484 | "%s Cycle 2: %016lx\n", | ||
485 | err_print_prefix, | ||
486 | err_print_prefix, io->po7_err_pkt0, | ||
487 | err_print_prefix, io->po7_err_pkt1); | ||
488 | /* | ||
489 | * If there are any valid bits in UNCRR sym for this err, | ||
490 | * print UNCRR_SYM as well. | ||
491 | */ | ||
492 | check_uncrr_sym: | ||
493 | if (uncrr_sym_valid) | ||
494 | marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid); | ||
495 | } | ||
496 | |||
497 | static void | ||
498 | marvel_print_pox_tlb_err(u64 tlb_err) | ||
499 | { | ||
500 | static char *tlb_errors[] = { | ||
501 | "No Error", | ||
502 | "North Port Signaled Error fetching TLB entry", | ||
503 | "PTE invalid or UCC or GBG error on this entry", | ||
504 | "Address did not hit any DMA window" | ||
505 | }; | ||
506 | |||
507 | #define IO7__POX_TLBERR__ERR_VALID (1UL << 63) | ||
508 | #define IO7__POX_TLBERR__ERRCODE__S (0) | ||
509 | #define IO7__POX_TLBERR__ERRCODE__M (0x3) | ||
510 | #define IO7__POX_TLBERR__ERR_TLB_PTR__S (3) | ||
511 | #define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7) | ||
512 | #define IO7__POX_TLBERR__FADDR__S (6) | ||
513 | #define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful) | ||
514 | |||
515 | if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) | ||
516 | return; | ||
517 | |||
518 | printk("%s TLB Error on index 0x%lx:\n" | ||
519 | "%s - %s\n" | ||
520 | "%s - Addr: 0x%016lx\n", | ||
521 | err_print_prefix, | ||
522 | EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), | ||
523 | err_print_prefix, | ||
524 | tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)], | ||
525 | err_print_prefix, | ||
526 | EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6); | ||
527 | } | ||
528 | |||
529 | static void | ||
530 | marvel_print_pox_spl_cmplt(u64 spl_cmplt) | ||
531 | { | ||
532 | char message[80]; | ||
533 | |||
534 | #define IO7__POX_SPLCMPLT__MESSAGE__S (0) | ||
535 | #define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful) | ||
536 | #define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40) | ||
537 | #define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful) | ||
538 | #define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35) | ||
539 | #define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful) | ||
540 | #define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32) | ||
541 | #define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul) | ||
542 | |||
543 | #define IO7__POX_SPLCMPLT__MSG_CLASS__S (28) | ||
544 | #define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf) | ||
545 | #define IO7__POX_SPLCMPLT__MSG_INDEX__S (20) | ||
546 | #define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff) | ||
547 | #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20) | ||
548 | #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff) | ||
549 | #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12) | ||
550 | #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f) | ||
551 | #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0) | ||
552 | #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) | ||
553 | |||
554 | printk("%s Split Completion Error:\n" | ||
555 | "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n", | ||
556 | err_print_prefix, | ||
557 | err_print_prefix, | ||
558 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), | ||
559 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV), | ||
560 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC)); | ||
561 | |||
562 | switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) { | ||
563 | case 0x000: | ||
564 | sprintf(message, "Normal completion"); | ||
565 | break; | ||
566 | case 0x100: | ||
567 | sprintf(message, "Bridge - Master Abort"); | ||
568 | break; | ||
569 | case 0x101: | ||
570 | sprintf(message, "Bridge - Target Abort"); | ||
571 | break; | ||
572 | case 0x102: | ||
573 | sprintf(message, "Bridge - Uncorrectable Write Data Error"); | ||
574 | break; | ||
575 | case 0x200: | ||
576 | sprintf(message, "Byte Count Out of Range"); | ||
577 | break; | ||
578 | case 0x201: | ||
579 | sprintf(message, "Uncorrectable Split Write Data Error"); | ||
580 | break; | ||
581 | default: | ||
582 | sprintf(message, "%08lx\n", | ||
583 | EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); | ||
584 | break; | ||
585 | } | ||
586 | printk("%s Message: %s\n", err_print_prefix, message); | ||
587 | } | ||
588 | |||
/*
 * Decode and print a POx_TRANS_SUM (transaction summary) register:
 * the PCI-X command, the PCI address (with DAC annotation) and the
 * PCI-X master slot for the failing transaction.  Silent when the
 * register's valid bit is clear.
 */
static void
marvel_print_pox_trans_sum(u64 trans_sum)
{
	/* Text for the 4-bit PCI-X command field, indexed by its value. */
	char *pcix_cmd[] = { "Interrupt Acknowledge",
			     "Special Cycle",
			     "I/O Read",
			     "I/O Write",
			     "Reserved",
			     "Reserved / Device ID Message",
			     "Memory Read",
			     "Memory Write",
			     "Reserved / Alias to Memory Read Block",
			     "Reserved / Alias to Memory Write Block",
			     "Configuration Read",
			     "Configuration Write",
			     "Memory Read Multiple / Split Completion",
			     "Dual Address Cycle",
			     "Memory Read Line / Memory Read Block",
			     "Memory Write and Invalidate / Memory Write Block"
	};

#define IO7__POX_TRANSUM__PCI_ADDR__S		(0)
#define IO7__POX_TRANSUM__PCI_ADDR__M		(0x3fffffffffffful)
#define IO7__POX_TRANSUM__DAC			(1UL << 50)
#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S	(52)
#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M	(0xf)
#define IO7__POX_TRANSUM__PCIX_CMD__S		(56)
#define IO7__POX_TRANSUM__PCIX_CMD__M		(0xf)
#define IO7__POX_TRANSUM__ERR_VALID		(1UL << 63)

	/* Nothing latched -- nothing to say. */
	if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID))
		return;

	/* PCIX_CMD is 4 bits wide, so it always indexes pcix_cmd[] safely. */
	printk("%s Transaction Summary:\n"
	       "%s Command: 0x%lx - %s\n"
	       "%s Address: 0x%016lx%s\n"
	       "%s PCI-X Master Slot: 0x%lx\n",
	       err_print_prefix,
	       err_print_prefix,
	       EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD),
	       pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)],
	       err_print_prefix,
	       EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR),
	       (trans_sum & IO7__POX_TRANSUM__DAC) ? " (DAC)" : "",
	       err_print_prefix,
	       EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT));
}
636 | |||
637 | static void | ||
638 | marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port) | ||
639 | { | ||
640 | #define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4) | ||
641 | #define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5) | ||
642 | #define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6) | ||
643 | #define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7) | ||
644 | #define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8) | ||
645 | #define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9) | ||
646 | #define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10) | ||
647 | #define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11) | ||
648 | #define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12) | ||
649 | #define IO7__POX_ERRSUM__DMA_TO (1UL << 13) | ||
650 | #define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14) | ||
651 | #define IO7__POX_ERRSUM__MABORT (1UL << 15) | ||
652 | #define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\ | ||
653 | IO7__POX_ERRSUM__MABORT) | ||
654 | #define IO7__POX_ERRSUM__PT_TABORT (1UL << 16) | ||
655 | #define IO7__POX_ERRSUM__PM_TABORT (1UL << 17) | ||
656 | #define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \ | ||
657 | IO7__POX_ERRSUM__PM_TABORT) | ||
658 | #define IO7__POX_ERRSUM__SERR (1UL << 18) | ||
659 | #define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19) | ||
660 | #define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20) | ||
661 | #define IO7__POX_ERRSUM__PERR (1UL << 21) | ||
662 | #define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22) | ||
663 | #define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23) | ||
664 | #define IO7__POX_ERRSUM__PM_PERR (1UL << 24) | ||
665 | #define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26) | ||
666 | #define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28) | ||
667 | #define IO7__POX_ERRSUM__UPE_ERROR__S (51) | ||
668 | #define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL) | ||
669 | #define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR) | ||
670 | #define IO7__POX_ERRSUM__TLB_ERR (1UL << 59) | ||
671 | #define IO7__POX_ERRSUM__ERR_VALID (1UL << 63) | ||
672 | |||
673 | #define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \ | ||
674 | IO7__POX_ERRSUM__PCIX_UX_SPL | \ | ||
675 | IO7__POX_ERRSUM__PCIX_SPLIT_TO | \ | ||
676 | IO7__POX_ERRSUM__DMA_TO | \ | ||
677 | IO7__POX_ERRSUM__MABORT_MASK | \ | ||
678 | IO7__POX_ERRSUM__TABORT_MASK | \ | ||
679 | IO7__POX_ERRSUM__SERR | \ | ||
680 | IO7__POX_ERRSUM__ADDRERR_STB | \ | ||
681 | IO7__POX_ERRSUM__PERR | \ | ||
682 | IO7__POX_ERRSUM__DATAERR_STB_NIOW |\ | ||
683 | IO7__POX_ERRSUM__DETECTED_PERR | \ | ||
684 | IO7__POX_ERRSUM__PM_PERR | \ | ||
685 | IO7__POX_ERRSUM__PT_SCERROR | \ | ||
686 | IO7__POX_ERRSUM__UPE_ERROR) | ||
687 | |||
688 | if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID)) | ||
689 | return; | ||
690 | |||
691 | /* | ||
692 | * First the transaction summary errors | ||
693 | */ | ||
694 | if (err_sum & IO7__POX_ERRSUM__MRETRY_TO) | ||
695 | printk("%s IO7 Master Retry Timeout expired\n", | ||
696 | err_print_prefix); | ||
697 | if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL) | ||
698 | printk("%s Unexpected Split Completion\n", | ||
699 | err_print_prefix); | ||
700 | if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO) | ||
701 | printk("%s IO7 Split Completion Timeout expired\n", | ||
702 | err_print_prefix); | ||
703 | if (err_sum & IO7__POX_ERRSUM__DMA_TO) | ||
704 | printk("%s Hung bus during DMA transaction\n", | ||
705 | err_print_prefix); | ||
706 | if (err_sum & IO7__POX_ERRSUM__MABORT_MASK) | ||
707 | printk("%s Master Abort\n", err_print_prefix); | ||
708 | if (err_sum & IO7__POX_ERRSUM__PT_TABORT) | ||
709 | printk("%s IO7 Asserted Target Abort\n", err_print_prefix); | ||
710 | if (err_sum & IO7__POX_ERRSUM__PM_TABORT) | ||
711 | printk("%s IO7 Received Target Abort\n", err_print_prefix); | ||
712 | if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) { | ||
713 | printk("%s Address or PCI-X Attribute Parity Error\n", | ||
714 | err_print_prefix); | ||
715 | if (err_sum & IO7__POX_ERRSUM__SERR) | ||
716 | printk("%s IO7 Asserted SERR\n", err_print_prefix); | ||
717 | } | ||
718 | if (err_sum & IO7__POX_ERRSUM__PERR) { | ||
719 | if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW) | ||
720 | printk("%s IO7 Detected Data Parity Error\n", | ||
721 | err_print_prefix); | ||
722 | else | ||
723 | printk("%s Split Completion Response with " | ||
724 | "Parity Error\n", err_print_prefix); | ||
725 | } | ||
726 | if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR) | ||
727 | printk("%s PERR detected\n", err_print_prefix); | ||
728 | if (err_sum & IO7__POX_ERRSUM__PM_PERR) | ||
729 | printk("%s PERR while IO7 is master\n", err_print_prefix); | ||
730 | if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) { | ||
731 | printk("%s IO7 Received Split Completion Error message\n", | ||
732 | err_print_prefix); | ||
733 | marvel_print_pox_spl_cmplt(port->pox_spl_cmplt); | ||
734 | } | ||
735 | if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) { | ||
736 | unsigned int upe_error = EXTRACT(err_sum, | ||
737 | IO7__POX_ERRSUM__UPE_ERROR); | ||
738 | int i; | ||
739 | static char *upe_errors[] = { | ||
740 | "Parity Error on MSI write data", | ||
741 | "MSI read (MSI window is write only", | ||
742 | "TLB - Invalid WR transaction", | ||
743 | "TLB - Invalid RD transaction", | ||
744 | "DMA - WR error (see north port)", | ||
745 | "DMA - RD error (see north port)", | ||
746 | "PPR - WR error (see north port)", | ||
747 | "PPR - RD error (see north port)" | ||
748 | }; | ||
749 | |||
750 | printk("%s UPE Error:\n", err_print_prefix); | ||
751 | for (i = 0; i < 8; i++) { | ||
752 | if (upe_error & (1 << i)) | ||
753 | printk("%s %s\n", err_print_prefix, | ||
754 | upe_errors[i]); | ||
755 | } | ||
756 | } | ||
757 | |||
758 | /* | ||
759 | * POx_TRANS_SUM, if appropriate. | ||
760 | */ | ||
761 | if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK) | ||
762 | marvel_print_pox_trans_sum(port->pox_trans_sum); | ||
763 | |||
764 | /* | ||
765 | * Then TLB_ERR. | ||
766 | */ | ||
767 | if (err_sum & IO7__POX_ERRSUM__TLB_ERR) { | ||
768 | printk("%s TLB ERROR\n", err_print_prefix); | ||
769 | marvel_print_pox_tlb_err(port->pox_tlb_err); | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | * And the single bit status errors. | ||
774 | */ | ||
775 | if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL) | ||
776 | printk("%s AGP Request Queue Overflow\n", err_print_prefix); | ||
777 | if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR) | ||
778 | printk("%s AGP Sync Error\n", err_print_prefix); | ||
779 | if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL) | ||
780 | printk("%s Discarded split completion\n", err_print_prefix); | ||
781 | if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO) | ||
782 | printk("%s DMA Read Timeout\n", err_print_prefix); | ||
783 | if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD) | ||
784 | printk("%s CSR NXM READ\n", err_print_prefix); | ||
785 | if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR) | ||
786 | printk("%s CSR NXM WRITE\n", err_print_prefix); | ||
787 | if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR) | ||
788 | printk("%s SERR detected\n", err_print_prefix); | ||
789 | if (err_sum & IO7__POX_ERRSUM__HUNG_BUS) | ||
790 | printk("%s HUNG BUS detected\n", err_print_prefix); | ||
791 | } | ||
792 | |||
793 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
794 | |||
/*
 * Scan all IO7s in the system for one with a latched error and, if one
 * is found, capture its error state into the caller-supplied IO
 * subpacket and acknowledge (clear) the errors in hardware.
 *
 * Returns the filled-in subpacket, or NULL if the caller supplied no
 * subpacket or no IO7 shows an error.
 *
 * NOTE: the CSR ack sequence at the bottom is order-sensitive (write,
 * mb(), read-back to flush) -- do not reorder.
 */
static struct ev7_pal_io_subpacket *
marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets)
{
	struct ev7_pal_io_subpacket *io = lf_subpackets->io;
	struct io7 *io7;
	int i;

	/*
	 * Caller must provide the packet to fill
	 */
	if (!io)
		return NULL;

	/*
	 * Fill the subpacket with the console's standard fill pattern
	 */
	memset(io, 0x55, sizeof(*io));

	/* Walk the IO7s, OR-ing together each one's error summaries. */
	for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) {
		unsigned long err_sum = 0;

		err_sum |= io7->csrs->PO7_ERROR_SUM.csr;
		for (i = 0; i < IO7_NUM_PORTS; i++) {
			if (!io7->ports[i].enabled)
				continue;
			err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr;
		}

		/*
		 * Is there at least one error?
		 * (bit 63 is the ERR_VALID bit in these summaries)
		 */
		if (err_sum & (1UL << 63))
			break;
	}

	/*
	 * Did we find an IO7 with an error?
	 */
	if (!io7)
		return NULL;

	/*
	 * We have an IO7 with an error.
	 *
	 * Fill in the IO subpacket.
	 */
	io->io_asic_rev   = io7->csrs->IO_ASIC_REV.csr;
	io->io_sys_rev    = io7->csrs->IO_SYS_REV.csr;
	io->io7_uph       = io7->csrs->IO7_UPH.csr;
	io->hpi_ctl       = io7->csrs->HPI_CTL.csr;
	io->crd_ctl       = io7->csrs->CRD_CTL.csr;
	io->hei_ctl       = io7->csrs->HEI_CTL.csr;
	io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr;
	io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr;
	io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr;
	io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr;
	io->po7_err_pkt0  = io7->csrs->PO7_ERR_PKT[0].csr;
	io->po7_err_pkt1  = io7->csrs->PO7_ERR_PKT[1].csr;

	/* Capture (and then ack) the per-port error registers. */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		io7_ioport_csrs *csrs = io7->ports[i].csrs;

		if (!io7->ports[i].enabled)
			continue;

		io->ports[i].pox_err_sum   = csrs->POx_ERR_SUM.csr;
		io->ports[i].pox_tlb_err   = csrs->POx_TLB_ERR.csr;
		io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr;
		io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr;
		io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr;
		io->ports[i].pox_mult_err  = csrs->POx_MULT_ERR.csr;
		io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr;
		io->ports[i].pox_dm_dest   = csrs->POx_DM_DEST.csr;
		io->ports[i].pox_dm_size   = csrs->POx_DM_SIZE.csr;
		io->ports[i].pox_dm_ctrl   = csrs->POx_DM_CTRL.csr;

		/*
		 * Ack this port's errors, if any. POx_ERR_SUM must be last.
		 *
		 * Most of the error registers get cleared and unlocked when
		 * the associated bits in POx_ERR_SUM are cleared (by writing
		 * 1). POx_TLB_ERR is an exception and must be explicitly
		 * cleared.
		 */
		csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err;
		csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum;
		mb();
		csrs->POx_ERR_SUM.csr;	/* read back to flush the write */
	}

	/*
	 * Ack any port 7 error(s).
	 */
	io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum;
	mb();
	io7->csrs->PO7_ERROR_SUM.csr;	/* read back to flush the write */

	/*
	 * Correct the io7_pid.
	 */
	lf_subpackets->io_pid = io7->pe;

	return io;
}
899 | |||
/*
 * Process (and optionally print) an I/O error from the logout frame's
 * IO subpacket.  Returns a MCHK_DISPOSITION_* code: UNKNOWN_ERROR if no
 * usable subpacket/error can be found, otherwise REPORT.
 */
static int
marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print)
{
	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;

#ifdef CONFIG_VERBOSE_MCHECK
	struct ev7_pal_io_subpacket *io = lf_subpackets->io;
	int i;
#endif /* CONFIG_VERBOSE_MCHECK */

/* Bit 63 is the ERR_VALID bit in all the IO7 error-summary registers. */
#define MARVEL_IO_ERR_VALID(x)  ((x) & (1UL << 63))

	if (!lf_subpackets->logout || !lf_subpackets->io)
		return status;

	/*
	 * The PALcode only builds an IO subpacket if there is a
	 * locally connected IO7.  In the cases of
	 *  1) a uniprocessor kernel
	 *  2) an mp kernel before the local secondary has called in
	 * error interrupts are all directed to the primary processor.
	 * In that case, we may not have an IO subpacket at all and, even
	 * if we do, it may not be the right one.
	 *
	 * If the RBOX indicates an I/O error interrupt, make sure we have
	 * the correct IO7 information. If we don't have an IO subpacket
	 * or it's the wrong one, try to find the right one.
	 *
	 * RBOX I/O error interrupts are indicated by RBOX_INT<29> and
	 * RBOX_INT<10>.
	 *
	 * NOTE(review): the comment inside the block below says "no error
	 * is indicated", yet the second clause fires when ERR_VALID (bit
	 * 63) IS set in one of the captured summaries -- confirm against
	 * the IO7 documentation whether the test or the comment is the
	 * intended sense.
	 */
	if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) ||
	    ((lf_subpackets->io->po7_error_sum        |
	      lf_subpackets->io->ports[0].pox_err_sum |
	      lf_subpackets->io->ports[1].pox_err_sum |
	      lf_subpackets->io->ports[2].pox_err_sum |
	      lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) {
		/*
		 * Either we have no IO subpacket or no error is
		 * indicated in the one we do have. Try find the
		 * one with the error.
		 */
		if (!marvel_find_io7_with_error(lf_subpackets))
			return status;
	}

	/*
	 * We have an IO7 indicating an error - we're going to report it
	 */
	status = MCHK_DISPOSITION_REPORT;

#ifdef CONFIG_VERBOSE_MCHECK

	if (!print)
		return status;

	printk("%s*Error occurred on IO7 at PID %u\n",
	       err_print_prefix, lf_subpackets->io_pid);

	/*
	 * Check port 7 first
	 */
	if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) {
		marvel_print_po7_err_sum(io);

#if 0
		printk("%s PORT 7 ERROR:\n"
		       "%s PO7_ERROR_SUM: %016lx\n"
		       "%s PO7_UNCRR_SYM: %016lx\n"
		       "%s PO7_CRRCT_SYM: %016lx\n"
		       "%s PO7_UGBGE_SYM: %016lx\n"
		       "%s PO7_ERR_PKT0: %016lx\n"
		       "%s PO7_ERR_PKT1: %016lx\n",
		       err_print_prefix,
		       err_print_prefix, io->po7_error_sum,
		       err_print_prefix, io->po7_uncrr_sym,
		       err_print_prefix, io->po7_crrct_sym,
		       err_print_prefix, io->po7_ugbge_sym,
		       err_print_prefix, io->po7_err_pkt0,
		       err_print_prefix, io->po7_err_pkt1);
#endif
	}

	/*
	 * Then loop through the ports
	 */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum))
			continue;

		printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n",
		       err_print_prefix,
		       lf_subpackets->io_pid, i, io->ports[i].pox_err_sum);
		marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]);

		/* POx_FIRST_ERR shares the POx_ERR_SUM layout. */
		printk("%s [ POx_FIRST_ERR: %016lx ]\n",
		       err_print_prefix, io->ports[i].pox_first_err);
		marvel_print_pox_err(io->ports[i].pox_first_err,
				     &io->ports[i]);

	}


#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}
1007 | |||
1008 | static int | ||
1009 | marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print) | ||
1010 | { | ||
1011 | int status = MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
1012 | |||
1013 | /* | ||
1014 | * I/O error? | ||
1015 | */ | ||
1016 | #define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul | ||
1017 | if (lf_subpackets->logout && | ||
1018 | (lf_subpackets->logout->rbox_int & 0x20000400ul)) | ||
1019 | status = marvel_process_io_error(lf_subpackets, print); | ||
1020 | |||
1021 | /* | ||
1022 | * Probing behind PCI-X bridges can cause machine checks on | ||
1023 | * Marvel when the probe is handled by the bridge as a split | ||
1024 | * completion transaction. The symptom is an ERROR_RESPONSE | ||
1025 | * to a CONFIG address. Since these errors will happen in | ||
1026 | * normal operation, dismiss them. | ||
1027 | * | ||
1028 | * Dismiss if: | ||
1029 | * C_STAT = 0x14 (Error Reponse) | ||
1030 | * C_STS<3> = 0 (C_ADDR valid) | ||
1031 | * C_ADDR<42> = 1 (I/O) | ||
1032 | * C_ADDR<31:22> = 111110xxb (PCI Config space) | ||
1033 | */ | ||
1034 | if (lf_subpackets->ev7 && | ||
1035 | (lf_subpackets->ev7->c_stat == 0x14) && | ||
1036 | !(lf_subpackets->ev7->c_sts & 0x8) && | ||
1037 | ((lf_subpackets->ev7->c_addr & 0x400ff000000ul) | ||
1038 | == 0x400fe000000ul)) | ||
1039 | status = MCHK_DISPOSITION_DISMISS; | ||
1040 | |||
1041 | return status; | ||
1042 | } | ||
1043 | |||
/*
 * Top-level Marvel machine-check handler.
 *
 * Dispatches on the SCB vector to the appropriate frame processor,
 * first parsing the logout frame silently to decide whether the error
 * should be dismissed, reported, or dumped raw.  Unknown vectors are
 * passed up to the generic EV7 handler.
 *
 * The mb()/draina() at entry and the wrmces()/mb() at exit are
 * order-sensitive hardware synchronization -- do not reorder.
 */
void
marvel_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
{
	struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
	int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL;
	struct ev7_lf_subpackets subpacket_collection = { NULL, };
	/* Scratch storage for marvel_find_io7_with_error() when the
	 * PALcode supplied no IO subpacket of its own. */
	struct ev7_pal_io_subpacket scratch_io_packet = { 0, };
	struct ev7_lf_subpackets *lf_subpackets = NULL;
	int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR;
	char *saved_err_prefix = err_print_prefix;
	char *error_type = NULL;

	/*
	 * Sync the processor
	 */
	mb();
	draina();

	/* Pick the frame processor and description for this vector. */
	switch(vector) {
	case SCB_Q_SYSEVENT:
		process_frame = marvel_process_680_frame;
		error_type = "System Event";
		break;

	case SCB_Q_SYSMCHK:
		process_frame = marvel_process_logout_frame;
		error_type = "System Uncorrectable Error";
		break;

	case SCB_Q_SYSERR:
		process_frame = marvel_process_logout_frame;
		error_type = "System Correctable Error";
		break;

	default:
		/* Don't know it - pass it up. */
		ev7_machine_check(vector, la_ptr, regs);
		return;
	}

	/*
	 * A system event or error has occured, handle it here.
	 *
	 * Any errors in the logout frame have already been cleared by the
	 * PALcode, so just parse it.
	 */
	err_print_prefix = KERN_CRIT;

	/*
	 * Parse the logout frame without printing first. If the only error(s)
	 * found are classified as "dismissable", then just dismiss them and
	 * don't print any message
	 */
	lf_subpackets =
		ev7_collect_logout_frame_subpackets(el_ptr,
						    &subpacket_collection);
	if (process_frame && lf_subpackets && lf_subpackets->logout) {
		/*
		 * We might not have the correct (or any) I/O subpacket.
		 * [ See marvel_process_io_error() for explanation. ]
		 * If we don't have one, point the io subpacket in
		 * lf_subpackets at scratch_io_packet so that
		 * marvel_find_io7_with_error() will have someplace to
		 * store the info.
		 */
		if (!lf_subpackets->io)
			lf_subpackets->io = &scratch_io_packet;

		/*
		 * Default io_pid to the processor reporting the error
		 * [this will get changed in marvel_find_io7_with_error()
		 *  if a different one is needed]
		 */
		lf_subpackets->io_pid = lf_subpackets->logout->whami;

		/*
		 * Evaluate the frames.  (print == 0: classify only.)
		 */
		disposition = process_frame(lf_subpackets, 0);
	}
	switch(disposition) {
	case MCHK_DISPOSITION_DISMISS:
		/* Nothing to do. */
		break;

	case MCHK_DISPOSITION_REPORT:
		/* Recognized error, report it (second pass, print == 1). */
		printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
		       err_print_prefix, error_type,
		       (unsigned int)vector, (int)smp_processor_id());
		el_print_timestamp(&lf_subpackets->logout->timestamp);
		process_frame(lf_subpackets, 1);
		break;

	default:
		/* Unknown - dump the annotated subpackets. */
		printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
		       err_print_prefix, error_type,
		       (unsigned int)vector, (int)smp_processor_id());
		el_process_subpacket(el_ptr);
		break;

	}

	err_print_prefix = saved_err_prefix;

	/* Release the logout frame. */
	wrmces(0x7);
	mb();
}
1154 | |||
/*
 * Register Marvel's error handlers.  Marvel adds nothing beyond the
 * generic EV7 handlers, so simply delegate.
 */
void
marvel_register_error_handlers(void)
{
	ev7_register_error_handlers();
}
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c new file mode 100644 index 000000000000..7e6720d45f02 --- /dev/null +++ b/arch/alpha/kernel/err_titan.c | |||
@@ -0,0 +1,756 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/err_titan.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | * Error handling code supporting TITAN systems | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/sched.h> | ||
12 | |||
13 | #include <asm/io.h> | ||
14 | #include <asm/core_titan.h> | ||
15 | #include <asm/hwrpb.h> | ||
16 | #include <asm/smp.h> | ||
17 | #include <asm/err_common.h> | ||
18 | #include <asm/err_ev6.h> | ||
19 | |||
20 | #include "err_impl.h" | ||
21 | #include "proto.h" | ||
22 | |||
23 | |||
/*
 * Parse the Titan Cchip MISC register for a Non-Existent Memory (NXM)
 * error.
 *
 * c_misc: raw Cchip MISC register value from the logout frame
 * print:  non-zero to describe the error via printk (effective only
 *         when CONFIG_VERBOSE_MCHECK is enabled)
 *
 * Returns MCHK_DISPOSITION_UNKNOWN_ERROR if no NXM is flagged,
 * otherwise MCHK_DISPOSITION_REPORT.
 */
static int
titan_parse_c_misc(u64 c_misc, int print)
{
#ifdef CONFIG_VERBOSE_MCHECK
	char *src;
	int nxs = 0;
#endif
	int status = MCHK_DISPOSITION_REPORT;

	/* Cchip MISC fields: NXM flag plus the 3-bit NXS source field. */
#define TITAN__CCHIP_MISC__NXM		(1UL << 28)
#define TITAN__CCHIP_MISC__NXS__S	(29)
#define TITAN__CCHIP_MISC__NXS__M	(0x7)

	/* No NXM indication -- nothing we recognize in this register. */
	if (!(c_misc & TITAN__CCHIP_MISC__NXM))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!print)
		return status;

	/* NXS encodes the agent that issued the non-existent access:
	   0-3 = CPU n, 4-5 = Pchip (n - 4), others reserved. */
	nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS);
	switch(nxs) {
	case 0:	/* CPU 0 */
	case 1:	/* CPU 1 */
	case 2:	/* CPU 2 */
	case 3:	/* CPU 3 */
		src = "CPU";
		/* nxs is already the CPU number */
		break;
	case 4:	/* Pchip 0 */
	case 5:	/* Pchip 1 */
		src = "Pchip";
		nxs -= 4;	/* rebase to the Pchip number */
		break;
	default:/* reserved */
		src = "Unknown, NXS =";
		/* leave nxs untouched so the raw encoding is printed */
		break;
	}

	printk("%s Non-existent memory access from: %s %d\n",
	       err_print_prefix, src, nxs);
#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}
70 | |||
71 | static int | ||
72 | titan_parse_p_serror(int which, u64 serror, int print) | ||
73 | { | ||
74 | int status = MCHK_DISPOSITION_REPORT; | ||
75 | |||
76 | #ifdef CONFIG_VERBOSE_MCHECK | ||
77 | char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"}; | ||
78 | char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"}; | ||
79 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
80 | |||
81 | #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) | ||
82 | #define TITAN__PCHIP_SERROR__UECC (1UL << 1) | ||
83 | #define TITAN__PCHIP_SERROR__CRE (1UL << 2) | ||
84 | #define TITAN__PCHIP_SERROR__NXIO (1UL << 3) | ||
85 | #define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4) | ||
86 | #define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \ | ||
87 | TITAN__PCHIP_SERROR__CRE) | ||
88 | #define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \ | ||
89 | TITAN__PCHIP_SERROR__UECC | \ | ||
90 | TITAN__PCHIP_SERROR__CRE | \ | ||
91 | TITAN__PCHIP_SERROR__NXIO | \ | ||
92 | TITAN__PCHIP_SERROR__LOST_CRE) | ||
93 | #define TITAN__PCHIP_SERROR__SRC__S (52) | ||
94 | #define TITAN__PCHIP_SERROR__SRC__M (0x3) | ||
95 | #define TITAN__PCHIP_SERROR__CMD__S (54) | ||
96 | #define TITAN__PCHIP_SERROR__CMD__M (0x3) | ||
97 | #define TITAN__PCHIP_SERROR__SYN__S (56) | ||
98 | #define TITAN__PCHIP_SERROR__SYN__M (0xff) | ||
99 | #define TITAN__PCHIP_SERROR__ADDR__S (15) | ||
100 | #define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL) | ||
101 | |||
102 | if (!(serror & TITAN__PCHIP_SERROR__ERRMASK)) | ||
103 | return MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
104 | |||
105 | #ifdef CONFIG_VERBOSE_MCHECK | ||
106 | if (!print) | ||
107 | return status; | ||
108 | |||
109 | printk("%s PChip %d SERROR: %016lx\n", | ||
110 | err_print_prefix, which, serror); | ||
111 | if (serror & TITAN__PCHIP_SERROR__ECCMASK) { | ||
112 | printk("%s %sorrectable ECC Error:\n" | ||
113 | " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" | ||
114 | " Address: 0x%lx\n", | ||
115 | err_print_prefix, | ||
116 | (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", | ||
117 | serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], | ||
118 | serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)], | ||
119 | (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN), | ||
120 | EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR)); | ||
121 | } | ||
122 | if (serror & TITAN__PCHIP_SERROR__NXIO) | ||
123 | printk("%s Non Existent I/O Error\n", err_print_prefix); | ||
124 | if (serror & TITAN__PCHIP_SERROR__LOST_UECC) | ||
125 | printk("%s Lost Uncorrectable ECC Error\n", | ||
126 | err_print_prefix); | ||
127 | if (serror & TITAN__PCHIP_SERROR__LOST_CRE) | ||
128 | printk("%s Lost Correctable ECC Error\n", err_print_prefix); | ||
129 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
130 | |||
131 | return status; | ||
132 | } | ||
133 | |||
134 | static int | ||
135 | titan_parse_p_perror(int which, int port, u64 perror, int print) | ||
136 | { | ||
137 | int cmd; | ||
138 | unsigned long addr; | ||
139 | int status = MCHK_DISPOSITION_REPORT; | ||
140 | |||
141 | #ifdef CONFIG_VERBOSE_MCHECK | ||
142 | char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", | ||
143 | "I/O Read", "I/O Write", | ||
144 | "Reserved", "Reserved", | ||
145 | "Memory Read", "Memory Write", | ||
146 | "Reserved", "Reserved", | ||
147 | "Configuration Read", "Configuration Write", | ||
148 | "Memory Read Multiple", "Dual Address Cycle", | ||
149 | "Memory Read Line","Memory Write and Invalidate" | ||
150 | }; | ||
151 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
152 | |||
153 | #define TITAN__PCHIP_PERROR__LOST (1UL << 0) | ||
154 | #define TITAN__PCHIP_PERROR__SERR (1UL << 1) | ||
155 | #define TITAN__PCHIP_PERROR__PERR (1UL << 2) | ||
156 | #define TITAN__PCHIP_PERROR__DCRTO (1UL << 3) | ||
157 | #define TITAN__PCHIP_PERROR__SGE (1UL << 4) | ||
158 | #define TITAN__PCHIP_PERROR__APE (1UL << 5) | ||
159 | #define TITAN__PCHIP_PERROR__TA (1UL << 6) | ||
160 | #define TITAN__PCHIP_PERROR__DPE (1UL << 7) | ||
161 | #define TITAN__PCHIP_PERROR__NDS (1UL << 8) | ||
162 | #define TITAN__PCHIP_PERROR__IPTPR (1UL << 9) | ||
163 | #define TITAN__PCHIP_PERROR__IPTPW (1UL << 10) | ||
164 | #define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \ | ||
165 | TITAN__PCHIP_PERROR__SERR | \ | ||
166 | TITAN__PCHIP_PERROR__PERR | \ | ||
167 | TITAN__PCHIP_PERROR__DCRTO | \ | ||
168 | TITAN__PCHIP_PERROR__SGE | \ | ||
169 | TITAN__PCHIP_PERROR__APE | \ | ||
170 | TITAN__PCHIP_PERROR__TA | \ | ||
171 | TITAN__PCHIP_PERROR__DPE | \ | ||
172 | TITAN__PCHIP_PERROR__NDS | \ | ||
173 | TITAN__PCHIP_PERROR__IPTPR | \ | ||
174 | TITAN__PCHIP_PERROR__IPTPW) | ||
175 | #define TITAN__PCHIP_PERROR__DAC (1UL << 47) | ||
176 | #define TITAN__PCHIP_PERROR__MWIN (1UL << 48) | ||
177 | #define TITAN__PCHIP_PERROR__CMD__S (52) | ||
178 | #define TITAN__PCHIP_PERROR__CMD__M (0x0f) | ||
179 | #define TITAN__PCHIP_PERROR__ADDR__S (14) | ||
180 | #define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful) | ||
181 | |||
182 | if (!(perror & TITAN__PCHIP_PERROR__ERRMASK)) | ||
183 | return MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
184 | |||
185 | cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD); | ||
186 | addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2; | ||
187 | |||
188 | /* | ||
189 | * Initializing the BIOS on a video card on a bus without | ||
190 | * a south bridge (subtractive decode agent) can result in | ||
191 | * master aborts as the BIOS probes the capabilities of the | ||
192 | * card. XFree86 does such initialization. If the error | ||
193 | * is a master abort (No DevSel as PCI Master) and the command | ||
194 | * is an I/O read or write below the address where we start | ||
195 | * assigning PCI I/O spaces (SRM uses 0x1000), then mark the | ||
196 | * error as dismissable so starting XFree86 doesn't result | ||
197 | * in a series of uncorrectable errors being reported. Also | ||
198 | * dismiss master aborts to VGA frame buffer space | ||
199 | * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000) | ||
200 | * for the same reason. | ||
201 | * | ||
202 | * Also mark the error dismissible if it looks like the right | ||
203 | * error but only the Lost bit is set. Since the BIOS initialization | ||
204 | * can cause multiple master aborts and the error interrupt can | ||
205 | * be handled on a different CPU than the BIOS code is run on, | ||
206 | * it is possible for a second master abort to occur between the | ||
207 | * time the PALcode reads PERROR and the time it writes PERROR | ||
208 | * to acknowledge the error. If this timing happens, a second | ||
209 | * error will be signalled after the first, and if no additional | ||
210 | * errors occur, will look like a Lost error with no additional | ||
211 | * errors on the same transaction as the previous error. | ||
212 | */ | ||
213 | if (((perror & TITAN__PCHIP_PERROR__NDS) || | ||
214 | ((perror & TITAN__PCHIP_PERROR__ERRMASK) == | ||
215 | TITAN__PCHIP_PERROR__LOST)) && | ||
216 | ((((cmd & 0xE) == 2) && (addr < 0x1000)) || | ||
217 | (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) { | ||
218 | status = MCHK_DISPOSITION_DISMISS; | ||
219 | } | ||
220 | |||
221 | #ifdef CONFIG_VERBOSE_MCHECK | ||
222 | if (!print) | ||
223 | return status; | ||
224 | |||
225 | printk("%s PChip %d %cPERROR: %016lx\n", | ||
226 | err_print_prefix, which, | ||
227 | port ? 'A' : 'G', perror); | ||
228 | if (perror & TITAN__PCHIP_PERROR__IPTPW) | ||
229 | printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix); | ||
230 | if (perror & TITAN__PCHIP_PERROR__IPTPR) | ||
231 | printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix); | ||
232 | if (perror & TITAN__PCHIP_PERROR__NDS) | ||
233 | printk("%s No DEVSEL as PCI Master [Master Abort]\n", | ||
234 | err_print_prefix); | ||
235 | if (perror & TITAN__PCHIP_PERROR__DPE) | ||
236 | printk("%s Data Parity Error\n", err_print_prefix); | ||
237 | if (perror & TITAN__PCHIP_PERROR__TA) | ||
238 | printk("%s Target Abort\n", err_print_prefix); | ||
239 | if (perror & TITAN__PCHIP_PERROR__APE) | ||
240 | printk("%s Address Parity Error\n", err_print_prefix); | ||
241 | if (perror & TITAN__PCHIP_PERROR__SGE) | ||
242 | printk("%s Scatter-Gather Error, Invalid PTE\n", | ||
243 | err_print_prefix); | ||
244 | if (perror & TITAN__PCHIP_PERROR__DCRTO) | ||
245 | printk("%s Delayed-Completion Retry Timeout\n", | ||
246 | err_print_prefix); | ||
247 | if (perror & TITAN__PCHIP_PERROR__PERR) | ||
248 | printk("%s PERR Asserted\n", err_print_prefix); | ||
249 | if (perror & TITAN__PCHIP_PERROR__SERR) | ||
250 | printk("%s SERR Asserted\n", err_print_prefix); | ||
251 | if (perror & TITAN__PCHIP_PERROR__LOST) | ||
252 | printk("%s Lost Error\n", err_print_prefix); | ||
253 | printk("%s Command: 0x%x - %s\n" | ||
254 | " Address: 0x%lx\n", | ||
255 | err_print_prefix, | ||
256 | cmd, perror_cmd[cmd], | ||
257 | addr); | ||
258 | if (perror & TITAN__PCHIP_PERROR__DAC) | ||
259 | printk("%s Dual Address Cycle\n", err_print_prefix); | ||
260 | if (perror & TITAN__PCHIP_PERROR__MWIN) | ||
261 | printk("%s Hit in Monster Window\n", err_print_prefix); | ||
262 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
263 | |||
264 | return status; | ||
265 | } | ||
266 | |||
267 | static int | ||
268 | titan_parse_p_agperror(int which, u64 agperror, int print) | ||
269 | { | ||
270 | int status = MCHK_DISPOSITION_REPORT; | ||
271 | #ifdef CONFIG_VERBOSE_MCHECK | ||
272 | int cmd, len; | ||
273 | unsigned long addr; | ||
274 | |||
275 | char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", | ||
276 | "Write (low-priority)", | ||
277 | "Write (high-priority)", | ||
278 | "Reserved", "Reserved", | ||
279 | "Flush", "Fence" | ||
280 | }; | ||
281 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
282 | |||
283 | #define TITAN__PCHIP_AGPERROR__LOST (1UL << 0) | ||
284 | #define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1) | ||
285 | #define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2) | ||
286 | #define TITAN__PCHIP_AGPERROR__RESCMD (1UL << 3) | ||
287 | #define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4) | ||
288 | #define TITAN__PCHIP_AGPERROR__PTP (1UL << 5) | ||
289 | #define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6) | ||
290 | #define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \ | ||
291 | TITAN__PCHIP_AGPERROR__LPQFULL | \ | ||
292 | TITAN__PCHIP_AGPERROR__HPQFULL | \ | ||
293 | TITAN__PCHIP_AGPERROR__RESCMD | \ | ||
294 | TITAN__PCHIP_AGPERROR__IPTE | \ | ||
295 | TITAN__PCHIP_AGPERROR__PTP | \ | ||
296 | TITAN__PCHIP_AGPERROR__NOWINDOW) | ||
297 | #define TITAN__PCHIP_AGPERROR__DAC (1UL << 48) | ||
298 | #define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49) | ||
299 | #define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59) | ||
300 | #define TITAN__PCHIP_AGPERROR__CMD__S (50) | ||
301 | #define TITAN__PCHIP_AGPERROR__CMD__M (0x07) | ||
302 | #define TITAN__PCHIP_AGPERROR__ADDR__S (15) | ||
303 | #define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL) | ||
304 | #define TITAN__PCHIP_AGPERROR__LEN__S (53) | ||
305 | #define TITAN__PCHIP_AGPERROR__LEN__M (0x3f) | ||
306 | |||
307 | if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK)) | ||
308 | return MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
309 | |||
310 | #ifdef CONFIG_VERBOSE_MCHECK | ||
311 | if (!print) | ||
312 | return status; | ||
313 | |||
314 | cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD); | ||
315 | addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; | ||
316 | len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); | ||
317 | |||
318 | printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix, | ||
319 | which, agperror); | ||
320 | if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) | ||
321 | printk("%s No Window\n", err_print_prefix); | ||
322 | if (agperror & TITAN__PCHIP_AGPERROR__PTP) | ||
323 | printk("%s Peer-to-Peer set\n", err_print_prefix); | ||
324 | if (agperror & TITAN__PCHIP_AGPERROR__IPTE) | ||
325 | printk("%s Invalid PTE\n", err_print_prefix); | ||
326 | if (agperror & TITAN__PCHIP_AGPERROR__RESCMD) | ||
327 | printk("%s Reserved Command\n", err_print_prefix); | ||
328 | if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL) | ||
329 | printk("%s HP Transaction Received while Queue Full\n", | ||
330 | err_print_prefix); | ||
331 | if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL) | ||
332 | printk("%s LP Transaction Received while Queue Full\n", | ||
333 | err_print_prefix); | ||
334 | if (agperror & TITAN__PCHIP_AGPERROR__LOST) | ||
335 | printk("%s Lost Error\n", err_print_prefix); | ||
336 | printk("%s Command: 0x%x - %s, %d Quadwords%s\n" | ||
337 | " Address: 0x%lx\n", | ||
338 | err_print_prefix, cmd, agperror_cmd[cmd], len, | ||
339 | (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "", | ||
340 | addr); | ||
341 | if (agperror & TITAN__PCHIP_AGPERROR__DAC) | ||
342 | printk("%s Dual Address Cycle\n", err_print_prefix); | ||
343 | if (agperror & TITAN__PCHIP_AGPERROR__MWIN) | ||
344 | printk("%s Hit in Monster Window\n", err_print_prefix); | ||
345 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
346 | |||
347 | return status; | ||
348 | } | ||
349 | |||
350 | static int | ||
351 | titan_parse_p_chip(int which, u64 serror, u64 gperror, | ||
352 | u64 aperror, u64 agperror, int print) | ||
353 | { | ||
354 | int status = MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
355 | status |= titan_parse_p_serror(which, serror, print); | ||
356 | status |= titan_parse_p_perror(which, 0, gperror, print); | ||
357 | status |= titan_parse_p_perror(which, 1, aperror, print); | ||
358 | status |= titan_parse_p_agperror(which, agperror, print); | ||
359 | return status; | ||
360 | } | ||
361 | |||
362 | int | ||
363 | titan_process_logout_frame(struct el_common *mchk_header, int print) | ||
364 | { | ||
365 | struct el_TITAN_sysdata_mcheck *tmchk = | ||
366 | (struct el_TITAN_sysdata_mcheck *) | ||
367 | ((unsigned long)mchk_header + mchk_header->sys_offset); | ||
368 | int status = MCHK_DISPOSITION_UNKNOWN_ERROR; | ||
369 | |||
370 | status |= titan_parse_c_misc(tmchk->c_misc, print); | ||
371 | status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror, | ||
372 | tmchk->p0_aperror, tmchk->p0_agperror, | ||
373 | print); | ||
374 | status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror, | ||
375 | tmchk->p1_aperror, tmchk->p1_agperror, | ||
376 | print); | ||
377 | |||
378 | return status; | ||
379 | } | ||
380 | |||
/*
 * Titan machine-check handler.
 *
 * vector: SCB vector of the machine check
 * la_ptr: address of the logout frame supplied by PALcode
 * regs:   interrupted register state
 *
 * Processor machine checks are delegated to the common EV6 handler;
 * system errors (SCB_Q_SYSMCHK / SCB_Q_SYSERR) are parsed here.  A
 * first, silent parse pass decides whether the error is dismissable
 * before anything is printed.
 */
void
titan_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
{
	struct el_common *mchk_header = (struct el_common *)la_ptr;
	struct el_TITAN_sysdata_mcheck *tmchk =
		(struct el_TITAN_sysdata_mcheck *)
		((unsigned long)mchk_header + mchk_header->sys_offset);
	u64 irqmask;

	/*
	 * Mask of Titan interrupt sources which are reported as machine checks
	 *
	 * 63 - CChip Error
	 * 62 - PChip 0 H_Error
	 * 61 - PChip 1 H_Error
	 * 60 - PChip 0 C_Error
	 * 59 - PChip 1 C_Error
	 */
#define TITAN_MCHECK_INTERRUPT_MASK	0xF800000000000000UL

	/*
	 * Sync the processor
	 */
	mb();
	draina();

	/*
	 * Only handle system errors here
	 */
	if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) {
		/* Processor machine check -- common EV6 handling. */
		ev6_machine_check(vector, la_ptr, regs);
		return;
	}

	/*
	 * It's a system error, handle it here
	 *
	 * The PALcode has already cleared the error, so just parse it
	 */

	/*
	 * Parse the logout frame without printing first. If the only error(s)
	 * found are classified as "dismissable", then just dismiss them and
	 * don't print any message
	 */
	if (titan_process_logout_frame(mchk_header, 0) !=
	    MCHK_DISPOSITION_DISMISS) {
		/* Temporarily raise the print prefix to KERN_CRIT for
		   the whole report. */
		char *saved_err_prefix = err_print_prefix;
		err_print_prefix = KERN_CRIT;

		/*
		 * Either a nondismissable error was detected or no
		 * recognized error was detected in the logout frame
		 * -- report the error in either case
		 */
		printk("%s"
		       "*System %s Error (Vector 0x%x) reported on CPU %d:\n",
		       err_print_prefix,
		       (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable",
		       (unsigned int)vector, (int)smp_processor_id());

#ifdef CONFIG_VERBOSE_MCHECK
		/* Second pass, now printing the details (if verbose). */
		titan_process_logout_frame(mchk_header, alpha_verbose_mcheck);
		if (alpha_verbose_mcheck)
			dik_show_regs(regs, NULL);
#endif /* CONFIG_VERBOSE_MCHECK */

		err_print_prefix = saved_err_prefix;

		/*
		 * Convert any pending interrupts which report as system
		 * machine checks to interrupts
		 */
		irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK;
		titan_dispatch_irqs(irqmask, regs);
	}


	/*
	 * Release the logout frame
	 */
	wrmces(0x7);
	mb();
}
465 | |||
/*
 * Subpacket Annotations
 *
 * Register-name tables used to label the raw quadwords of the Titan
 * extended logout subpackets, one name per quadword in frame order.
 * Each table is NULL-terminated.
 */
static char *el_titan_pchip0_extended_annotation[] = {
	"Subpacket Header", "P0_SCTL", "P0_SERREN",
	"P0_APCTL", "P0_APERREN", "P0_AGPERREN",
	"P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1",
	"P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0",
	"P0_AWSM1", "P0_AWSM2", "P0_AWSM3",
	"P0_ATBA0", "P0_ATBA1", "P0_ATBA2",
	"P0_ATBA3", "P0_GPCTL", "P0_GPERREN",
	"P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1",
	"P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0",
	"P0_GWSM1", "P0_GWSM2", "P0_GWSM3",
	"P0_GTBA0", "P0_GTBA1", "P0_GTBA2",
	"P0_GTBA3", NULL
};
static char *el_titan_pchip1_extended_annotation[] = {
	"Subpacket Header", "P1_SCTL", "P1_SERREN",
	"P1_APCTL", "P1_APERREN", "P1_AGPERREN",
	"P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1",
	"P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0",
	"P1_AWSM1", "P1_AWSM2", "P1_AWSM3",
	"P1_ATBA0", "P1_ATBA1", "P1_ATBA2",
	"P1_ATBA3", "P1_GPCTL", "P1_GPERREN",
	"P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1",
	"P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0",
	"P1_GWSM1", "P1_GWSM2", "P1_GWSM3",
	"P1_GTBA0", "P1_GTBA1", "P1_GTBA2",
	"P1_GTBA3", NULL
};
static char *el_titan_memory_extended_annotation[] = {
	/* NOTE(review): "P1_SCTL" appears twice in this table; by
	   symmetry with the P0_SCTL/P0_GPCTL/P0_APCTL entries, the
	   final entry looks like it should be "P1_APCTL" -- confirm
	   against the Titan logout frame specification before
	   changing. */
	"Subpacket Header", "AAR0", "AAR1",
	"AAR2", "AAR3", "P0_SCTL",
	"P0_GPCTL", "P0_APCTL", "P1_SCTL",
	"P1_GPCTL", "P1_SCTL", NULL
};
503 | |||
/*
 * Annotation descriptors for the REGATTA-family subpacket types that
 * Titan emits: one entry per extended-frame type, plus a termination
 * subpacket entry with no per-quadword annotation table.
 */
static struct el_subpacket_annotation el_titan_annotations[] = {
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED,
			     1,
			     "Titan PChip 0 Extended Frame",
			     el_titan_pchip0_extended_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED,
			     1,
			     "Titan PChip 1 Extended Frame",
			     el_titan_pchip1_extended_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED,
			     1,
			     "Titan Memory Extended Frame",
			     el_titan_memory_extended_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__TERMINATION__TERMINATION,
			     1,
			     "Termination Subpacket",
			     NULL)
};
526 | |||
527 | static struct el_subpacket * | ||
528 | el_process_regatta_subpacket(struct el_subpacket *header) | ||
529 | { | ||
530 | int status; | ||
531 | |||
532 | if (header->class != EL_CLASS__REGATTA_FAMILY) { | ||
533 | printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", | ||
534 | err_print_prefix, | ||
535 | header->class, header->type); | ||
536 | return NULL; | ||
537 | } | ||
538 | |||
539 | switch(header->type) { | ||
540 | case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME: | ||
541 | case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME: | ||
542 | case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME: | ||
543 | case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT: | ||
544 | case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT: | ||
545 | printk("%s ** Occurred on CPU %d:\n", | ||
546 | err_print_prefix, | ||
547 | (int)header->by_type.regatta_frame.cpuid); | ||
548 | status = privateer_process_logout_frame((struct el_common *) | ||
549 | header->by_type.regatta_frame.data_start, 1); | ||
550 | break; | ||
551 | default: | ||
552 | printk("%s ** REGATTA TYPE %d SUBPACKET\n", | ||
553 | err_print_prefix, header->type); | ||
554 | el_annotate_subpacket(header); | ||
555 | break; | ||
556 | } | ||
557 | |||
558 | |||
559 | return (struct el_subpacket *)((unsigned long)header + header->length); | ||
560 | } | ||
561 | |||
/* Route all REGATTA-family subpackets to el_process_regatta_subpacket. */
static struct el_subpacket_handler titan_subpacket_handler =
	SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY,
			       el_process_regatta_subpacket);
565 | |||
566 | void | ||
567 | titan_register_error_handlers(void) | ||
568 | { | ||
569 | size_t i; | ||
570 | |||
571 | for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) | ||
572 | cdl_register_subpacket_annotation(&el_titan_annotations[i]); | ||
573 | |||
574 | cdl_register_subpacket_handler(&titan_subpacket_handler); | ||
575 | |||
576 | ev6_register_error_handlers(); | ||
577 | } | ||
578 | |||
579 | |||
580 | /* | ||
581 | * Privateer | ||
582 | */ | ||
583 | |||
/*
 * Process a Privateer 680 (environmental) logout frame.
 *
 * mchk_header: the logout frame
 * print:       non-zero to dump the frame via printk (effective only
 *              when CONFIG_VERBOSE_MCHECK is enabled)
 *
 * Currently no categorization is done; the environmental registers are
 * simply dumped, and MCHK_DISPOSITION_UNKNOWN_ERROR is always returned.
 */
static int
privateer_process_680_frame(struct el_common *mchk_header, int print)
{
	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
#ifdef CONFIG_VERBOSE_MCHECK
	struct el_PRIVATEER_envdata_mcheck *emchk =
		(struct el_PRIVATEER_envdata_mcheck *)
		((unsigned long)mchk_header + mchk_header->sys_offset);

	/* TODO - catagorize errors, for now, no error */

	if (!print)
		return status;

	/* TODO - decode instead of just dumping... */
	printk("%s Summary Flags: %016lx\n"
	       " CChip DIRx: %016lx\n"
	       " System Management IR: %016lx\n"
	       " CPU IR: %016lx\n"
	       " Power Supply IR: %016lx\n"
	       " LM78 Fault Status: %016lx\n"
	       " System Doors: %016lx\n"
	       " Temperature Warning: %016lx\n"
	       " Fan Control: %016lx\n"
	       " Fatal Power Down Code: %016lx\n",
	       err_print_prefix,
	       emchk->summary,
	       emchk->c_dirx,
	       emchk->smir,
	       emchk->cpuir,
	       emchk->psir,
	       emchk->fault,
	       emchk->sys_doors,
	       emchk->temp_warn,
	       emchk->fan_ctrl,
	       emchk->code);
#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}
624 | |||
/*
 * Process a Privateer logout frame, dispatching on the machine-check
 * code embedded in the EV6 frame header.
 *
 * mchk_header: the logout frame
 * print:       non-zero to print details
 *
 * Processor errors (vectors 630/670) go to the EV6 decoder, system
 * errors (620/660) to the Titan decoder, environmental events (680)
 * to the Privateer 680 decoder.  Unknown codes are reported and the
 * whole frame is dumped.  Returns the combined MCHK_DISPOSITION_*
 * bits.
 */
int
privateer_process_logout_frame(struct el_common *mchk_header, int print)
{
	struct el_common_EV6_mcheck *ev6mchk =
		(struct el_common_EV6_mcheck *)mchk_header;
	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;

	/*
	 * Machine check codes (the comments give the SCB vector each
	 * code is delivered on)
	 */
#define PRIVATEER_MCHK__CORR_ECC		0x86	/* 630 */
#define PRIVATEER_MCHK__DC_TAG_PERR		0x9E	/* 630 */
#define PRIVATEER_MCHK__PAL_BUGCHECK		0x8E	/* 670 */
#define PRIVATEER_MCHK__OS_BUGCHECK		0x90	/* 670 */
#define PRIVATEER_MCHK__PROC_HRD_ERR		0x98	/* 670 */
#define PRIVATEER_MCHK__ISTREAM_CMOV_PRX	0xA0	/* 670 */
#define PRIVATEER_MCHK__ISTREAM_CMOV_FLT	0xA2	/* 670 */
#define PRIVATEER_MCHK__SYS_HRD_ERR		0x202	/* 660 */
#define PRIVATEER_MCHK__SYS_CORR_ERR		0x204	/* 620 */
#define PRIVATEER_MCHK__SYS_ENVIRON		0x206	/* 680 */

	switch(ev6mchk->MCHK_Code) {
	/*
	 * Vector 630 - Processor, Correctable
	 */
	case PRIVATEER_MCHK__CORR_ECC:
	case PRIVATEER_MCHK__DC_TAG_PERR:
		/*
		 * Fall through to vector 670 for processing...
		 */
	/*
	 * Vector 670 - Processor, Uncorrectable
	 */
	case PRIVATEER_MCHK__PAL_BUGCHECK:
	case PRIVATEER_MCHK__OS_BUGCHECK:
	case PRIVATEER_MCHK__PROC_HRD_ERR:
	case PRIVATEER_MCHK__ISTREAM_CMOV_PRX:
	case PRIVATEER_MCHK__ISTREAM_CMOV_FLT:
		/* Processor error -- common EV6 decode. */
		status |= ev6_process_logout_frame(mchk_header, print);
		break;

	/*
	 * Vector 620 - System, Correctable
	 */
	case PRIVATEER_MCHK__SYS_CORR_ERR:
		/*
		 * Fall through to vector 660 for processing...
		 */
	/*
	 * Vector 660 - System, Uncorrectable
	 */
	case PRIVATEER_MCHK__SYS_HRD_ERR:
		/* System error -- Titan core-logic decode. */
		status |= titan_process_logout_frame(mchk_header, print);
		break;

	/*
	 * Vector 680 - System, Environmental
	 */
	case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */
		status |= privateer_process_680_frame(mchk_header, print);
		break;

	/*
	 * Unknown
	 */
	default:
		status |= MCHK_DISPOSITION_REPORT;
		if (print) {
			printk("%s** Unknown Error, frame follows\n",
			       err_print_prefix);
			mchk_dump_logout_frame(mchk_header);
		}

	}

	return status;
}
702 | |||
/*
 * Privateer machine-check handler.
 *
 * vector: SCB vector of the machine check
 * la_ptr: address of the logout frame supplied by PALcode
 * regs:   interrupted register state
 *
 * Anything other than a system event (SCB_Q_SYSEVENT) is delegated to
 * titan_machine_check().  System events are always reported -- even a
 * "no error" frame can signal a return to normal status.
 */
void
privateer_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
{
	struct el_common *mchk_header = (struct el_common *)la_ptr;
	struct el_TITAN_sysdata_mcheck *tmchk =
		(struct el_TITAN_sysdata_mcheck *)
		(la_ptr + mchk_header->sys_offset);
	u64 irqmask;
	char *saved_err_prefix = err_print_prefix;

	/* NOTE(review): both masks are 0xE00UL; presumably intentional
	   (the 680 sources are the hotplug sources) -- confirm against
	   the Privateer platform documentation. */
#define PRIVATEER_680_INTERRUPT_MASK		(0xE00UL)
#define PRIVATEER_HOTPLUG_INTERRUPT_MASK	(0xE00UL)

	/*
	 * Sync the processor.
	 */
	mb();
	draina();

	/*
	 * Only handle system events here.
	 */
	if (vector != SCB_Q_SYSEVENT)
		/* (returning a void call's "value" -- relies on the GCC
		   extension permitting this in a void function) */
		return titan_machine_check(vector, la_ptr, regs);

	/*
	 * Report the event - System Events should be reported even if no
	 * error is indicated since the event could indicate the return
	 * to normal status.
	 */
	err_print_prefix = KERN_CRIT;
	printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n",
	       err_print_prefix,
	       (unsigned int)vector, (int)smp_processor_id());
	privateer_process_680_frame(mchk_header, 1);
	err_print_prefix = saved_err_prefix;

	/*
	 * Convert any pending interrupts which report as 680 machine
	 * checks to interrupts.
	 */
	irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK;

	/*
	 * Dispatch the interrupt(s).
	 */
	titan_dispatch_irqs(irqmask, regs);

	/*
	 * Release the logout frame.
	 */
	wrmces(0x7);
	mb();
}
diff --git a/arch/alpha/kernel/es1888.c b/arch/alpha/kernel/es1888.c new file mode 100644 index 000000000000..d584c85fea7a --- /dev/null +++ b/arch/alpha/kernel/es1888.c | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/es1888.c | ||
3 | * | ||
4 | * Init the built-in ES1888 sound chip (SB16 compatible) | ||
5 | */ | ||
6 | |||
7 | #include <linux/init.h> | ||
8 | #include <asm/io.h> | ||
9 | #include "proto.h" | ||
10 | |||
/*
 * Initialize the built-in ES1888 sound chip (SB16 compatible).
 *
 * The chip is configured purely through a magic sequence of I/O port
 * reads and writes on its 0x22x register block: first a read sequence
 * that unlocks the controller and sets the base address, then writes
 * through the 0x022c command port (polling bit 7 of the status ports
 * for ready/busy) to select IRQ 5 and DMA channel 1.
 */
void __init
es1888_init(void)
{
	/* Sequence of IO reads to init the audio controller */
	inb(0x0229);
	inb(0x0229);
	inb(0x0229);
	inb(0x022b);
	inb(0x0229);
	inb(0x022b);
	inb(0x0229);
	inb(0x0229);
	inb(0x022b);
	inb(0x0229);
	inb(0x0220); /* This sets the base address to 0x220 */

	/* Sequence to set DMA channels */
	outb(0x01, 0x0226);		/* reset */
	inb(0x0226);			/* pause */
	outb(0x00, 0x0226);		/* release reset */
	while (!(inb(0x022e) & 0x80))	/* wait for bit 7 to assert*/
		continue;
	inb(0x022a);			/* pause */
	outb(0xc6, 0x022c);		/* enable extended mode */
	inb(0x022a);			/* pause, also forces the write */
	while (inb(0x022c) & 0x80)	/* wait for bit 7 to deassert */
		continue;
	outb(0xb1, 0x022c);		/* setup for write to Interrupt CR */
	while (inb(0x022c) & 0x80)	/* wait for bit 7 to deassert */
		continue;
	outb(0x14, 0x022c);		/* set IRQ 5 */
	while (inb(0x022c) & 0x80)	/* wait for bit 7 to deassert */
		continue;
	outb(0xb2, 0x022c);		/* setup for write to DMA CR */
	while (inb(0x022c) & 0x80)	/* wait for bit 7 to deassert */
		continue;
	outb(0x18, 0x022c);		/* set DMA channel 1 */
	inb(0x022c);			/* force the write */
}
diff --git a/arch/alpha/kernel/gct.c b/arch/alpha/kernel/gct.c new file mode 100644 index 000000000000..8827687b9f89 --- /dev/null +++ b/arch/alpha/kernel/gct.c | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/gct.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/errno.h> | ||
9 | |||
10 | #include <asm/hwrpb.h> | ||
11 | #include <asm/gct.h> | ||
12 | |||
13 | int | ||
14 | gct6_find_nodes(gct6_node *node, gct6_search_struct *search) | ||
15 | { | ||
16 | gct6_search_struct *wanted; | ||
17 | int status = 0; | ||
18 | |||
19 | /* First check the magic number. */ | ||
20 | if (node->magic != GCT_NODE_MAGIC) { | ||
21 | printk(KERN_ERR "GCT Node MAGIC incorrect - GCT invalid\n"); | ||
22 | return -EINVAL; | ||
23 | } | ||
24 | |||
25 | /* Check against the search struct. */ | ||
26 | for (wanted = search; | ||
27 | wanted && (wanted->type | wanted->subtype); | ||
28 | wanted++) { | ||
29 | if (node->type != wanted->type) | ||
30 | continue; | ||
31 | if (node->subtype != wanted->subtype) | ||
32 | continue; | ||
33 | |||
34 | /* Found it -- call out. */ | ||
35 | if (wanted->callout) | ||
36 | wanted->callout(node); | ||
37 | } | ||
38 | |||
39 | /* Now walk the tree, siblings first. */ | ||
40 | if (node->next) | ||
41 | status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search); | ||
42 | |||
43 | /* Then the children. */ | ||
44 | if (node->child) | ||
45 | status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search); | ||
46 | |||
47 | return status; | ||
48 | } | ||
diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S new file mode 100644 index 000000000000..4ca2e404708a --- /dev/null +++ b/arch/alpha/kernel/head.S | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * alpha/boot/head.S | ||
3 | * | ||
4 | * initial boot stuff.. At this point, the bootloader has already | ||
5 | * switched into OSF/1 PAL-code, and loaded us at the correct address | ||
6 | * (START_ADDR). So there isn't much left for us to do: just set up | ||
7 | * the kernel global pointer and jump to the kernel entry-point. | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/asm_offsets.h> | ||
13 | |||
	.globl swapper_pg_dir
	.globl _stext
swapper_pg_dir=SWAPPER_PGD	# page directory lives at the fixed PAL slot

	.set noreorder
	.globl	__start
	.ent	__start
_stext:
__start:
	.prologue 0
	br	$27,1f		# get our own address into $27 ...
1:	ldgp	$29,0($27)	# ... so we can load the global pointer
	/* We need to get current_task_info loaded up... */
	lda	$8,init_thread_union
	/* ... and find our stack (the thread union is 16KB; leave room
	   for the pt_regs save area at the top) ... */
	lda	$30,0x4000 - SIZEOF_PT_REGS($8)
	/* ... and then we can start the kernel. */
	jsr	$26,start_kernel
	call_pal PAL_halt	# only reached if start_kernel ever returns
	.end __start
34 | |||
#ifdef CONFIG_SMP
	.align 3
	.globl	__smp_callin
	.ent	__smp_callin
	/* On entry here from SRM console, the HWPCB of the per-cpu
	   slot for this processor has been loaded.  We've arranged
	   for the UNIQUE value for this process to contain the PCBB
	   of the target idle task.  */
__smp_callin:
	.prologue 1
	ldgp	$29,0($27)	# First order of business, load the GP.

	call_pal PAL_rduniq	# Grab the target PCBB.
	mov	$0,$16		# Install it.
	call_pal PAL_swpctx

	lda	$8,0x3fff	# Find "current": mask the stack pointer
	bic	$30,$8,$8	# down to its 16KB thread-union boundary.

	jsr	$26,smp_callin
	call_pal PAL_halt	# smp_callin should never return
	.end __smp_callin
#endif /* CONFIG_SMP */
58 | |||
#
# The following two functions are needed for supporting SRM PALcode
# on the PC164 (at least), since that PALcode manages the interrupt
# masking, and we cannot duplicate the effort without causing problems
#

# cserve_ena(arg): the caller's argument is moved from $16 to $17 to
# make room for the PAL_cserve sub-function selector (52 = enable).
	.align 3
	.globl	cserve_ena
	.ent	cserve_ena
cserve_ena:
	.prologue 0
	bis	$16,$16,$17	# copy caller's arg into $17
	lda	$16,52($31)	# $16 = sub-function code 52
	call_pal PAL_cserve
	ret	($26)
	.end	cserve_ena
75 | |||
# cserve_dis(arg): same calling shuffle as cserve_ena above, with
# PAL_cserve sub-function selector 53 (= disable).
	.align 3
	.globl	cserve_dis
	.ent	cserve_dis
cserve_dis:
	.prologue 0
	bis	$16,$16,$17	# copy caller's arg into $17
	lda	$16,53($31)	# $16 = sub-function code 53
	call_pal PAL_cserve
	ret	($26)
	.end	cserve_dis
86 | |||
#
# It is handy, on occasion, to make halt actually just loop.
# Putting it here means we dont have to recompile the whole
# kernel.
#

	.align 3
	.globl	halt
	.ent	halt
halt:
	.prologue 0
	call_pal PAL_halt	# drop into PALcode; does not return
	.end	halt
diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c new file mode 100644 index 000000000000..835d09a7b332 --- /dev/null +++ b/arch/alpha/kernel/init_task.c | |||
@@ -0,0 +1,23 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/init_task.h> | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/mqueue.h> | ||
8 | #include <asm/uaccess.h> | ||
9 | |||
10 | |||
/* Statically-initialized bookkeeping structures for the first task
   ("init_task"), built from the generic INIT_* initializers.  */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_mm);
EXPORT_SYMBOL(init_task);

/* The initial thread_info + kernel stack.  Placed in its own section
   so early boot code (head.S loads init_thread_union into $8) can
   find it at a known location.  */
union thread_union init_thread_union
	__attribute__((section(".data.init_thread")))
	= { INIT_THREAD_INFO(init_task) };
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c new file mode 100644 index 000000000000..19c5875ab398 --- /dev/null +++ b/arch/alpha/kernel/io.c | |||
@@ -0,0 +1,630 @@ | |||
1 | /* | ||
2 | * Alpha IO and memory functions. | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <asm/io.h> | ||
10 | |||
11 | /* Out-of-line versions of the i/o routines that redirect into the | ||
12 | platform-specific version. Note that "platform-specific" may mean | ||
13 | "generic", which bumps through the machine vector. */ | ||
14 | |||
/*
 * Port-space accessors.  Each bumps through the platform-specific
 * implementation (IO_CONCAT pastes in the per-platform prefix) and
 * issues mb() so device accesses stay in program order.
 */
unsigned int
ioread8(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

unsigned int ioread16(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

unsigned int ioread32(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

void iowrite8(u8 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
	mb();
}

void iowrite16(u16 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
	mb();
}

void iowrite32(u32 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
	mb();
}

EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite32);
61 | |||
/*
 * Legacy ISA-style port accessors, expressed in terms of the
 * ioread/iowrite family via ioport_map().
 */
u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}

void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}

EXPORT_SYMBOL(inb);
EXPORT_SYMBOL(inw);
EXPORT_SYMBOL(inl);
EXPORT_SYMBOL(outb);
EXPORT_SYMBOL(outw);
EXPORT_SYMBOL(outl);
98 | |||
/*
 * Raw MMIO accessors.  These go straight to the platform
 * implementation with no memory barrier; callers that need ordering
 * must use the non-__raw versions or issue mb() themselves.
 */
u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

EXPORT_SYMBOL(__raw_readb);
EXPORT_SYMBOL(__raw_readw);
EXPORT_SYMBOL(__raw_readl);
EXPORT_SYMBOL(__raw_readq);
EXPORT_SYMBOL(__raw_writeb);
EXPORT_SYMBOL(__raw_writew);
EXPORT_SYMBOL(__raw_writel);
EXPORT_SYMBOL(__raw_writeq);
147 | |||
/*
 * Ordered MMIO accessors: the __raw access plus mb(), so reads and
 * writes reach the device in program order.
 */
u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
}

void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
}

void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
}

void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
}

EXPORT_SYMBOL(readb);
EXPORT_SYMBOL(readw);
EXPORT_SYMBOL(readl);
EXPORT_SYMBOL(readq);
EXPORT_SYMBOL(writeb);
EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);
208 | |||
209 | |||
/*
 * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
 */
void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
{
	/* Byte-copy until DST is 32-bit aligned (or count runs out).  */
	while ((unsigned long)dst & 0x3) {
		if (!count)
			return;
		count--;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}

	/* Aligned: gather four port bytes into one 32-bit store.  */
	while (count >= 4) {
		unsigned int w;
		count -= 4;
		w = ioread8(port);
		w |= ioread8(port) << 8;
		w |= ioread8(port) << 16;
		w |= ioread8(port) << 24;
		*(unsigned int *)dst = w;
		dst += 4;
	}

	/* Tail: up to three leftover bytes.  */
	while (count) {
		--count;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}
}

void insb(unsigned long port, void *dst, unsigned long count)
{
	ioread8_rep(ioport_map(port, 1), dst, count);
}

EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(insb);
248 | |||
/*
 * Read COUNT 16-bit words from port PORT into memory starting at
 * SRC.  SRC must be at least short aligned.  This is used by the
 * IDE driver to read disk sectors.  Performance is important, but
 * the interfaces seems to be slow: just using the inlined version
 * of the inw() breaks things.
 */
void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
{
	/* If DST is only 2-byte aligned, a single short store brings
	   it to 4-byte alignment.  An odd address is a caller bug.  */
	if (unlikely((unsigned long)dst & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)dst & 0x1);
		count--;
		*(unsigned short *)dst = ioread16(port);
		dst += 2;
	}

	/* Main loop: combine two port words into one 32-bit store.  */
	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = ioread16(port);
		w |= ioread16(port) << 16;
		*(unsigned int *)dst = w;
		dst += 4;
	}

	/* At most one trailing word remains.  */
	if (count) {
		*(unsigned short*)dst = ioread16(port);
	}
}

void insw(unsigned long port, void *dst, unsigned long count)
{
	ioread16_rep(ioport_map(port, 2), dst, count);
}

EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(insw);
288 | |||
289 | |||
/*
 * Read COUNT 32-bit words from port PORT into memory starting at
 * SRC.  Now works with any alignment in SRC.  Performance is important,
 * but the interfaces seems to be slow: just using the inlined version
 * of the inl() breaks things.
 */
void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
{
	if (unlikely((unsigned long)dst & 0x3)) {
		/* Misaligned buffer: store through a packed struct so
		   the compiler emits alignment-safe stores.  */
		while (count--) {
			struct S { int x __attribute__((packed)); };
			((struct S *)dst)->x = ioread32(port);
			dst += 4;
		}
	} else {
		/* Buffer 32-bit aligned. */
		while (count--) {
			*(unsigned int *)dst = ioread32(port);
			dst += 4;
		}
	}
}

void insl(unsigned long port, void *dst, unsigned long count)
{
	ioread32_rep(ioport_map(port, 4), dst, count);
}

EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(insl);
320 | |||
321 | |||
322 | /* | ||
323 | * Like insb but in the opposite direction. | ||
324 | * Don't worry as much about doing aligned memory transfers: | ||
325 | * doing byte reads the "slow" way isn't nearly as slow as | ||
326 | * doing byte writes the slow way (no r-m-w cycle). | ||
327 | */ | ||
328 | void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count) | ||
329 | { | ||
330 | const unsigned char *src = xsrc; | ||
331 | while (count--) | ||
332 | iowrite8(*src++, port); | ||
333 | } | ||
334 | |||
/* Legacy wrapper: byte string write via the mapped-port path.  */
void outsb(unsigned long port, const void *src, unsigned long count)
{
	iowrite8_rep(ioport_map(port, 1), src, count);
}

EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(outsb);
342 | |||
343 | |||
/*
 * Like insw but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Performance is important, but the
 * interfaces seems to be slow: just using the inlined version of the
 * outw() breaks things.
 */
void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
{
	/* If SRC is only 2-byte aligned, one short write brings it to
	   4-byte alignment.  An odd address is a caller bug.  */
	if (unlikely((unsigned long)src & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)src & 0x1);
		iowrite16(*(unsigned short *)src, port);
		src += 2;
		--count;
	}

	/* Main loop: one 32-bit load feeds two port writes.  */
	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = *(unsigned int *)src;
		src += 4;
		iowrite16(w >> 0, port);
		iowrite16(w >> 16, port);
	}

	/* At most one trailing word remains.  */
	if (count) {
		iowrite16(*(unsigned short *)src, port);
	}
}

void outsw(unsigned long port, const void *src, unsigned long count)
{
	iowrite16_rep(ioport_map(port, 2), src, count);
}

EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(outsw);
382 | |||
383 | |||
/*
 * Like insl but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Works with any alignment in SRC.
 * Performance is important, but the interfaces seems to be slow:
 * just using the inlined version of the outl() breaks things.
 */
void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		/* Misaligned buffer: load through a packed struct so
		   the compiler emits alignment-safe loads.  */
		while (count--) {
			struct S { int x __attribute__((packed)); };
			iowrite32(((struct S *)src)->x, port);
			src += 4;
		}
	} else {
		/* Buffer 32-bit aligned. */
		while (count--) {
			iowrite32(*(unsigned int *)src, port);
			src += 4;
		}
	}
}

void outsl(unsigned long port, const void *src, unsigned long count)
{
	iowrite32_rep(ioport_map(port, 4), src, count);
}

EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(outsl);
414 | |||
415 | |||
/*
 * Copy data from IO memory space to "real" memory space.
 * This needs to be optimized.
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time.

	   Each loop below biases COUNT down by one element first so
	   the do/while runs while at least one full element remains,
	   then undoes the bias, leaving the residue for the
	   narrower loops that follow.  */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			*(u64 *)to = __raw_readq(from);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			*(u32 *)to = __raw_readl(from);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			*(u16 *)to = __raw_readw(from);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	/* Final residue (or the whole copy, if never co-aligned).  */
	while (count > 0) {
		*(u8 *) to = __raw_readb(from);
		count--;
		to++;
		from++;
	}
	mb();	/* order the device reads before subsequent accesses */
}

EXPORT_SYMBOL(memcpy_fromio);
468 | |||
469 | |||
/*
 * Copy data from "real" memory space to IO memory space.
 * This needs to be optimized.
 */
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time.

	   Same biased-count do/while pattern as memcpy_fromio: bias
	   COUNT down by one element, loop while a full element
	   remains, then restore the bias for the narrower loops.  */
	/* FIXME -- align FROM. */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			__raw_writeq(*(const u64 *)from, to);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			__raw_writel(*(const u32 *)from, to);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			__raw_writew(*(const u16 *)from, to);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	/* Final residue (or the whole copy, if never co-aligned).  */
	while (count > 0) {
		__raw_writeb(*(const u8 *) from, to);
		count--;
		to++;
		from++;
	}
	mb();	/* make the device writes visible before returning */
}

EXPORT_SYMBOL(memcpy_toio);
523 | |||
524 | |||
/*
 * "memset" on IO memory space.  NOTE(review): the 16/32/64-bit
 * stores below all write C as-is, so C is presumably a fill pattern
 * already replicated to every byte of the long by the caller --
 * confirm against the callers / the _memset_c_io wrappers.
 */
void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
{
	/* Handle any initial odd byte */
	if (count > 0 && ((u64)to & 1)) {
		__raw_writeb(c, to);
		to++;
		count--;
	}

	/* Handle any initial odd halfword */
	if (count >= 2 && ((u64)to & 2)) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* Handle any initial odd word */
	if (count >= 4 && ((u64)to & 4)) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* Handle all full-sized quadwords: we're aligned
	   (or have a small count).  COUNT is biased down by 8 so the
	   loop runs while a full quadword remains.  */
	count -= 8;
	if (count >= 0) {
		do {
			__raw_writeq(c, to);
			to += 8;
			count -= 8;
		} while (count >= 0);
	}
	count += 8;

	/* The tail is word-aligned if we still have count >= 4 */
	if (count >= 4) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* The tail is half-word aligned if we have count >= 2 */
	if (count >= 2) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* And finally, one last byte.. */
	if (count) {
		__raw_writeb(c, to);
	}
	mb();	/* make the device writes visible before returning */
}

EXPORT_SYMBOL(_memset_c_io);
585 | |||
/* A version of memcpy used by the vga console routines to move data around
   arbitrarily between screen and main memory.  COUNT is in bytes (note
   the count/2 conversion in the IO-to-IO path and the raw byte counts
   passed to memcpy_fromio/memcpy_toio/memcpy).  */

void
scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
	const u16 __iomem *ios = (const u16 __iomem *) s;
	u16 __iomem *iod = (u16 __iomem *) d;
	int s_isio = __is_ioaddr(s);
	int d_isio = __is_ioaddr(d);

	if (s_isio) {
		if (d_isio) {
			/* FIXME: Should handle unaligned ops and
			   operation widening.  */

			count /= 2;	/* bytes -> 16-bit words */
			while (count--) {
				u16 tmp = __raw_readw(ios++);
				__raw_writew(tmp, iod++);
			}
		}
		else
			memcpy_fromio(d, ios, count);
	} else {
		if (d_isio)
			memcpy_toio(iod, s, count);
		else
			memcpy(d, s, count);
	}
}

EXPORT_SYMBOL(scr_memcpyw);
619 | |||
/* Map a port number to an __iomem cookie for the ioread/iowrite
   family.  SIZE is accepted for interface compatibility but unused
   here.  */
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

/* Nothing to undo: ioport_map allocates no resources.  */
void ioport_unmap(void __iomem *addr)
{
}

EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c new file mode 100644 index 000000000000..b6114f5c0d2b --- /dev/null +++ b/arch/alpha/kernel/irq.c | |||
@@ -0,0 +1,774 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/irq.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * | ||
6 | * This file contains the code used by various IRQ handling routines: | ||
7 | * asking for different IRQ's should be done through these routines | ||
8 | * instead of just grabbing them. Thus setups with different IRQ numbers | ||
9 | * shouldn't result in any weird surprises, and installing new handlers | ||
10 | * should be easier. | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/kernel_stat.h> | ||
18 | #include <linux/signal.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/random.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/proc_fs.h> | ||
27 | #include <linux/seq_file.h> | ||
28 | #include <linux/profile.h> | ||
29 | #include <linux/bitops.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/uaccess.h> | ||
34 | |||
/*
 * Controller mappings for all interrupt sources: every slot starts
 * out pointing at the "none" controller until setup claims it.
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc(unsigned int irq);

/* Count of unexpected interrupts (bumped in no_irq_ack below).  */
volatile unsigned long irq_err_count;

/*
 * Special irq handlers.
 */

/* Placeholder action handler: claims nothing, always IRQ_NONE.  */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
57 | |||
/*
 * Generic no controller code
 */

/* No-ops for startup/shutdown/enable/disable/end on an unclaimed
   vector.  */
static void no_irq_enable_disable(unsigned int irq) { }
static unsigned int no_irq_startup(unsigned int irq) { return 0; }

/* "ack" for a vector nobody registered: count it and complain.  */
static void
no_irq_ack(unsigned int irq)
{
	irq_err_count++;
	printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}

/* The default controller installed in every irq_desc slot.  */
struct hw_interrupt_type no_irq_type = {
	.typename	= "none",
	.startup	= no_irq_startup,
	.shutdown	= no_irq_enable_disable,
	.enable		= no_irq_enable_disable,
	.disable	= no_irq_enable_disable,
	.ack		= no_irq_ack,
	.end		= no_irq_enable_disable,
};
81 | |||
/* Run the (possibly shared) action chain for IRQ and return the
   accumulated status flags of the handlers that fired.  */
int
handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
		 struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret;

	do {
		/* Slow handlers run with interrupts enabled; handlers
		   that requested SA_INTERRUPT run with them off.  */
		if (!(action->flags & SA_INTERRUPT))
			local_irq_enable();
		else
			local_irq_disable();

		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;	/* remember who fired */
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);	/* feed the entropy pool */
	local_irq_disable();

	return status;
}
106 | |||
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void inline
disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	/* Only the first disable masks the line; further calls just
	   deepen the nesting count.  */
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
126 | |||
/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other IRQ..
 */
void
disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	/* Wait for any in-flight handler on another CPU to finish.  */
	synchronize_irq(irq);
}
137 | |||
/* Undo one level of disable_irq nesting; the hardware is unmasked
   only when the count returns to zero.  */
void
enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		/* Last disable being undone: clear IRQ_DISABLED and,
		   if an interrupt arrived while masked, replay it.  */
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through -- intentionally drops into the depth
		   decrement below */
	}
	default:
		desc->depth--;
		break;
	case 0:
		/* More enables than disables: complain, leave depth 0.  */
		printk(KERN_ERR "enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
165 | |||
/* Attach NEW to IRQ's action chain, starting the line up if this is
   the first handler.  Returns 0, -ENOSYS (no controller), or -EBUSY
   (sharing refused).  */
int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	irq_desc_t *desc = irq_desc + irq;

	/* No controller registered for this vector.  */
	if (desc->handler == &no_irq_type)
		return -ENOSYS;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* First handler: clear stale state and start the line.  */
		desc->depth = 0;
		desc->status &=
		    ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	return 0;
}
226 | |||
227 | static struct proc_dir_entry * root_irq_dir; | ||
228 | static struct proc_dir_entry * irq_dir[NR_IRQS]; | ||
229 | |||
230 | #ifdef CONFIG_SMP | ||
231 | static struct proc_dir_entry * smp_affinity_entry[NR_IRQS]; | ||
232 | static char irq_user_affinity[NR_IRQS]; | ||
233 | static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL }; | ||
234 | |||
/*
 * Pick a default CPU for a newly requested IRQ, round-robining over
 * the possible CPUs.  Does nothing if the controller cannot set
 * affinity, or if the user has pinned this IRQ via /proc
 * (irq_user_affinity[irq] set by irq_affinity_write_proc).
 *
 * NOTE(review): 'last_cpu' is a bare static with no locking;
 * concurrent request_irq() calls could race on it.  Placement-only
 * consequence, but confirm callers serialize this.
 */
static void
select_smp_affinity(int irq)
{
	static int last_cpu;		/* last CPU handed out; persists across calls */
	int cpu = last_cpu + 1;

	if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
		return;

	/* Advance to the next possible CPU, wrapping at NR_CPUS-1. */
	while (!cpu_possible(cpu))
		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
	last_cpu = cpu;

	/* Record the choice and push it to the interrupt controller. */
	irq_affinity[irq] = cpumask_of_cpu(cpu);
	irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
}
251 | |||
252 | static int | ||
253 | irq_affinity_read_proc (char *page, char **start, off_t off, | ||
254 | int count, int *eof, void *data) | ||
255 | { | ||
256 | int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]); | ||
257 | if (count - len < 2) | ||
258 | return -EINVAL; | ||
259 | len += sprintf(page + len, "\n"); | ||
260 | return len; | ||
261 | } | ||
262 | |||
263 | static int | ||
264 | irq_affinity_write_proc(struct file *file, const char __user *buffer, | ||
265 | unsigned long count, void *data) | ||
266 | { | ||
267 | int irq = (long) data, full_count = count, err; | ||
268 | cpumask_t new_value; | ||
269 | |||
270 | if (!irq_desc[irq].handler->set_affinity) | ||
271 | return -EIO; | ||
272 | |||
273 | err = cpumask_parse(buffer, count, new_value); | ||
274 | |||
275 | /* The special value 0 means release control of the | ||
276 | affinity to kernel. */ | ||
277 | cpus_and(new_value, new_value, cpu_online_map); | ||
278 | if (cpus_empty(new_value)) { | ||
279 | irq_user_affinity[irq] = 0; | ||
280 | select_smp_affinity(irq); | ||
281 | } | ||
282 | /* Do not allow disabling IRQs completely - it's a too easy | ||
283 | way to make the system unusable accidentally :-) At least | ||
284 | one online CPU still has to be targeted. */ | ||
285 | else { | ||
286 | irq_affinity[irq] = new_value; | ||
287 | irq_user_affinity[irq] = 1; | ||
288 | irq_desc[irq].handler->set_affinity(irq, new_value); | ||
289 | } | ||
290 | |||
291 | return full_count; | ||
292 | } | ||
293 | |||
294 | #endif /* CONFIG_SMP */ | ||
295 | |||
296 | #define MAX_NAMELEN 10 | ||
297 | |||
/*
 * Create the /proc/irq/<n> directory for one interrupt and, on SMP,
 * its smp_affinity control file.  No-op if /proc/irq does not exist
 * yet, the irq has no real controller, or it was already registered.
 */
static void
register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	if (irq_desc[irq].handler->set_affinity) {
		struct proc_dir_entry *entry;
		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			/* Stash the irq number for the read/write handlers. */
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}
330 | |||
331 | void | ||
332 | init_irq_proc (void) | ||
333 | { | ||
334 | int i; | ||
335 | |||
336 | /* create /proc/irq */ | ||
337 | root_irq_dir = proc_mkdir("irq", NULL); | ||
338 | |||
339 | #ifdef CONFIG_SMP | ||
340 | /* create /proc/irq/prof_cpu_mask */ | ||
341 | create_prof_cpu_mask(root_irq_dir); | ||
342 | #endif | ||
343 | |||
344 | /* | ||
345 | * Create entries for all existing IRQs. | ||
346 | */ | ||
347 | for (i = 0; i < ACTUAL_NR_IRQS; i++) { | ||
348 | if (irq_desc[i].handler == &no_irq_type) | ||
349 | continue; | ||
350 | register_irq_proc(i); | ||
351 | } | ||
352 | } | ||
353 | |||
/*
 * Install an interrupt handler for 'irq'.  Validates the irq number
 * and handler, allocates an irqaction, selects a default SMP
 * affinity, and registers the action via setup_irq().  Returns 0 on
 * success or a negative errno (freeing the action) on failure.
 * May sleep (GFP_KERNEL allocation).
 */
int
request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
	    unsigned long irqflags, const char * devname, void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= ACTUAL_NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if ((irqflags & SA_SHIRQ) && !dev_id) {
		printk(KERN_ERR
		       "Bad boy: %s (at %p) called us without a dev_id!\n",
		       devname, __builtin_return_address(0));
	}
#endif

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

#ifdef CONFIG_SMP
	/* Spread fresh IRQs across CPUs by default. */
	select_smp_affinity(irq);
#endif

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);	/* setup failed; don't leak the action */
	return retval;
}
401 | |||
402 | EXPORT_SYMBOL(request_irq); | ||
403 | |||
/*
 * Remove the handler identified by (irq, dev_id) from the irq's
 * action list.  If it was the last action, the line is disabled and
 * the controller shut down.  On SMP we spin until any in-flight
 * invocation on another CPU has finished before freeing the action.
 *
 * NOTE(review): the IRQ_INPROGRESS poll happens after dropping
 * desc->lock, relying on handle_irq() clearing the flag under the
 * lock — confirm no re-entry window matters here.
 */
void
free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS) {
		printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			/* 'pp' remembers the link to patch out of the list. */
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found - now remove it from the list of entries. */
			*pp = action->next;
			if (!desc->action) {
				/* Last handler gone: quiesce the line. */
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on
			   another CPU. */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			kfree(action);
			return;
		}
		/* Reached end of list without a match. */
		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
449 | |||
450 | EXPORT_SYMBOL(free_irq); | ||
451 | |||
/*
 * seq_file backend for /proc/interrupts.  For each installed irq ('v'
 * carries the row index): per-CPU counts, controller type name, and
 * the chain of handler names ('+' flags SA_INTERRUPT handlers).  The
 * row after the last irq prints IPI (SMP) and spurious-irq totals.
 */
int
show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_SMP
	int j;
#endif
	int i = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;

#ifdef CONFIG_SMP
	/* Column headers, once, before row 0. */
	if (i == 0) {
		seq_puts(p, "           ");
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
				seq_printf(p, "CPU%d       ", i);
		seq_putc(p, '\n');
	}
#endif

	if (i < ACTUAL_NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;	/* nothing installed; print no row */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %c%s",
			(action->flags & SA_INTERRUPT)?'+':' ',
			action->name);

		/* Remaining (shared) handlers on this line. */
		for (action=action->next; action; action = action->next) {
			seq_printf(p, ", %c%s",
				  (action->flags & SA_INTERRUPT)?'+':' ',
				   action->name);
		}

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
		seq_puts(p, "IPI: ");
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
				seq_printf(p, "%10lu ", cpu_data[i].ipi_count);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10lu\n", irq_err_count);
	}
	return 0;
}
511 | |||
512 | |||
513 | /* | ||
514 | * handle_irq handles all normal device IRQ's (the special | ||
515 | * SMP cross-CPU interrupts have their own specific | ||
516 | * handlers). | ||
517 | */ | ||
518 | |||
519 | #define MAX_ILLEGAL_IRQS 16 | ||
520 | |||
521 | void | ||
522 | handle_irq(int irq, struct pt_regs * regs) | ||
523 | { | ||
524 | /* | ||
525 | * We ack quickly, we don't want the irq controller | ||
526 | * thinking we're snobs just because some other CPU has | ||
527 | * disabled global interrupts (we have already done the | ||
528 | * INT_ACK cycles, it's too late to try to pretend to the | ||
529 | * controller that we aren't taking the interrupt). | ||
530 | * | ||
531 | * 0 return value means that this irq is already being | ||
532 | * handled by some other CPU. (or is disabled) | ||
533 | */ | ||
534 | int cpu = smp_processor_id(); | ||
535 | irq_desc_t *desc = irq_desc + irq; | ||
536 | struct irqaction * action; | ||
537 | unsigned int status; | ||
538 | static unsigned int illegal_count=0; | ||
539 | |||
540 | if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) { | ||
541 | irq_err_count++; | ||
542 | illegal_count++; | ||
543 | printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", | ||
544 | irq); | ||
545 | return; | ||
546 | } | ||
547 | |||
548 | irq_enter(); | ||
549 | kstat_cpu(cpu).irqs[irq]++; | ||
550 | spin_lock_irq(&desc->lock); /* mask also the higher prio events */ | ||
551 | desc->handler->ack(irq); | ||
552 | /* | ||
553 | * REPLAY is when Linux resends an IRQ that was dropped earlier. | ||
554 | * WAITING is used by probe to mark irqs that are being tested. | ||
555 | */ | ||
556 | status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); | ||
557 | status |= IRQ_PENDING; /* we _want_ to handle it */ | ||
558 | |||
559 | /* | ||
560 | * If the IRQ is disabled for whatever reason, we cannot | ||
561 | * use the action we have. | ||
562 | */ | ||
563 | action = NULL; | ||
564 | if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
565 | action = desc->action; | ||
566 | status &= ~IRQ_PENDING; /* we commit to handling */ | ||
567 | status |= IRQ_INPROGRESS; /* we are handling it */ | ||
568 | } | ||
569 | desc->status = status; | ||
570 | |||
571 | /* | ||
572 | * If there is no IRQ handler or it was disabled, exit early. | ||
573 | * Since we set PENDING, if another processor is handling | ||
574 | * a different instance of this same irq, the other processor | ||
575 | * will take care of it. | ||
576 | */ | ||
577 | if (!action) | ||
578 | goto out; | ||
579 | |||
580 | /* | ||
581 | * Edge triggered interrupts need to remember pending events. | ||
582 | * This applies to any hw interrupts that allow a second | ||
583 | * instance of the same irq to arrive while we are in handle_irq | ||
584 | * or in the handler. But the code here only handles the _second_ | ||
585 | * instance of the irq, not the third or fourth. So it is mostly | ||
586 | * useful for irq hardware that does not mask cleanly in an | ||
587 | * SMP environment. | ||
588 | */ | ||
589 | for (;;) { | ||
590 | spin_unlock(&desc->lock); | ||
591 | handle_IRQ_event(irq, regs, action); | ||
592 | spin_lock(&desc->lock); | ||
593 | |||
594 | if (!(desc->status & IRQ_PENDING) | ||
595 | || (desc->status & IRQ_LEVEL)) | ||
596 | break; | ||
597 | desc->status &= ~IRQ_PENDING; | ||
598 | } | ||
599 | desc->status &= ~IRQ_INPROGRESS; | ||
600 | out: | ||
601 | /* | ||
602 | * The ->end() handler has to deal with interrupts which got | ||
603 | * disabled while the handler was running. | ||
604 | */ | ||
605 | desc->handler->end(irq); | ||
606 | spin_unlock(&desc->lock); | ||
607 | |||
608 | irq_exit(); | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * IRQ autodetection code.. | ||
613 | * | ||
614 | * This depends on the fact that any interrupt that | ||
615 | * comes in on to an unassigned handler will get stuck | ||
616 | * with "IRQ_WAITING" cleared and the interrupt | ||
617 | * disabled. | ||
618 | */ | ||
/*
 * Begin IRQ autodetection.  Three phases: (1) start up every unused
 * irq and wait ~20ms so stale/longstanding interrupts fire and get
 * flushed; (2) start them again with IRQ_AUTODETECT|IRQ_WAITING set
 * and wait ~100ms so spurious sources reveal themselves (their
 * WAITING bit gets cleared by handle_irq); (3) shut spurious ones
 * down and return a bitmask of irqs 0-31 still legitimately waiting.
 */
unsigned long
probe_irq_on(void)
{
	int i;
	irq_desc_t *desc;
	unsigned long delay;
	unsigned long val;

	/* Something may have generated an irq long ago and we want to
	   flush such a longstanding irq before considering it as spurious. */
	for (i = NR_IRQS-1; i >= 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/* enable any unassigned irqs (we must startup again here because
	   if a longstanding irq happened in the previous stage, it may have
	   masked itself) first, enable any unassigned irqs. */
	for (i = NR_IRQS-1; i >= 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			/* Nonzero startup() means one was already pending. */
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i=0; i<NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				/* Return value only covers irqs 0-31. */
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
688 | |||
689 | EXPORT_SYMBOL(probe_irq_on); | ||
690 | |||
691 | /* | ||
692 | * Return a mask of triggered interrupts (this | ||
693 | * can handle only legacy ISA interrupts). | ||
694 | */ | ||
695 | unsigned int | ||
696 | probe_irq_mask(unsigned long val) | ||
697 | { | ||
698 | int i; | ||
699 | unsigned int mask; | ||
700 | |||
701 | mask = 0; | ||
702 | for (i = 0; i < NR_IRQS; i++) { | ||
703 | irq_desc_t *desc = irq_desc + i; | ||
704 | unsigned int status; | ||
705 | |||
706 | spin_lock_irq(&desc->lock); | ||
707 | status = desc->status; | ||
708 | |||
709 | if (status & IRQ_AUTODETECT) { | ||
710 | /* We only react to ISA interrupts */ | ||
711 | if (!(status & IRQ_WAITING)) { | ||
712 | if (i < 16) | ||
713 | mask |= 1 << i; | ||
714 | } | ||
715 | |||
716 | desc->status = status & ~IRQ_AUTODETECT; | ||
717 | desc->handler->shutdown(i); | ||
718 | } | ||
719 | spin_unlock_irq(&desc->lock); | ||
720 | } | ||
721 | |||
722 | return mask & val; | ||
723 | } | ||
724 | |||
725 | /* | ||
726 | * Get the result of the IRQ probe.. A negative result means that | ||
727 | * we have several candidates (but we return the lowest-numbered | ||
728 | * one). | ||
729 | */ | ||
730 | |||
731 | int | ||
732 | probe_irq_off(unsigned long val) | ||
733 | { | ||
734 | int i, irq_found, nr_irqs; | ||
735 | |||
736 | nr_irqs = 0; | ||
737 | irq_found = 0; | ||
738 | for (i=0; i<NR_IRQS; i++) { | ||
739 | irq_desc_t *desc = irq_desc + i; | ||
740 | unsigned int status; | ||
741 | |||
742 | spin_lock_irq(&desc->lock); | ||
743 | status = desc->status; | ||
744 | |||
745 | if (status & IRQ_AUTODETECT) { | ||
746 | if (!(status & IRQ_WAITING)) { | ||
747 | if (!nr_irqs) | ||
748 | irq_found = i; | ||
749 | nr_irqs++; | ||
750 | } | ||
751 | desc->status = status & ~IRQ_AUTODETECT; | ||
752 | desc->handler->shutdown(i); | ||
753 | } | ||
754 | spin_unlock_irq(&desc->lock); | ||
755 | } | ||
756 | |||
757 | if (nr_irqs > 1) | ||
758 | irq_found = -irq_found; | ||
759 | return irq_found; | ||
760 | } | ||
761 | |||
762 | EXPORT_SYMBOL(probe_irq_off); | ||
763 | |||
764 | #ifdef CONFIG_SMP | ||
/*
 * Busy-wait until any handler for 'irq' currently running on another
 * CPU has finished (handle_irq clears IRQ_INPROGRESS when done).
 * Nothing to wait for if no handler is installed.
 * NOTE(review): the status poll is lockless; it relies on the flag
 * being cleared under desc->lock elsewhere — confirm that suffices.
 */
void synchronize_irq(unsigned int irq)
{
	/* is there anything to synchronize with? */
	if (!irq_desc[irq].action)
		return;

	while (irq_desc[irq].status & IRQ_INPROGRESS)
		barrier();
}
774 | #endif | ||
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c new file mode 100644 index 000000000000..e6ded33c6e22 --- /dev/null +++ b/arch/alpha/kernel/irq_alpha.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * Alpha specific irq code. | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/irq.h> | ||
9 | #include <linux/kernel_stat.h> | ||
10 | |||
11 | #include <asm/machvec.h> | ||
12 | #include <asm/dma.h> | ||
13 | |||
14 | #include "proto.h" | ||
15 | #include "irq_impl.h" | ||
16 | |||
17 | /* Hack minimum IPL during interrupt processing for broken hardware. */ | ||
18 | #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK | ||
19 | int __min_ipl; | ||
20 | #endif | ||
21 | |||
/*
 * Performance counter hook.  A module can override perf_irq with a
 * real handler; until then any perf interrupt is counted as an error
 * and reported.
 */
static void
dummy_perf(unsigned long vector, struct pt_regs *regs)
{
	irq_err_count++;
	printk(KERN_CRIT "Performance counter interrupt!\n");
}

/* Overridable entry point used by do_entInt() for type-4 interrupts. */
void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
34 | |||
35 | /* | ||
36 | * The main interrupt entry point. | ||
37 | */ | ||
38 | |||
/*
 * The main interrupt entry point, called from the entInt PALcode
 * vector.  Dispatches on 'type': 0 = interprocessor, 1 = clock tick,
 * 2 = machine check, 3 = device interrupt, 4 = performance counter.
 * Unknown types fall through to a diagnostic dump.
 */
asmlinkage void
do_entInt(unsigned long type, unsigned long vector,
	  unsigned long la_ptr, struct pt_regs *regs)
{
	switch (type) {
	case 0:
#ifdef CONFIG_SMP
		handle_ipi(regs);
		return;
#else
		irq_err_count++;
		printk(KERN_CRIT "Interprocessor interrupt? "
		       "You must be kidding!\n");
#endif
		break;
	case 1:
#ifdef CONFIG_SMP
	  {
		long cpu;
		smp_percpu_timer_interrupt(regs);
		cpu = smp_processor_id();
		/* Only the boot CPU runs the full RTC interrupt;
		   other CPUs just account the tick. */
		if (cpu != boot_cpuid) {
		        kstat_cpu(cpu).irqs[RTC_IRQ]++;
		} else {
			handle_irq(RTC_IRQ, regs);
		}
	  }
#else
		handle_irq(RTC_IRQ, regs);
#endif
		return;
	case 2:
		alpha_mv.machine_check(vector, la_ptr, regs);
		return;
	case 3:
		alpha_mv.device_interrupt(vector, regs);
		return;
	case 4:
		perf_irq(la_ptr, regs);
		return;
	default:
		printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
		       type, vector);
	}
	/* Reached only for unknown types and the non-SMP IPI case. */
	printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
}
85 | |||
/*
 * Reset both ISA DMA controllers and clear their channel masks so
 * all channels start out usable.  Shared by the per-platform init
 * code.
 */
void __init
common_init_isa_dma(void)
{
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(0, DMA1_CLR_MASK_REG);
	outb(0, DMA2_CLR_MASK_REG);
}
94 | |||
/*
 * Arch IRQ bootstrap: install the entInt PAL vector, then let the
 * machine-vector's init_irq() set up the platform's controllers.
 */
void __init
init_IRQ(void)
{
	/* Just in case the platform init_irq() causes interrupts/mchecks
	   (as is the case with RAWHIDE, at least). */
	wrent(entInt, 0);

	alpha_mv.init_irq();
}
104 | |||
105 | /* | ||
106 | * machine error checks | ||
107 | */ | ||
108 | #define MCHK_K_TPERR 0x0080 | ||
109 | #define MCHK_K_TCPERR 0x0082 | ||
110 | #define MCHK_K_HERR 0x0084 | ||
111 | #define MCHK_K_ECC_C 0x0086 | ||
112 | #define MCHK_K_ECC_NC 0x0088 | ||
113 | #define MCHK_K_OS_BUGCHECK 0x008A | ||
114 | #define MCHK_K_PAL_BUGCHECK 0x0090 | ||
115 | |||
116 | #ifndef CONFIG_SMP | ||
117 | struct mcheck_info __mcheck_info; | ||
118 | #endif | ||
119 | |||
120 | void | ||
121 | process_mcheck_info(unsigned long vector, unsigned long la_ptr, | ||
122 | struct pt_regs *regs, const char *machine, | ||
123 | int expected) | ||
124 | { | ||
125 | struct el_common *mchk_header; | ||
126 | const char *reason; | ||
127 | |||
128 | /* | ||
129 | * See if the machine check is due to a badaddr() and if so, | ||
130 | * ignore it. | ||
131 | */ | ||
132 | |||
133 | #ifdef CONFIG_VERBOSE_MCHECK | ||
134 | if (alpha_verbose_mcheck > 1) { | ||
135 | printk(KERN_CRIT "%s machine check %s\n", machine, | ||
136 | expected ? "expected." : "NOT expected!!!"); | ||
137 | } | ||
138 | #endif | ||
139 | |||
140 | if (expected) { | ||
141 | int cpu = smp_processor_id(); | ||
142 | mcheck_expected(cpu) = 0; | ||
143 | mcheck_taken(cpu) = 1; | ||
144 | return; | ||
145 | } | ||
146 | |||
147 | mchk_header = (struct el_common *)la_ptr; | ||
148 | |||
149 | printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%x\n", | ||
150 | machine, vector, regs->pc, mchk_header->code); | ||
151 | |||
152 | switch (mchk_header->code) { | ||
153 | /* Machine check reasons. Defined according to PALcode sources. */ | ||
154 | case 0x80: reason = "tag parity error"; break; | ||
155 | case 0x82: reason = "tag control parity error"; break; | ||
156 | case 0x84: reason = "generic hard error"; break; | ||
157 | case 0x86: reason = "correctable ECC error"; break; | ||
158 | case 0x88: reason = "uncorrectable ECC error"; break; | ||
159 | case 0x8A: reason = "OS-specific PAL bugcheck"; break; | ||
160 | case 0x90: reason = "callsys in kernel mode"; break; | ||
161 | case 0x96: reason = "i-cache read retryable error"; break; | ||
162 | case 0x98: reason = "processor detected hard error"; break; | ||
163 | |||
164 | /* System specific (these are for Alcor, at least): */ | ||
165 | case 0x202: reason = "system detected hard error"; break; | ||
166 | case 0x203: reason = "system detected uncorrectable ECC error"; break; | ||
167 | case 0x204: reason = "SIO SERR occurred on PCI bus"; break; | ||
168 | case 0x205: reason = "parity error detected by core logic"; break; | ||
169 | case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break; | ||
170 | case 0x207: reason = "non-existent memory error"; break; | ||
171 | case 0x208: reason = "MCHK_K_DCSR"; break; | ||
172 | case 0x209: reason = "PCI SERR detected"; break; | ||
173 | case 0x20b: reason = "PCI data parity error detected"; break; | ||
174 | case 0x20d: reason = "PCI address parity error detected"; break; | ||
175 | case 0x20f: reason = "PCI master abort error"; break; | ||
176 | case 0x211: reason = "PCI target abort error"; break; | ||
177 | case 0x213: reason = "scatter/gather PTE invalid error"; break; | ||
178 | case 0x215: reason = "flash ROM write error"; break; | ||
179 | case 0x217: reason = "IOA timeout detected"; break; | ||
180 | case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break; | ||
181 | case 0x21b: reason = "EISA fail-safe timer timeout"; break; | ||
182 | case 0x21d: reason = "EISA bus time-out"; break; | ||
183 | case 0x21f: reason = "EISA software generated NMI"; break; | ||
184 | case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break; | ||
185 | default: reason = "unknown"; break; | ||
186 | } | ||
187 | |||
188 | printk(KERN_CRIT "machine check type: %s%s\n", | ||
189 | reason, mchk_header->retry ? " (retryable)" : ""); | ||
190 | |||
191 | dik_show_regs(regs, NULL); | ||
192 | |||
193 | #ifdef CONFIG_VERBOSE_MCHECK | ||
194 | if (alpha_verbose_mcheck > 1) { | ||
195 | /* Dump the logout area to give all info. */ | ||
196 | unsigned long *ptr = (unsigned long *)la_ptr; | ||
197 | long i; | ||
198 | for (i = 0; i < mchk_header->size / sizeof(long); i += 2) { | ||
199 | printk(KERN_CRIT " +%8lx %016lx %016lx\n", | ||
200 | i*sizeof(long), ptr[i], ptr[i+1]); | ||
201 | } | ||
202 | } | ||
203 | #endif /* CONFIG_VERBOSE_MCHECK */ | ||
204 | } | ||
205 | |||
/*
 * The special RTC interrupt type.  The interrupt itself was
 * processed by PALcode, and comes in via entInt vector 1.
 */

/* PALcode already handled the hardware side, so every controller
   hook is a no-op; startup reports "nothing pending". */
static void rtc_enable_disable(unsigned int irq) { }
static unsigned int rtc_startup(unsigned int irq) { return 0; }

/* Timer tick action; SA_INTERRUPT runs the handler with irqs off. */
struct irqaction timer_irqaction = {
	.handler	= timer_interrupt,
	.flags		= SA_INTERRUPT,
	.name		= "timer",
};

static struct hw_interrupt_type rtc_irq_type = {
	.typename	= "RTC",
	.startup	= rtc_startup,
	.shutdown	= rtc_enable_disable,
	.enable		= rtc_enable_disable,
	.disable	= rtc_enable_disable,
	.ack		= rtc_enable_disable,
	.end		= rtc_enable_disable,
};
229 | |||
/*
 * Wire RTC_IRQ to the no-op RTC controller type and install the
 * timer tick handler.
 */
void __init
init_rtc_irq(void)
{
	irq_desc[RTC_IRQ].status = IRQ_DISABLED;
	irq_desc[RTC_IRQ].handler = &rtc_irq_type;
	setup_irq(RTC_IRQ, &timer_irqaction);
}
237 | |||
/* Dummy irqactions: placeholder handlers that claim cascade/switch
   lines so nothing else can request them; no_action does nothing. */
struct irqaction isa_cascade_irqaction = {
	.handler	= no_action,
	.name		= "isa-cascade"
};

struct irqaction timer_cascade_irqaction = {
	.handler	= no_action,
	.name		= "timer-cascade"
};

struct irqaction halt_switch_irqaction = {
	.handler	= no_action,
	.name		= "halt-switch"
};
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c new file mode 100644 index 000000000000..b188683b83fd --- /dev/null +++ b/arch/alpha/kernel/irq_i8259.c | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/irq_i8259.c | ||
3 | * | ||
4 | * This is the 'legacy' 8259A Programmable Interrupt Controller, | ||
5 | * present in the majority of PC/AT boxes. | ||
6 | * | ||
7 | * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c. | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/cache.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | |||
17 | #include <asm/io.h> | ||
18 | |||
19 | #include "proto.h" | ||
20 | #include "irq_impl.h" | ||
21 | |||
22 | |||
23 | /* Note mask bit is true for DISABLED irqs. */ | ||
24 | static unsigned int cached_irq_mask = 0xffff; | ||
25 | static DEFINE_SPINLOCK(i8259_irq_lock); | ||
26 | |||
/*
 * Push the relevant byte of the cached mask to the PIC that owns
 * 'irq': irqs 8-15 live on the slave (port 0xA1), 0-7 on the master
 * (port 0x21).  Caller holds i8259_irq_lock.
 */
static inline void
i8259_update_irq_hw(unsigned int irq, unsigned long mask)
{
	if (irq & 8)
		outb(mask >> 8, 0xA1);
	else
		outb(mask, 0x21);
}
35 | |||
/*
 * Unmask 'irq': clear its bit in the cached mask (mask bit set means
 * DISABLED) and write the result to the PIC, under i8259_irq_lock.
 */
inline void
i8259a_enable_irq(unsigned int irq)
{
	spin_lock(&i8259_irq_lock);
	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
	spin_unlock(&i8259_irq_lock);
}
43 | |||
/* Mask 'irq' in cache and hardware; caller holds i8259_irq_lock. */
static inline void
__i8259a_disable_irq(unsigned int irq)
{
	i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
}
49 | |||
/* Locked wrapper around __i8259a_disable_irq(). */
void
i8259a_disable_irq(unsigned int irq)
{
	spin_lock(&i8259_irq_lock);
	__i8259a_disable_irq(irq);
	spin_unlock(&i8259_irq_lock);
}
57 | |||
/*
 * Mask 'irq' and send the specific-EOI commands (0xE0 | level) to
 * acknowledge it: slave first for irqs 8-15, then the cascade line
 * (irq 2) on the master; irqs 0-7 only need the master EOI.
 */
void
i8259a_mask_and_ack_irq(unsigned int irq)
{
	spin_lock(&i8259_irq_lock);
	__i8259a_disable_irq(irq);

	/* Ack the interrupt making it the lowest priority. */
	if (irq >= 8) {
		outb(0xE0 | (irq - 8), 0xa0);   /* ack the slave */
		irq = 2;	/* continue with the cascade on the master */
	}
	outb(0xE0 | irq, 0x20);		/* ack the master */
	spin_unlock(&i8259_irq_lock);
}
72 | |||
/* Controller ->startup hook: just unmask the line. */
unsigned int
i8259a_startup_irq(unsigned int irq)
{
	i8259a_enable_irq(irq);
	return 0; /* never anything pending */
}
79 | |||
/* Controller ->end hook: re-enable the line unless it was disabled
   (or re-raised) while its handler ran. */
void
i8259a_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		i8259a_enable_irq(irq);
}
86 | |||
/* Controller descriptor for the legacy 8259A pair (irqs 0-15). */
struct hw_interrupt_type i8259a_irq_type = {
	.typename	= "XT-PIC",
	.startup	= i8259a_startup_irq,
	.shutdown	= i8259a_disable_irq,
	.enable		= i8259a_enable_irq,
	.disable	= i8259a_disable_irq,
	.ack		= i8259a_mask_and_ack_irq,
	.end		= i8259a_end_irq,
};
96 | |||
/*
 * Initialise both 8259A PICs: mask every line, point irqs 0-15 at the
 * XT-PIC controller type, and claim irq 2 for the master->slave
 * cascade so no driver can request it.
 */
void __init
init_i8259a_irqs(void)
{
	static struct irqaction cascade = {
		.handler	= no_action,
		.name		= "cascade",
	};

	long i;

	outb(0xff, 0x21);	/* mask all of 8259A-1 */
	outb(0xff, 0xA1);	/* mask all of 8259A-2 */

	for (i = 0; i < 16; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].handler = &i8259a_irq_type;
	}

	setup_irq(2, &cascade);
}
117 | |||
118 | |||
119 | #if defined(CONFIG_ALPHA_GENERIC) | ||
120 | # define IACK_SC alpha_mv.iack_sc | ||
121 | #elif defined(CONFIG_ALPHA_APECS) | ||
122 | # define IACK_SC APECS_IACK_SC | ||
123 | #elif defined(CONFIG_ALPHA_LCA) | ||
124 | # define IACK_SC LCA_IACK_SC | ||
125 | #elif defined(CONFIG_ALPHA_CIA) | ||
126 | # define IACK_SC CIA_IACK_SC | ||
127 | #elif defined(CONFIG_ALPHA_PYXIS) | ||
128 | # define IACK_SC PYXIS_IACK_SC | ||
129 | #elif defined(CONFIG_ALPHA_TITAN) | ||
130 | # define IACK_SC TITAN_IACK_SC | ||
131 | #elif defined(CONFIG_ALPHA_TSUNAMI) | ||
132 | # define IACK_SC TSUNAMI_IACK_SC | ||
133 | #elif defined(CONFIG_ALPHA_IRONGATE) | ||
134 | # define IACK_SC IRONGATE_IACK_SC | ||
135 | #endif | ||
136 | /* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since | ||
137 | sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason. */ | ||
138 | |||
139 | #if defined(IACK_SC) | ||
140 | void | ||
141 | isa_device_interrupt(unsigned long vector, struct pt_regs *regs) | ||
142 | { | ||
143 | /* | ||
144 | * Generate a PCI interrupt acknowledge cycle. The PIC will | ||
145 | * respond with the interrupt vector of the highest priority | ||
146 | * interrupt that is pending. The PALcode sets up the | ||
147 | * interrupts vectors such that irq level L generates vector L. | ||
148 | */ | ||
149 | int j = *(vuip) IACK_SC; | ||
150 | j &= 0xff; | ||
151 | handle_irq(j, regs); | ||
152 | } | ||
153 | #endif | ||
154 | |||
155 | #if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC) | ||
/*
 * ISA device-interrupt entry for chipsets without an interrupt-ack
 * special cycle: poll both PICs' in-service registers directly and
 * dispatch every pending line.
 */
void
isa_no_iack_sc_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	unsigned long pic;

	/*
	 * It seems to me that the probability of two or more *device*
	 * interrupts occurring at almost exactly the same time is
	 * pretty low.  So why pay the price of checking for
	 * additional interrupts here if the common case can be
	 * handled so much easier?
	 */
	/*
	 * The first read gives you *all* interrupting lines.
	 * Therefore, read the mask register and and out those lines
	 * not enabled.  Note that some documentation has 21 and a1
	 * write only.  This is not true.
	 */
	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
	pic &= 0xFFFB;				/* mask out cascade & hibits */

	/* Dispatch each set bit, lowest irq first. */
	while (pic) {
		int j = ffz(~pic);	/* index of lowest set bit */
		pic &= pic - 1;		/* clear it */
		handle_irq(j, regs);
	}
}
183 | #endif | ||
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h new file mode 100644 index 000000000000..f201d8ffc0d9 --- /dev/null +++ b/arch/alpha/kernel/irq_impl.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/irq_impl.h | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Copyright (C) 1998, 2000 Richard Henderson | ||
6 | * | ||
7 | * This file contains declarations and inline functions for interfacing | ||
8 | * with the IRQ handling routines in irq.c. | ||
9 | */ | ||
10 | |||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/profile.h> | ||
14 | |||
15 | |||
#define RTC_IRQ 8	/* ISA IRQ line of the real-time clock */

/* Low-level device-interrupt dispatchers, chosen by the machine vector.  */
extern void isa_device_interrupt(unsigned long, struct pt_regs *);
extern void isa_no_iack_sc_device_interrupt(unsigned long, struct pt_regs *);
extern void srm_device_interrupt(unsigned long, struct pt_regs *);
extern void pyxis_device_interrupt(unsigned long, struct pt_regs *);

/* Shared irqaction structures hooked up by system-specific setup code.  */
extern struct irqaction timer_irqaction;
extern struct irqaction isa_cascade_irqaction;
extern struct irqaction timer_cascade_irqaction;
extern struct irqaction halt_switch_irqaction;

/* IRQ-subsystem initialization helpers.  */
extern void init_srm_irqs(long, unsigned long);
extern void init_pyxis_irqs(unsigned long);
extern void init_rtc_irq(void);

extern void common_init_isa_dma(void);

/* i8259A PIC interface.  */
extern void i8259a_enable_irq(unsigned int);
extern void i8259a_disable_irq(unsigned int);
extern void i8259a_mask_and_ack_irq(unsigned int);
extern unsigned int i8259a_startup_irq(unsigned int);
extern void i8259a_end_irq(unsigned int);
extern struct hw_interrupt_type i8259a_irq_type;
extern void init_i8259a_irqs(void);

/* Generic dispatch of a decoded irq number.  */
extern void handle_irq(int irq, struct pt_regs * regs);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c new file mode 100644 index 000000000000..146a20b9e3d5 --- /dev/null +++ b/arch/alpha/kernel/irq_pyxis.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/irq_pyxis.c | ||
3 | * | ||
4 | * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). | ||
5 | * | ||
6 | * IRQ Code common to all PYXIS core logic chips. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/irq.h> | ||
12 | |||
13 | #include <asm/io.h> | ||
14 | #include <asm/core_cia.h> | ||
15 | |||
16 | #include "proto.h" | ||
17 | #include "irq_impl.h" | ||
18 | |||
19 | |||
/* Note mask bit is true for ENABLED irqs.  */
static unsigned long cached_irq_mask;	/* software copy of PYXIS_INT_MASK */

/* Push the cached mask out to the PYXIS interrupt-mask register.  */
static inline void
pyxis_update_irq_hw(unsigned long mask)
{
	*(vulp)PYXIS_INT_MASK = mask;
	mb();			/* order the write before the read-back */
	*(vulp)PYXIS_INT_MASK;	/* read back to force the write to the chip */
}
30 | |||
31 | static inline void | ||
32 | pyxis_enable_irq(unsigned int irq) | ||
33 | { | ||
34 | pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); | ||
35 | } | ||
36 | |||
37 | static void | ||
38 | pyxis_disable_irq(unsigned int irq) | ||
39 | { | ||
40 | pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); | ||
41 | } | ||
42 | |||
/* hw_interrupt_type.startup hook: just enable; no extra setup needed.  */
static unsigned int
pyxis_startup_irq(unsigned int irq)
{
	pyxis_enable_irq(irq);
	return 0;	/* 0 == no pending interrupt to replay */
}
49 | |||
/* hw_interrupt_type.end hook: re-enable unless the line was disabled
   or is still being serviced.  */
static void
pyxis_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		pyxis_enable_irq(irq);
}
56 | |||
/* hw_interrupt_type.ack hook: mask the line and acknowledge it at the
   PYXIS.  The write/barrier sequence below is order-sensitive.  */
static void
pyxis_mask_and_ack_irq(unsigned int irq)
{
	unsigned long bit = 1UL << (irq - 16);
	unsigned long mask = cached_irq_mask &= ~bit;	/* update cache too */

	/* Disable the interrupt.  */
	*(vulp)PYXIS_INT_MASK = mask;
	wmb();
	/* Ack PYXIS PCI interrupt.  */
	*(vulp)PYXIS_INT_REQ = bit;
	mb();
	/* Re-read to force both writes.  */
	*(vulp)PYXIS_INT_MASK;
}
72 | |||
/* Controller operations for IRQs routed through the PYXIS core chip.  */
static struct hw_interrupt_type pyxis_irq_type = {
	.typename	= "PYXIS",
	.startup	= pyxis_startup_irq,
	.shutdown	= pyxis_disable_irq,	/* shutdown == plain disable */
	.enable		= pyxis_enable_irq,
	.disable	= pyxis_disable_irq,
	.ack		= pyxis_mask_and_ack_irq,
	.end		= pyxis_end_irq,
};
82 | |||
83 | void | ||
84 | pyxis_device_interrupt(unsigned long vector, struct pt_regs *regs) | ||
85 | { | ||
86 | unsigned long pld; | ||
87 | unsigned int i; | ||
88 | |||
89 | /* Read the interrupt summary register of PYXIS */ | ||
90 | pld = *(vulp)PYXIS_INT_REQ; | ||
91 | pld &= cached_irq_mask; | ||
92 | |||
93 | /* | ||
94 | * Now for every possible bit set, work through them and call | ||
95 | * the appropriate interrupt handler. | ||
96 | */ | ||
97 | while (pld) { | ||
98 | i = ffz(~pld); | ||
99 | pld &= pld - 1; /* clear least bit set */ | ||
100 | if (i == 7) | ||
101 | isa_device_interrupt(vector, regs); | ||
102 | else | ||
103 | handle_irq(16+i, regs); | ||
104 | } | ||
105 | } | ||
106 | |||
/*
 * Set up IRQs 16..47 to be handled by the PYXIS controller, except
 * those whose bit is set in ignore_mask (left for system code to
 * claim).  The hardware flush sequence below is order-sensitive.
 */
void __init
init_pyxis_irqs(unsigned long ignore_mask)
{
	long i;

	*(vulp)PYXIS_INT_MASK = 0;		/* disable all */
	*(vulp)PYXIS_INT_REQ = -1;		/* flush all */
	mb();

	/* Send -INTA pulses to clear any pending interrupts ...*/
	*(vuip) CIA_IACK_SC;

	for (i = 16; i < 48; ++i) {
		if ((ignore_mask >> i) & 1)
			continue;		/* reserved for the caller */
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &pyxis_irq_type;
	}

	/* Bit 7 of the summary register is the ISA cascade.  */
	setup_irq(16+7, &isa_cascade_irqaction);
}
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c new file mode 100644 index 000000000000..0a87e466918c --- /dev/null +++ b/arch/alpha/kernel/irq_srm.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Handle interrupts from the SRM, assuming no additional weirdness. | ||
3 | */ | ||
4 | |||
5 | #include <linux/init.h> | ||
6 | #include <linux/sched.h> | ||
7 | #include <linux/irq.h> | ||
8 | |||
9 | #include "proto.h" | ||
10 | #include "irq_impl.h" | ||
11 | |||
12 | |||
/*
 * Is the palcode SMP safe? In other words: can we call cserve_ena/dis
 * at the same time in multiple CPUs? To be safe I added a spinlock
 * but it can be removed trivially if the palcode is robust against smp.
 */
DEFINE_SPINLOCK(srm_irq_lock);	/* serializes cserve_ena/cserve_dis calls */
19 | |||
/* Enable an IRQ via the SRM console (cserve); irq 16 maps to cserve 0.  */
static inline void
srm_enable_irq(unsigned int irq)
{
	spin_lock(&srm_irq_lock);	/* see SMP-safety note above */
	cserve_ena(irq - 16);
	spin_unlock(&srm_irq_lock);
}
27 | |||
/* Disable an IRQ via the SRM console (cserve); irq 16 maps to cserve 0.  */
static void
srm_disable_irq(unsigned int irq)
{
	spin_lock(&srm_irq_lock);	/* see SMP-safety note above */
	cserve_dis(irq - 16);
	spin_unlock(&srm_irq_lock);
}
35 | |||
/* hw_interrupt_type.startup hook: just enable; no extra setup needed.  */
static unsigned int
srm_startup_irq(unsigned int irq)
{
	srm_enable_irq(irq);
	return 0;	/* 0 == no pending interrupt to replay */
}
42 | |||
/* hw_interrupt_type.end hook: re-enable unless the line was disabled
   or is still being serviced.  */
static void
srm_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		srm_enable_irq(irq);
}
49 | |||
/* Handle interrupts from the SRM, assuming no additional weirdness.  */
static struct hw_interrupt_type srm_irq_type = {
	.typename	= "SRM",
	.startup	= srm_startup_irq,
	.shutdown	= srm_disable_irq,	/* shutdown == plain disable */
	.enable		= srm_enable_irq,
	.disable	= srm_disable_irq,
	.ack		= srm_disable_irq,	/* ack by masking; SRM has no explicit ack */
	.end		= srm_end_irq,
};
60 | |||
61 | void __init | ||
62 | init_srm_irqs(long max, unsigned long ignore_mask) | ||
63 | { | ||
64 | long i; | ||
65 | |||
66 | for (i = 16; i < max; ++i) { | ||
67 | if (i < 64 && ((ignore_mask >> i) & 1)) | ||
68 | continue; | ||
69 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | ||
70 | irq_desc[i].handler = &srm_irq_type; | ||
71 | } | ||
72 | } | ||
73 | |||
/* SRM delivers vectors starting at 0x800, spaced 0x10 apart; decode
   back to a linear irq number and dispatch.  */
void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq = (vector - 0x800) >> 4;
	handle_irq(irq, regs);
}
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h new file mode 100644 index 000000000000..4959b7a3e1e6 --- /dev/null +++ b/arch/alpha/kernel/machvec_impl.h | |||
@@ -0,0 +1,150 @@ | |||
/*
 * linux/arch/alpha/kernel/machvec_impl.h
 *
 * Copyright (C) 1997, 1998 Richard Henderson
 *
 * This file has goodies to help simplify instantiation of machine vectors.
 */

#include <linux/config.h>
#include <asm/pgalloc.h>

/* Whee.  These systems don't have an HAE:
       IRONGATE, MARVEL, POLARIS, TSUNAMI, TITAN, WILDFIRE
   Fix things up for the GENERIC kernel by defining the HAE address
   to be that of the cache.  Now we can read and write it as we like.  ;-) */
#define IRONGATE_HAE_ADDRESS	(&alpha_mv.hae_cache)
#define MARVEL_HAE_ADDRESS	(&alpha_mv.hae_cache)
#define POLARIS_HAE_ADDRESS	(&alpha_mv.hae_cache)
#define TSUNAMI_HAE_ADDRESS	(&alpha_mv.hae_cache)
#define TITAN_HAE_ADDRESS	(&alpha_mv.hae_cache)
#define WILDFIRE_HAE_ADDRESS	(&alpha_mv.hae_cache)

#ifdef CIA_ONE_HAE_WINDOW
#define CIA_HAE_ADDRESS		(&alpha_mv.hae_cache)
#endif
#ifdef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_HAE_ADDRESS	(&alpha_mv.hae_cache)
#endif

/* Only a few systems don't define IACK_SC, handling all interrupts through
   the SRM console.  But splitting out that one case from IO() below
   seems like such a pain.  Define this to get things to compile.  */
#define JENSEN_IACK_SC		1
#define T2_IACK_SC		1
#define WILDFIRE_IACK_SC	1 /* FIXME */

/*
 * Some helpful macros for filling in the blanks.
 */

/* Token-pasting helpers; CAT expands its arguments before pasting.  */
#define CAT1(x,y)  x##y
#define CAT(x,y)   CAT1(x,y)

/* Old GCC-style labeled initializer; the default RTC lives at port 0x70.  */
#define DO_DEFAULT_RTC rtc_port: 0x70

/* Per-CPU-family MMU vector entries.  EV6/EV7 reuse the EV5 routines
   and differ only in max_asn.  */
#define DO_EV4_MMU							\
	.max_asn =			EV4_MAX_ASN,			\
	.mv_switch_mm =			ev4_switch_mm,			\
	.mv_activate_mm =		ev4_activate_mm,		\
	.mv_flush_tlb_current =		ev4_flush_tlb_current,		\
	.mv_flush_tlb_current_page =	ev4_flush_tlb_current_page

#define DO_EV5_MMU							\
	.max_asn =			EV5_MAX_ASN,			\
	.mv_switch_mm =			ev5_switch_mm,			\
	.mv_activate_mm =		ev5_activate_mm,		\
	.mv_flush_tlb_current =		ev5_flush_tlb_current,		\
	.mv_flush_tlb_current_page =	ev5_flush_tlb_current_page

#define DO_EV6_MMU							\
	.max_asn =			EV6_MAX_ASN,			\
	.mv_switch_mm =			ev5_switch_mm,			\
	.mv_activate_mm =		ev5_activate_mm,		\
	.mv_flush_tlb_current =		ev5_flush_tlb_current,		\
	.mv_flush_tlb_current_page =	ev5_flush_tlb_current_page

#define DO_EV7_MMU							\
	.max_asn =			EV6_MAX_ASN,			\
	.mv_switch_mm =			ev5_switch_mm,			\
	.mv_activate_mm =		ev5_activate_mm,		\
	.mv_flush_tlb_current =		ev5_flush_tlb_current,		\
	.mv_flush_tlb_current_page =	ev5_flush_tlb_current_page

/* Fill in the I/O portion of a machine vector: UP is the chipset name
   in upper case (for address/IACK macros), low the lower-case prefix
   of its accessor functions.  IO_LITE omits the PCI ops.  */
#define IO_LITE(UP,low)							\
	.hae_register =		(unsigned long *) CAT(UP,_HAE_ADDRESS),	\
	.iack_sc =		CAT(UP,_IACK_SC),			\
	.mv_ioread8 =		CAT(low,_ioread8),			\
	.mv_ioread16 =		CAT(low,_ioread16),			\
	.mv_ioread32 =		CAT(low,_ioread32),			\
	.mv_iowrite8 =		CAT(low,_iowrite8),			\
	.mv_iowrite16 =		CAT(low,_iowrite16),			\
	.mv_iowrite32 =		CAT(low,_iowrite32),			\
	.mv_readb =		CAT(low,_readb),			\
	.mv_readw =		CAT(low,_readw),			\
	.mv_readl =		CAT(low,_readl),			\
	.mv_readq =		CAT(low,_readq),			\
	.mv_writeb =		CAT(low,_writeb),			\
	.mv_writew =		CAT(low,_writew),			\
	.mv_writel =		CAT(low,_writel),			\
	.mv_writeq =		CAT(low,_writeq),			\
	.mv_ioportmap =		CAT(low,_ioportmap),			\
	.mv_ioremap =		CAT(low,_ioremap),			\
	.mv_iounmap =		CAT(low,_iounmap),			\
	.mv_is_ioaddr =		CAT(low,_is_ioaddr),			\
	.mv_is_mmio =		CAT(low,_is_mmio)			\

#define IO(UP,low)							\
	IO_LITE(UP,low),						\
	.pci_ops =		&CAT(low,_pci_ops),			\
	.mv_pci_tbi =		CAT(low,_pci_tbi)

#define DO_APECS_IO	IO(APECS,apecs)
#define DO_CIA_IO	IO(CIA,cia)
#define DO_IRONGATE_IO	IO(IRONGATE,irongate)
#define DO_LCA_IO	IO(LCA,lca)
#define DO_MARVEL_IO	IO(MARVEL,marvel)
#define DO_MCPCIA_IO	IO(MCPCIA,mcpcia)
#define DO_POLARIS_IO	IO(POLARIS,polaris)
#define DO_T2_IO	IO(T2,t2)
#define DO_TSUNAMI_IO	IO(TSUNAMI,tsunami)
#define DO_TITAN_IO	IO(TITAN,titan)
#define DO_WILDFIRE_IO	IO(WILDFIRE,wildfire)

/* PYXIS is CIA-compatible but uses the BWX accessors.  */
#define DO_PYXIS_IO	IO_LITE(CIA,cia_bwx),				\
	.pci_ops =		&cia_pci_ops,				\
	.mv_pci_tbi =		cia_pci_tbi

/*
 * In a GENERIC kernel, we have lots of these vectors floating about,
 * all but one of which we want to go away.  In a non-GENERIC kernel,
 * we want only one, ever.
 *
 * Accomplish this in the GENERIC kernel by putting all of the vectors
 * in the .init.data section where they'll go away.  We'll copy the
 * one we want to the real alpha_mv vector in setup_arch.
 *
 * Accomplish this in a non-GENERIC kernel by ifdef'ing out all but
 * one of the vectors, which will not reside in .init.data.  We then
 * alias this one vector to alpha_mv, so no copy is needed.
 *
 * Upshot: set __initdata to nothing for non-GENERIC kernels.
 */

#ifdef CONFIG_ALPHA_GENERIC
#define __initmv __initdata
#define ALIAS_MV(x)
#else
#define __initmv

/* GCC actually has a syntax for defining aliases, but is under some
   delusion that you shouldn't be able to declare it extern somewhere
   else beforehand.  Fine.  We'll do it ourselves.  */
#if 0
#define ALIAS_MV(system) \
  struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
#else
#define ALIAS_MV(system) \
  asm(".global alpha_mv\nalpha_mv = " #system "_mv");
#endif
#endif /* GENERIC */
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c new file mode 100644 index 000000000000..fc271e316a38 --- /dev/null +++ b/arch/alpha/kernel/module.c | |||
@@ -0,0 +1,311 @@ | |||
1 | /* Kernel module help for Alpha. | ||
2 | Copyright (C) 2002 Richard Henderson. | ||
3 | |||
4 | This program is free software; you can redistribute it and/or modify | ||
5 | it under the terms of the GNU General Public License as published by | ||
6 | the Free Software Foundation; either version 2 of the License, or | ||
7 | (at your option) any later version. | ||
8 | |||
9 | This program is distributed in the hope that it will be useful, | ||
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | GNU General Public License for more details. | ||
13 | |||
14 | You should have received a copy of the GNU General Public License | ||
15 | along with this program; if not, write to the Free Software | ||
16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | #include <linux/moduleloader.h> | ||
19 | #include <linux/elf.h> | ||
20 | #include <linux/vmalloc.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/slab.h> | ||
25 | |||
/* Flip the "#if 0" to "#if 1" to get verbose relocation debugging.  */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
31 | |||
32 | void * | ||
33 | module_alloc(unsigned long size) | ||
34 | { | ||
35 | if (size == 0) | ||
36 | return NULL; | ||
37 | return vmalloc(size); | ||
38 | } | ||
39 | |||
/* Release memory obtained from module_alloc (vfree handles NULL).  */
void
module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}
45 | |||
46 | /* Allocate the GOT at the end of the core sections. */ | ||
47 | |||
/* Allocate the GOT at the end of the core sections.  */

/* One GOT slot requirement, chained per symbol while sizing the GOT.  */
struct got_entry {
	struct got_entry *next;	/* next entry for the same symbol */
	Elf64_Addr r_offset;	/* relocation offset this slot serves */
	int got_offset;		/* assigned offset within the .got section */
};
53 | |||
/*
 * Examine one RELA entry.  For an R_ALPHA_LITERAL relocation, ensure a
 * GOT slot exists for its (symbol, r_offset) pair, bumping *poffset
 * (the running GOT size) when a new slot is assigned, and stash the
 * slot offset in the high bits of r_info for apply_relocate_add.
 */
static inline void
process_reloc_for_got(Elf64_Rela *rela,
		      struct got_entry *chains, Elf64_Xword *poffset)
{
	unsigned long r_sym = ELF64_R_SYM (rela->r_info);
	unsigned long r_type = ELF64_R_TYPE (rela->r_info);
	Elf64_Addr r_offset = rela->r_offset;
	struct got_entry *g;

	/* Only LITERAL relocations need GOT slots.  */
	if (r_type != R_ALPHA_LITERAL)
		return;

	/* Reuse an existing slot for this (symbol, offset) if we have one.  */
	for (g = chains + r_sym; g ; g = g->next)
		if (g->r_offset == r_offset) {
			if (g->got_offset == 0) {
				/* Chain head seen for the first time.  */
				g->got_offset = *poffset;
				*poffset += 8;	/* one quadword per slot */
			}
			goto found_entry;
		}

	/* No match: chain a fresh entry onto this symbol's list.
	   NOTE(review): the kmalloc return is dereferenced unchecked; an
	   allocation failure here would oops.  Fixing it properly needs a
	   fallible interface -- flagged rather than changed.  */
	g = kmalloc (sizeof (*g), GFP_KERNEL);
	g->next = chains[r_sym].next;
	g->r_offset = r_offset;
	g->got_offset = *poffset;
	*poffset += 8;
	chains[r_sym].next = g;

 found_entry:
	/* Trick: most of the ELF64_R_TYPE field is unused.  There are
	   42 valid relocation types, and a 32-bit field.  Co-opt the
	   bits above 256 to store the got offset for this reloc.  */
	rela->r_info |= g->got_offset << 8;
}
88 | |||
89 | int | ||
90 | module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, | ||
91 | char *secstrings, struct module *me) | ||
92 | { | ||
93 | struct got_entry *chains; | ||
94 | Elf64_Rela *rela; | ||
95 | Elf64_Shdr *esechdrs, *symtab, *s, *got; | ||
96 | unsigned long nsyms, nrela, i; | ||
97 | |||
98 | esechdrs = sechdrs + hdr->e_shnum; | ||
99 | symtab = got = NULL; | ||
100 | |||
101 | /* Find out how large the symbol table is. Allocate one got_entry | ||
102 | head per symbol. Normally this will be enough, but not always. | ||
103 | We'll chain different offsets for the symbol down each head. */ | ||
104 | for (s = sechdrs; s < esechdrs; ++s) | ||
105 | if (s->sh_type == SHT_SYMTAB) | ||
106 | symtab = s; | ||
107 | else if (!strcmp(".got", secstrings + s->sh_name)) { | ||
108 | got = s; | ||
109 | me->arch.gotsecindex = s - sechdrs; | ||
110 | } | ||
111 | |||
112 | if (!symtab) { | ||
113 | printk(KERN_ERR "module %s: no symbol table\n", me->name); | ||
114 | return -ENOEXEC; | ||
115 | } | ||
116 | if (!got) { | ||
117 | printk(KERN_ERR "module %s: no got section\n", me->name); | ||
118 | return -ENOEXEC; | ||
119 | } | ||
120 | |||
121 | nsyms = symtab->sh_size / sizeof(Elf64_Sym); | ||
122 | chains = kmalloc(nsyms * sizeof(struct got_entry), GFP_KERNEL); | ||
123 | memset(chains, 0, nsyms * sizeof(struct got_entry)); | ||
124 | |||
125 | got->sh_size = 0; | ||
126 | got->sh_addralign = 8; | ||
127 | got->sh_type = SHT_NOBITS; | ||
128 | |||
129 | /* Examine all LITERAL relocations to find out what GOT entries | ||
130 | are required. This sizes the GOT section as well. */ | ||
131 | for (s = sechdrs; s < esechdrs; ++s) | ||
132 | if (s->sh_type == SHT_RELA) { | ||
133 | nrela = s->sh_size / sizeof(Elf64_Rela); | ||
134 | rela = (void *)hdr + s->sh_offset; | ||
135 | for (i = 0; i < nrela; ++i) | ||
136 | process_reloc_for_got(rela+i, chains, | ||
137 | &got->sh_size); | ||
138 | } | ||
139 | |||
140 | /* Free the memory we allocated. */ | ||
141 | for (i = 0; i < nsyms; ++i) { | ||
142 | struct got_entry *g, *n; | ||
143 | for (g = chains[i].next; g ; g = n) { | ||
144 | n = g->next; | ||
145 | kfree(g); | ||
146 | } | ||
147 | } | ||
148 | kfree(chains); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
/* Alpha modules use RELA relocations exclusively; plain REL sections
   are rejected outright.  */
int
apply_relocate(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	       unsigned int relsec, struct module *me)
{
	printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
	return -ENOEXEC;
}
160 | |||
/*
 * Apply one RELA relocation section to the section it targets.
 * Returns 0 on success, -ENOEXEC on an unknown relocation type or an
 * overflow of a relocation's value range.
 */
int
apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
		   unsigned int symindex, unsigned int relsec,
		   struct module *me)
{
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela);
	Elf64_Sym *symtab, *sym;
	void *base, *location;
	unsigned long got, gp;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	/* Section being relocated, and the module's symbol table.  */
	base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
	symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;

	/* The small sections were sorted to the end of the segment.
	   The following should definitely cover them.  */
	gp = (u64)me->module_core + me->core_size - 0x8000;
	got = sechdrs[me->arch.gotsecindex].sh_addr;

	for (i = 0; i < n; i++) {
		unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
		unsigned long r_type = ELF64_R_TYPE (rela[i].r_info);
		/* GOT offset stashed in the high bits by
		   process_reloc_for_got; low byte is the real type.  */
		unsigned long r_got_offset = r_type >> 8;
		unsigned long value, hi, lo;
		r_type &= 0xff;

		/* This is where to make the change.  */
		location = base + rela[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   unresolved symbols have been resolved.  */
		sym = symtab + r_sym;
		value = sym->st_value + rela[i].r_addend;

		switch (r_type) {
		case R_ALPHA_NONE:
			break;
		case R_ALPHA_REFQUAD:
			/* BUG() can produce misaligned relocations, so
			   store the quadword as two aligned-safe words.  */
			((u32 *)location)[0] = value;
			((u32 *)location)[1] = value >> 32;
			break;
		case R_ALPHA_GPREL32:
			/* 32-bit displacement from the GP.  */
			value -= gp;
			if ((int)value != value)
				goto reloc_overflow;
			*(u32 *)location = value;
			break;
		case R_ALPHA_LITERAL:
			/* Store the symbol's value in its GOT slot and
			   patch the 16-bit GP-relative slot address.  */
			hi = got + r_got_offset;
			lo = hi - gp;
			if ((short)lo != lo)
				goto reloc_overflow;
			*(u16 *)location = lo;
			*(u64 *)hi = value;
			break;
		case R_ALPHA_LITUSE:
			/* Hint only; nothing to patch.  */
			break;
		case R_ALPHA_GPDISP:
			/* Split GP displacement into ldah/lda halves.  */
			value = gp - (u64)location;
			lo = (short)value;
			hi = (int)(value - lo);
			if (hi + lo != value)
				goto reloc_overflow;
			*(u16 *)location = hi >> 16;
			*(u16 *)(location + rela[i].r_addend) = lo;
			break;
		case R_ALPHA_BRSGP:
			/* BRSGP is only allowed to bind to local symbols.
			   If the section is undef, this means that the
			   value was resolved from somewhere else.  */
			if (sym->st_shndx == SHN_UNDEF)
				goto reloc_overflow;
			if ((sym->st_other & STO_ALPHA_STD_GPLOAD) ==
			    STO_ALPHA_STD_GPLOAD)
				/* Omit the prologue.  */
				value += 8;
			/* FALLTHRU */
		case R_ALPHA_BRADDR:
			/* 21-bit branch displacement, in instructions.  */
			value -= (u64)location + 4;
			if (value & 3)
				goto reloc_overflow;
			value = (long)value >> 2;
			if (value + (1<<21) >= 1<<22)
				goto reloc_overflow;
			value &= 0x1fffff;
			value |= *(u32 *)location & ~0x1fffff;
			*(u32 *)location = value;
			break;
		case R_ALPHA_HINT:
			break;
		case R_ALPHA_SREL32:
			/* 32-bit self-relative.  */
			value -= (u64)location;
			if ((int)value != value)
				goto reloc_overflow;
			*(u32 *)location = value;
			break;
		case R_ALPHA_SREL64:
			/* 64-bit self-relative; cannot overflow.  */
			value -= (u64)location;
			*(u64 *)location = value;
			break;
		case R_ALPHA_GPRELHIGH:
			/* High 16 bits of a GP-relative address, rounded
			   for the sign-extension of the low half.  */
			value = (long)(value - gp + 0x8000) >> 16;
			if ((short) value != value)
				goto reloc_overflow;
			*(u16 *)location = value;
			break;
		case R_ALPHA_GPRELLOW:
			/* Low 16 bits; truncation is expected here.  */
			value -= gp;
			*(u16 *)location = value;
			break;
		case R_ALPHA_GPREL16:
			/* Full value must fit in 16 signed bits.  */
			value -= gp;
			if ((short) value != value)
				goto reloc_overflow;
			*(u16 *)location = value;
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %lu\n",
			       me->name, r_type);
			return -ENOEXEC;
		reloc_overflow:
			if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
				printk(KERN_ERR
				       "module %s: Relocation overflow vs section %d\n",
				       me->name, sym->st_shndx);
			else
				printk(KERN_ERR
				       "module %s: Relocation overflow vs %s\n",
				       me->name, strtab + sym->st_name);
			return -ENOEXEC;
		}
	}

	return 0;
}
300 | |||
/* No arch-specific finalization is needed on Alpha.  */
int
module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
		struct module *me)
{
	return 0;
}
307 | |||
/* No arch-specific cleanup is needed on Alpha.  */
void
module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/alpha/kernel/ns87312.c b/arch/alpha/kernel/ns87312.c new file mode 100644 index 000000000000..342b56d24c20 --- /dev/null +++ b/arch/alpha/kernel/ns87312.c | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/ns87312.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/init.h> | ||
6 | #include <asm/io.h> | ||
7 | #include "proto.h" | ||
8 | |||
9 | |||
10 | /* | ||
11 | * The SRM console *disables* the IDE interface, this code ensures it's | ||
12 | * enabled. | ||
13 | * | ||
14 | * This code bangs on a control register of the 87312 Super I/O chip | ||
15 | * that implements parallel port/serial ports/IDE/FDI. Depending on | ||
16 | * the motherboard, the Super I/O chip can be configured through a | ||
17 | * pair of registers that are located either at I/O ports 0x26e/0x26f | ||
18 | * or 0x398/0x399. Unfortunately, autodetecting which base address is | ||
19 | * in use works only once (right after a reset). The Super I/O chip | ||
20 | * has the additional quirk that configuration register data must be | ||
21 | * written twice (I believe this is a safety feature to prevent | ||
22 | * accidental modification---fun, isn't it?). | ||
23 | */ | ||
24 | |||
/*
 * Turn on the IDE interface of the NS 87312 Super I/O chip.
 * ide_base is the I/O port of the chip's index register (0x26e or
 * 0x398, board dependent); data port is at ide_base+1.  Interrupts are
 * held off because the index/data access pair must not be interleaved.
 */
void __init
ns87312_enable_ide(long ide_base)
{
	int data;
	unsigned long flags;

	local_irq_save(flags);
	outb(0, ide_base);		/* set the index register for reg #0 */
	data = inb(ide_base+1);		/* read the current contents */
	outb(0, ide_base);		/* set the index register for reg #0 */
	outb(data | 0x40, ide_base+1);	/* turn on IDE */
	outb(data | 0x40, ide_base+1);	/* turn on IDE, really!  (config
					   data must be written twice) */
	local_irq_restore(flags);
}
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c new file mode 100644 index 000000000000..b5d0fd2bb10a --- /dev/null +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -0,0 +1,1345 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/osf_sys.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This file handles some of the stranger OSF/1 system call interfaces. | ||
9 | * Some of the system calls expect a non-C calling standard, others have | ||
10 | * special parameter blocks.. | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/stddef.h> | ||
20 | #include <linux/syscalls.h> | ||
21 | #include <linux/unistd.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/user.h> | ||
25 | #include <linux/a.out.h> | ||
26 | #include <linux/utsname.h> | ||
27 | #include <linux/time.h> | ||
28 | #include <linux/timex.h> | ||
29 | #include <linux/major.h> | ||
30 | #include <linux/stat.h> | ||
31 | #include <linux/mman.h> | ||
32 | #include <linux/shm.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/file.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/ipc.h> | ||
37 | #include <linux/namei.h> | ||
38 | #include <linux/uio.h> | ||
39 | #include <linux/vfs.h> | ||
40 | |||
41 | #include <asm/fpu.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/uaccess.h> | ||
44 | #include <asm/system.h> | ||
45 | #include <asm/sysinfo.h> | ||
46 | #include <asm/hwrpb.h> | ||
47 | #include <asm/processor.h> | ||
48 | |||
49 | extern int do_pipe(int *); | ||
50 | |||
51 | /* | ||
52 | * Brk needs to return an error. Still support Linux's brk(0) query idiom, | ||
53 | * which OSF programs just shouldn't be doing. We're still not quite | ||
54 | * identical to OSF as we don't return 0 on success, but doing otherwise | ||
55 | * would require changes to libc. Hopefully this is good enough. | ||
56 | */ | ||
/*
 * OSF/1 brk(2).  Delegates to sys_brk(); Linux returns the resulting
 * break address, so a non-zero request that did not take effect is
 * converted to -ENOMEM.  brk == 0 keeps the Linux "query current
 * break" idiom (see the comment above).
 */
asmlinkage unsigned long
osf_brk(unsigned long brk)
{
	unsigned long retval = sys_brk(brk);
	if (brk && brk != retval)
		retval = -ENOMEM;
	return retval;
}
65 | |||
66 | /* | ||
67 | * This is pure guess-work.. | ||
68 | */ | ||
/*
 * OSF/1 set_program_attributes().  Only mm->end_code and mm->brk are
 * updated (both to the end of bss); text_start/text_len are accepted
 * but ignored.  Always succeeds.
 */
asmlinkage int
osf_set_program_attributes(unsigned long text_start, unsigned long text_len,
			   unsigned long bss_start, unsigned long bss_len)
{
	struct mm_struct *mm;

	lock_kernel();		/* 2.6-era BKL serialization for the mm updates */
	mm = current->mm;
	mm->end_code = bss_start + bss_len;
	mm->brk = bss_start + bss_len;
#if 0
	printk("set_program_attributes(%lx %lx %lx %lx)\n",
		text_start, text_len, bss_start, bss_len);
#endif
	unlock_kernel();
	return 0;
}
86 | |||
87 | /* | ||
88 | * OSF/1 directory handling functions... | ||
89 | * | ||
90 | * The "getdents()" interface is much more sane: the "basep" stuff is | ||
91 | * braindamage (it can't really handle filesystems where the directory | ||
92 | * offset differences aren't the same as "d_reclen"). | ||
93 | */ | ||
/* Byte offset of the name field inside an osf_dirent record.  */
#define NAME_OFFSET	offsetof (struct osf_dirent, d_name)
/* Record lengths are padded to a 4-byte boundary.  */
#define ROUND_UP(x)	(((x)+3) & ~3)

/* On-"disk" layout of one OSF/1 directory entry as copied to user space.  */
struct osf_dirent {
	unsigned int d_ino;		/* inode number */
	unsigned short d_reclen;	/* total record length, ROUND_UP'ed */
	unsigned short d_namlen;	/* name length, excluding the NUL */
	char d_name[1];			/* NUL-terminated name (flexible tail) */
};

/* State threaded through osf_filldir() by osf_getdirentries().  */
struct osf_dirent_callback {
	struct osf_dirent __user *dirent;	/* next output slot */
	long __user *basep;		/* where to store the first offset; NULLed after use */
	unsigned int count;		/* bytes remaining in the user buffer */
	int error;			/* sticky error for the caller */
};
110 | |||
111 | static int | ||
112 | osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, | ||
113 | ino_t ino, unsigned int d_type) | ||
114 | { | ||
115 | struct osf_dirent __user *dirent; | ||
116 | struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; | ||
117 | unsigned int reclen = ROUND_UP(NAME_OFFSET + namlen + 1); | ||
118 | |||
119 | buf->error = -EINVAL; /* only used if we fail */ | ||
120 | if (reclen > buf->count) | ||
121 | return -EINVAL; | ||
122 | if (buf->basep) { | ||
123 | if (put_user(offset, buf->basep)) | ||
124 | return -EFAULT; | ||
125 | buf->basep = NULL; | ||
126 | } | ||
127 | dirent = buf->dirent; | ||
128 | put_user(ino, &dirent->d_ino); | ||
129 | put_user(namlen, &dirent->d_namlen); | ||
130 | put_user(reclen, &dirent->d_reclen); | ||
131 | if (copy_to_user(dirent->d_name, name, namlen) || | ||
132 | put_user(0, dirent->d_name + namlen)) | ||
133 | return -EFAULT; | ||
134 | dirent = (void __user *)dirent + reclen; | ||
135 | buf->dirent = dirent; | ||
136 | buf->count -= reclen; | ||
137 | return 0; | ||
138 | } | ||
139 | |||
/*
 * OSF/1 getdirentries(2): read directory entries from fd into dirent
 * (count bytes available); the offset of the first entry is stored
 * through basep.  Returns the number of bytes produced, 0 at EOF, or
 * a negative errno.
 */
asmlinkage int
osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent,
		  unsigned int count, long __user *basep)
{
	int error;
	struct file *file;
	struct osf_dirent_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.dirent = dirent;
	buf.basep = basep;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, osf_filldir, &buf);
	if (error < 0)
		goto out_putf;

	/* If anything was emitted, report bytes used; otherwise report
	   the error (if any) latched by osf_filldir.  */
	error = buf.error;
	if (count != buf.count)
		error = count - buf.count;

 out_putf:
	fput(file);
 out:
	return error;
}

#undef ROUND_UP
#undef NAME_OFFSET
174 | |||
/*
 * OSF/1 mmap(2).  Thin wrapper around do_mmap(): resolves fd (unless
 * MAP_ANONYMOUS), strips flags Linux derives itself, and performs the
 * mapping under mmap_sem.  Returns the mapped address or a negative
 * errno.  OSF-specific flags (_MAP_HASSEMAPHORE etc.) are silently
 * ignored -- see the disabled diagnostic.
 */
asmlinkage unsigned long
osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
	 unsigned long flags, unsigned long fd, unsigned long off)
{
	struct file *file = NULL;
	unsigned long ret = -EBADF;

#if 0
	if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
		printk("%s: unimplemented OSF mmap flags %04lx\n",
			current->comm, flags);
#endif
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	down_write(&current->mm->mmap_sem);
	ret = do_mmap(file, addr, len, prot, flags, off);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
 out:
	return ret;
}
201 | |||
202 | |||
203 | /* | ||
204 | * The OSF/1 statfs structure is much larger, but this should | ||
205 | * match the beginning, at least. | ||
206 | */ | ||
/* Prefix of the (larger) OSF/1 statfs structure; only these leading
   fields are filled in and copied out (see header comment above).  */
struct osf_statfs {
	short f_type;			/* filesystem type */
	short f_flags;			/* mount flags (always 0 here) */
	int f_fsize;			/* fragment size */
	int f_bsize;			/* block size */
	int f_blocks;			/* total blocks */
	int f_bfree;			/* free blocks */
	int f_bavail;			/* blocks available to non-root */
	int f_files;			/* total inodes */
	int f_ffree;			/* free inodes */
	__kernel_fsid_t f_fsid;		/* filesystem id */
};
219 | |||
/*
 * Convert a kernel kstatfs into the OSF/1 layout and copy at most
 * bufsiz bytes (capped to sizeof(struct osf_statfs)) to user space.
 * Note the int fields silently truncate 64-bit counts.
 * Returns 0 or -EFAULT.
 */
static int
linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
		    unsigned long bufsiz)
{
	struct osf_statfs tmp_stat;

	tmp_stat.f_type = linux_stat->f_type;
	tmp_stat.f_flags = 0;	/* mount flags */
	tmp_stat.f_fsize = linux_stat->f_frsize;
	tmp_stat.f_bsize = linux_stat->f_bsize;
	tmp_stat.f_blocks = linux_stat->f_blocks;
	tmp_stat.f_bfree = linux_stat->f_bfree;
	tmp_stat.f_bavail = linux_stat->f_bavail;
	tmp_stat.f_files = linux_stat->f_files;
	tmp_stat.f_ffree = linux_stat->f_ffree;
	tmp_stat.f_fsid = linux_stat->f_fsid;
	if (bufsiz > sizeof(tmp_stat))
		bufsiz = sizeof(tmp_stat);
	return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
}
240 | |||
/*
 * Common worker for osf_statfs()/osf_fstatfs(): statfs the superblock
 * of dentry's inode and convert the result to OSF/1 format.
 */
static int
do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
	      unsigned long bufsiz)
{
	struct kstatfs linux_stat;
	int error = vfs_statfs(dentry->d_inode->i_sb, &linux_stat);
	if (!error)
		error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
	return error;
}
251 | |||
/*
 * OSF/1 statfs(2): look up path and report its filesystem statistics.
 */
asmlinkage int
osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bufsiz)
{
	struct nameidata nd;
	int retval;

	retval = user_path_walk(path, &nd);
	if (!retval) {
		retval = do_osf_statfs(nd.dentry, buffer, bufsiz);
		path_release(&nd);	/* drop the lookup references */
	}
	return retval;
}
265 | |||
/*
 * OSF/1 fstatfs(2): like osf_statfs() but starting from an open fd.
 */
asmlinkage int
osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz)
{
	struct file *file;
	int retval;

	retval = -EBADF;
	file = fget(fd);
	if (file) {
		retval = do_osf_statfs(file->f_dentry, buffer, bufsiz);
		fput(file);
	}
	return retval;
}
280 | |||
281 | /* | ||
282 | * Uhh.. OSF/1 mount parameters aren't exactly obvious.. | ||
283 | * | ||
284 | * Although to be frank, neither are the native Linux/i386 ones.. | ||
285 | */ | ||
/* User-space mount argument blocks for the three OSF/1 fs types we
   emulate.  All three share the same leading layout.  */
struct ufs_args {
	char __user *devname;	/* device to mount */
	int flags;		/* OSF mount flags (ignored) */
	uid_t exroot;		/* NFS-export root mapping (ignored) */
};

struct cdfs_args {
	char __user *devname;
	int flags;
	uid_t exroot;

	/* This has lots more here, which Linux handles with the option block
	   but I'm too lazy to do the translation into ASCII.  */
};

struct procfs_args {
	char __user *devname;
	int flags;
	uid_t exroot;
};
306 | |||
307 | /* | ||
308 | * We can't actually handle ufs yet, so we translate UFS mounts to | ||
309 | * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS | ||
310 | * layout is so braindead it's a major headache doing it. | ||
311 | * | ||
312 | * Just how long ago was it written? OTOH our UFS driver may be still | ||
313 | * unhappy with OSF UFS. [CHECKME] | ||
314 | */ | ||
315 | static int | ||
316 | osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) | ||
317 | { | ||
318 | int retval; | ||
319 | struct cdfs_args tmp; | ||
320 | char *devname; | ||
321 | |||
322 | retval = -EFAULT; | ||
323 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
324 | goto out; | ||
325 | devname = getname(tmp.devname); | ||
326 | retval = PTR_ERR(devname); | ||
327 | if (IS_ERR(devname)) | ||
328 | goto out; | ||
329 | retval = do_mount(devname, dirname, "ext2", flags, NULL); | ||
330 | putname(devname); | ||
331 | out: | ||
332 | return retval; | ||
333 | } | ||
334 | |||
/*
 * Mount an OSF "cdfs" filesystem as iso9660.  Same shape as
 * osf_ufs_mount(): copy args, resolve device name, do_mount().
 * The extra cdfs options beyond the common prefix are not translated.
 */
static int
osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
{
	int retval;
	struct cdfs_args tmp;
	char *devname;

	retval = -EFAULT;
	if (copy_from_user(&tmp, args, sizeof(tmp)))
		goto out;
	devname = getname(tmp.devname);
	retval = PTR_ERR(devname);
	if (IS_ERR(devname))
		goto out;
	retval = do_mount(devname, dirname, "iso9660", flags, NULL);
	putname(devname);
 out:
	return retval;
}
354 | |||
/*
 * Mount procfs.  The args block is copied only to validate that the
 * user pointer is readable; its contents are otherwise unused (proc
 * needs no device).
 */
static int
osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
{
	struct procfs_args tmp;

	if (copy_from_user(&tmp, args, sizeof(tmp)))
		return -EFAULT;

	return do_mount("", dirname, "proc", flags, NULL);
}
365 | |||
/*
 * OSF/1 mount(2) entry point.  typenr selects the filesystem:
 * 1 = ufs (mounted as ext2), 6 = cdfs (iso9660), 9 = procfs.
 * Unknown types are logged and fail with -EINVAL.
 * data points at the type-specific *_args block in user space.
 */
asmlinkage int
osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data)
{
	int retval = -EINVAL;
	char *name;

	lock_kernel();	/* 2.6-era BKL around the whole mount translation */

	name = getname(path);
	retval = PTR_ERR(name);
	if (IS_ERR(name))
		goto out;
	switch (typenr) {
	case 1:
		retval = osf_ufs_mount(name, data, flag);
		break;
	case 6:
		retval = osf_cdfs_mount(name, data, flag);
		break;
	case 9:
		retval = osf_procfs_mount(name, data, flag);
		break;
	default:
		printk("osf_mount(%ld, %x)\n", typenr, flag);
	}
	putname(name);
 out:
	unlock_kernel();
	return retval;
}
396 | |||
/*
 * OSF/1 uname: copy the five utsname strings to user space as five
 * consecutive 32-byte fields (sysname, nodename, release, version,
 * machine).  Returns 0 or -EFAULT.
 */
asmlinkage int
osf_utsname(char __user *name)
{
	int error;

	down_read(&uts_sem);	/* keep utsname stable during the copies */
	error = -EFAULT;
	if (copy_to_user(name + 0, system_utsname.sysname, 32))
		goto out;
	if (copy_to_user(name + 32, system_utsname.nodename, 32))
		goto out;
	if (copy_to_user(name + 64, system_utsname.release, 32))
		goto out;
	if (copy_to_user(name + 96, system_utsname.version, 32))
		goto out;
	if (copy_to_user(name + 128, system_utsname.machine, 32))
		goto out;

	error = 0;
 out:
	up_read(&uts_sem);
	return error;
}
420 | |||
/* OSF/1 getpagesize(2): the MMU page size.  */
asmlinkage unsigned long
sys_getpagesize(void)
{
	return PAGE_SIZE;
}
426 | |||
/* OSF/1 getdtablesize(2): the compile-time fd table limit.  */
asmlinkage unsigned long
sys_getdtablesize(void)
{
	return NR_OPEN;
}
432 | |||
433 | /* | ||
434 | * For compatibility with OSF/1 only. Use utsname(2) instead. | ||
435 | */ | ||
/*
 * Copy the NIS domain name (at most 32 bytes, including the NUL if it
 * fits) to user space.  Always returns 0 after the access_ok check.
 * NOTE(review): the per-byte __put_user results are ignored; a fault
 * after access_ok is unlikely but would go unreported -- verify
 * whether that is acceptable here.
 */
asmlinkage int
osf_getdomainname(char __user *name, int namelen)
{
	unsigned len;
	int i;

	if (!access_ok(VERIFY_WRITE, name, namelen))
		return -EFAULT;

	len = namelen;
	if (namelen > 32)
		len = 32;

	down_read(&uts_sem);
	for (i = 0; i < len; ++i) {
		__put_user(system_utsname.domainname[i], name + i);
		if (system_utsname.domainname[i] == '\0')
			break;	/* stop after copying the terminator */
	}
	up_read(&uts_sem);

	return 0;
}
459 | |||
/*
 * OSF/1 shmat(2): attach a SysV shared memory segment and return the
 * attach address directly (rather than via a pointer argument).
 */
asmlinkage long
osf_shmat(int shmid, void __user *shmaddr, int shmflg)
{
	unsigned long raddr;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &raddr);

	/*
	 * This works because all user-level addresses are
	 * non-negative longs!
	 */
	return err ? err : (long)raddr;
}
474 | |||
475 | |||
476 | /* | ||
477 | * The following stuff should move into a header file should it ever | ||
478 | * be labeled "officially supported." Right now, there is just enough | ||
479 | * support to avoid applications (such as tar) printing error | ||
480 | * messages. The attributes are not really implemented. | ||
481 | */ | ||
482 | |||
483 | /* | ||
484 | * Values for Property list entry flag | ||
485 | */ | ||
#define PLE_PROPAGATE_ON_COPY	0x1	/* cp(1) will copy entry
					   by default */
#define PLE_FLAG_MASK		0x1	/* Valid flag values */
#define PLE_FLAG_ALL		-1	/* All flag value */

/* A list of property-list entry names, as passed from user space.  */
struct proplistname_args {
	unsigned int pl_mask;
	unsigned int pl_numnames;
	char **pl_names;
};

/* Per-operation argument blocks for osf_proplist_syscall(); the
   active member is selected by the pl_code operation.  */
union pl_args {
	struct setargs {
		char __user *path;
		long follow;
		long nbytes;
		char __user *buf;
	} set;
	struct fsetargs {
		long fd;
		long nbytes;
		char __user *buf;
	} fset;
	struct getargs {
		char __user *path;
		long follow;
		struct proplistname_args __user *name_args;
		long nbytes;
		char __user *buf;
		int __user *min_buf_size;
	} get;
	struct fgetargs {
		long fd;
		struct proplistname_args __user *name_args;
		long nbytes;
		char __user *buf;
		int __user *min_buf_size;
	} fget;
	struct delargs {
		char __user *path;
		long follow;
		struct proplistname_args __user *name_args;
	} del;
	struct fdelargs {
		long fd;
		struct proplistname_args __user *name_args;
	} fdel;
};

/* Property-list operation codes.  */
enum pl_code {
	PL_SET = 1, PL_FSET = 2,
	PL_GET = 3, PL_FGET = 4,
	PL_DEL = 5, PL_FDEL = 6
};
540 | |||
541 | asmlinkage long | ||
542 | osf_proplist_syscall(enum pl_code code, union pl_args __user *args) | ||
543 | { | ||
544 | long error; | ||
545 | int __user *min_buf_size_ptr; | ||
546 | |||
547 | lock_kernel(); | ||
548 | switch (code) { | ||
549 | case PL_SET: | ||
550 | if (get_user(error, &args->set.nbytes)) | ||
551 | error = -EFAULT; | ||
552 | break; | ||
553 | case PL_FSET: | ||
554 | if (get_user(error, &args->fset.nbytes)) | ||
555 | error = -EFAULT; | ||
556 | break; | ||
557 | case PL_GET: | ||
558 | error = get_user(min_buf_size_ptr, &args->get.min_buf_size); | ||
559 | if (error) | ||
560 | break; | ||
561 | error = put_user(0, min_buf_size_ptr); | ||
562 | break; | ||
563 | case PL_FGET: | ||
564 | error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); | ||
565 | if (error) | ||
566 | break; | ||
567 | error = put_user(0, min_buf_size_ptr); | ||
568 | break; | ||
569 | case PL_DEL: | ||
570 | case PL_FDEL: | ||
571 | error = 0; | ||
572 | break; | ||
573 | default: | ||
574 | error = -EOPNOTSUPP; | ||
575 | break; | ||
576 | }; | ||
577 | unlock_kernel(); | ||
578 | return error; | ||
579 | } | ||
580 | |||
/*
 * OSF/1 sigstack(2).  uss, if non-NULL, installs a new signal stack;
 * uoss, if non-NULL, receives the previous stack pointer and an
 * "on stack" flag.  Because sigstack carries no size, a SIGSTKSZ
 * extent below ss_sp is presumed (see the inline comment).
 */
asmlinkage int
osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss)
{
	unsigned long usp = rdusp();
	unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size;
	unsigned long oss_os = on_sig_stack(usp);
	int error;

	if (uss) {
		void __user *ss_sp;

		error = -EFAULT;
		if (get_user(ss_sp, &uss->ss_sp))
			goto out;

		/* If the current stack was set with sigaltstack, don't
		   swap stacks while we are on it.  */
		error = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(usp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		   track onstack-ness, but rather calculate it, we must
		   presume a size.  Ho hum this interface is lossy.  */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}

	if (uoss) {
		error = -EFAULT;
		if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))
		    || __put_user(oss_sp, &uoss->ss_sp)
		    || __put_user(oss_os, &uoss->ss_onstack))
			goto out;
	}

	error = 0;
 out:
	return error;
}
621 | |||
/*
 * OSF/1 sysinfo(2): return one NUL-terminated string selected by
 * command (1-based index into sysinfo_table), truncated to count
 * bytes.  Unknown commands are logged and fail with -EINVAL.
 * Returns 0 on success (the byte count is not reported).
 */
asmlinkage long
osf_sysinfo(int command, char __user *buf, long count)
{
	static char * sysinfo_table[] = {
		system_utsname.sysname,
		system_utsname.nodename,
		system_utsname.release,
		system_utsname.version,
		system_utsname.machine,
		"alpha",	/* instruction set architecture */
		"dummy",	/* hardware serial number */
		"dummy",	/* hardware manufacturer */
		"dummy",	/* secure RPC domain */
	};
	unsigned long offset;
	char *res;
	long len, err = -EINVAL;

	offset = command-1;	/* unsigned, so command <= 0 also fails the range check */
	if (offset >= sizeof(sysinfo_table)/sizeof(char *)) {
		/* Digital UNIX has a few unpublished interfaces here */
		printk("sysinfo(%d)", command);
		goto out;
	}

	down_read(&uts_sem);	/* utsname strings may change under us otherwise */
	res = sysinfo_table[offset];
	len = strlen(res)+1;
	if (len > count)
		len = count;
	if (copy_to_user(buf, res, len))
		err = -EFAULT;
	else
		err = 0;
	up_read(&uts_sem);
 out:
	return err;
}
660 | |||
661 | asmlinkage unsigned long | ||
662 | osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, | ||
663 | int __user *start, void __user *arg) | ||
664 | { | ||
665 | unsigned long w; | ||
666 | struct percpu_struct *cpu; | ||
667 | |||
668 | switch (op) { | ||
669 | case GSI_IEEE_FP_CONTROL: | ||
670 | /* Return current software fp control & status bits. */ | ||
671 | /* Note that DU doesn't verify available space here. */ | ||
672 | |||
673 | w = current_thread_info()->ieee_state & IEEE_SW_MASK; | ||
674 | w = swcr_update_status(w, rdfpcr()); | ||
675 | if (put_user(w, (unsigned long __user *) buffer)) | ||
676 | return -EFAULT; | ||
677 | return 0; | ||
678 | |||
679 | case GSI_IEEE_STATE_AT_SIGNAL: | ||
680 | /* | ||
681 | * Not sure anybody will ever use this weird stuff. These | ||
682 | * ops can be used (under OSF/1) to set the fpcr that should | ||
683 | * be used when a signal handler starts executing. | ||
684 | */ | ||
685 | break; | ||
686 | |||
687 | case GSI_UACPROC: | ||
688 | if (nbytes < sizeof(unsigned int)) | ||
689 | return -EINVAL; | ||
690 | w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; | ||
691 | if (put_user(w, (unsigned int __user *)buffer)) | ||
692 | return -EFAULT; | ||
693 | return 1; | ||
694 | |||
695 | case GSI_PROC_TYPE: | ||
696 | if (nbytes < sizeof(unsigned long)) | ||
697 | return -EINVAL; | ||
698 | cpu = (struct percpu_struct*) | ||
699 | ((char*)hwrpb + hwrpb->processor_offset); | ||
700 | w = cpu->type; | ||
701 | if (put_user(w, (unsigned long __user*)buffer)) | ||
702 | return -EFAULT; | ||
703 | return 1; | ||
704 | |||
705 | case GSI_GET_HWRPB: | ||
706 | if (nbytes < sizeof(*hwrpb)) | ||
707 | return -EINVAL; | ||
708 | if (copy_to_user(buffer, hwrpb, nbytes) != 0) | ||
709 | return -EFAULT; | ||
710 | return 1; | ||
711 | |||
712 | default: | ||
713 | break; | ||
714 | } | ||
715 | |||
716 | return -EOPNOTSUPP; | ||
717 | } | ||
718 | |||
/*
 * OSF/1 setsysinfo(2): set system state selected by op from buffer.
 * start/arg are accepted for ABI compatibility but unused.
 * Returns 0 on success, -EFAULT/-EOPNOTSUPP on error.
 */
asmlinkage unsigned long
osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
	       int __user *start, void __user *arg)
{
	switch (op) {
	case SSI_IEEE_FP_CONTROL: {
		unsigned long swcr, fpcr;
		unsigned int *state;

		/*
		 * Alpha Architecture Handbook 4.7.7.3:
		 * To be fully IEEE compliant, we must track the current IEEE
		 * exception state in software, because spurious bits can be
		 * set in the trap shadow of a software-complete insn.
		 */

		if (get_user(swcr, (unsigned long __user *)buffer))
			return -EFAULT;
		state = &current_thread_info()->ieee_state;

		/* Update software trap enable bits.  */
		*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);

		/* Update the real fpcr.  */
		fpcr = rdfpcr() & FPCR_DYN_MASK;	/* keep only rounding mode */
		fpcr |= ieee_swcr_to_fpcr(swcr);
		wrfpcr(fpcr);

		return 0;
	}

	case SSI_IEEE_RAISE_EXCEPTION: {
		unsigned long exc, swcr, fpcr, fex;
		unsigned int *state;

		if (get_user(exc, (unsigned long __user *)buffer))
			return -EFAULT;
		state = &current_thread_info()->ieee_state;
		exc &= IEEE_STATUS_MASK;

		/* Update software trap enable bits.  */
		swcr = (*state & IEEE_SW_MASK) | exc;
		*state |= exc;

		/* Update the real fpcr.  */
		fpcr = rdfpcr();
		fpcr |= ieee_swcr_to_fpcr(swcr);
		wrfpcr(fpcr);

		/* If any exceptions set by this call, and are unmasked,
		   send a signal.  Old exceptions are not signaled.  */
		fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
		if (fex) {
			siginfo_t info;
			int si_code = 0;

			/* Later tests take precedence (INV strongest).  */
			if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
			if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
			if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
			if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
			if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
			if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;

			info.si_signo = SIGFPE;
			info.si_errno = 0;
			info.si_code = si_code;
			info.si_addr = NULL;	/* FIXME */
			send_sig_info(SIGFPE, &info, current);
		}
		return 0;
	}

	case SSI_IEEE_STATE_AT_SIGNAL:
	case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
		/*
		 * Not sure anybody will ever use this weird stuff.  These
		 * ops can be used (under OSF/1) to set the fpcr that should
		 * be used when a signal handler starts executing.
		 */
		break;

	case SSI_NVPAIRS: {
		unsigned long v, w, i;
		unsigned int old, new;

		/* nbytes is the number of (name, value) int pairs.
		   NOTE(review): nbytes is not bounded here, so a huge value
		   keeps this loop reading user memory for a long time --
		   confirm whether a sanity limit is wanted.  */
		for (i = 0; i < nbytes; ++i) {

			if (get_user(v, 2*i + (unsigned int __user *)buffer))
				return -EFAULT;
			if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
				return -EFAULT;
			switch (v) {
			case SSIN_UACPROC:
			again:
				/* lock-free RMW of the UAC bits in flags */
				old = current_thread_info()->flags;
				new = old & ~(UAC_BITMASK << UAC_SHIFT);
				new = new | (w & UAC_BITMASK) << UAC_SHIFT;
				if (cmpxchg(&current_thread_info()->flags,
					    old, new) != old)
					goto again;
				break;

			default:
				return -EOPNOTSUPP;
			}
		}
		return 0;
	}

	default:
		break;
	}

	return -EOPNOTSUPP;
}
834 | |||
835 | /* Translations due to the fact that OSF's time_t is an int. Which | ||
836 | affects all sorts of things, like timeval and itimerval. */ | ||
837 | |||
extern struct timezone sys_tz;
extern int do_adjtimex(struct timex *);

/* OSF/1 time_t is a 32-bit int, so its timeval/itimerval use int
   fields; these types and helpers convert to/from the native 64-bit
   kernel structures (see the comment above).  */
struct timeval32
{
	int tv_sec, tv_usec;
};

struct itimerval32
{
	struct timeval32 it_interval;
	struct timeval32 it_value;
};

/* Read a user timeval32 into a kernel timeval.  Returns non-zero on
   fault (access_ok failure or __get_user fault).  */
static inline long
get_tv32(struct timeval *o, struct timeval32 __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->tv_sec, &i->tv_sec) |
		 __get_user(o->tv_usec, &i->tv_usec)));
}

/* Write a kernel timeval to a user timeval32 (seconds truncate to
   32 bits).  Returns non-zero on fault.  */
static inline long
put_tv32(struct timeval32 __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) |
		 __put_user(i->tv_usec, &o->tv_usec)));
}

/* Read a user itimerval32 into a kernel itimerval.  Non-zero on fault.  */
static inline long
get_it32(struct itimerval *o, struct itimerval32 __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

/* Write a kernel itimerval to a user itimerval32.  Non-zero on fault.  */
static inline long
put_it32(struct itimerval32 __user *o, struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

/* Convert a jiffies count to a timeval32.  The parameter name shadows
   the global jiffies counter; only the argument is used here.  */
static inline void
jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
{
	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
	value->tv_sec = jiffies / HZ;
}
894 | |||
/*
 * OSF/1 gettimeofday(2): fill tv (as timeval32) and/or tz.  Either
 * pointer may be NULL.  Returns 0 or -EFAULT.
 */
asmlinkage int
osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (put_tv32(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}
910 | |||
/*
 * OSF/1 settimeofday(2).  Either pointer may be NULL.
 *
 * The cast of &kts (a timespec) to timeval* is a deliberate pun:
 * get_tv32 stores seconds into tv_sec and microseconds into the slot
 * that is tv_nsec in the timespec view; the *= 1000 below then turns
 * those microseconds into nanoseconds.  This relies on timeval and
 * timespec sharing the same two-long layout -- TODO confirm that
 * still holds before touching these types.
 */
asmlinkage int
osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (get_tv32((struct timeval *)&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	kts.tv_nsec *= 1000;	/* microseconds -> nanoseconds (see above) */

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
930 | |||
/*
 * OSF/1 getitimer(2): fetch timer `which` and convert to itimerval32.
 */
asmlinkage int
osf_getitimer(int which, struct itimerval32 __user *it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_it32(it, &kit))
		error = -EFAULT;

	return error;
}
943 | |||
/*
 * OSF/1 setitimer(2): arm timer `which` from *in (NULL disarms via a
 * zeroed value) and optionally return the previous setting in *out.
 */
asmlinkage int
osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_it32(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));	/* NULL means cancel the timer */

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;

	if (put_it32(out, &kout))
		return -EFAULT;

	return 0;

}
966 | |||
/*
 * OSF/1 utimes(2): set access/modification times of filename from two
 * timeval32 values; tvs == NULL means "now".
 */
asmlinkage int
osf_utimes(char __user *filename, struct timeval32 __user *tvs)
{
	struct timeval ktvs[2];

	if (tvs) {
		if (get_tv32(&ktvs[0], &tvs[0]) ||
		    get_tv32(&ktvs[1], &tvs[1]))
			return -EFAULT;
	}

	return do_utimes(filename, tvs ? ktvs : NULL);
}
980 | |||
/* Largest timeout (in seconds) representable as a jiffies count
   without overflowing MAX_SCHEDULE_TIMEOUT.  */
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
983 | |||
984 | asmlinkage int | ||
985 | osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, | ||
986 | struct timeval32 __user *tvp) | ||
987 | { | ||
988 | fd_set_bits fds; | ||
989 | char *bits; | ||
990 | size_t size; | ||
991 | long timeout; | ||
992 | int ret = -EINVAL; | ||
993 | |||
994 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
995 | if (tvp) { | ||
996 | time_t sec, usec; | ||
997 | |||
998 | if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) | ||
999 | || __get_user(sec, &tvp->tv_sec) | ||
1000 | || __get_user(usec, &tvp->tv_usec)) { | ||
1001 | ret = -EFAULT; | ||
1002 | goto out_nofds; | ||
1003 | } | ||
1004 | |||
1005 | if (sec < 0 || usec < 0) | ||
1006 | goto out_nofds; | ||
1007 | |||
1008 | if ((unsigned long) sec < MAX_SELECT_SECONDS) { | ||
1009 | timeout = (usec + 1000000/HZ - 1) / (1000000/HZ); | ||
1010 | timeout += sec * (unsigned long) HZ; | ||
1011 | } | ||
1012 | } | ||
1013 | |||
1014 | if (n < 0 || n > current->files->max_fdset) | ||
1015 | goto out_nofds; | ||
1016 | |||
1017 | /* | ||
1018 | * We need 6 bitmaps (in/out/ex for both incoming and outgoing), | ||
1019 | * since we used fdset we need to allocate memory in units of | ||
1020 | * long-words. | ||
1021 | */ | ||
1022 | ret = -ENOMEM; | ||
1023 | size = FDS_BYTES(n); | ||
1024 | bits = kmalloc(6 * size, GFP_KERNEL); | ||
1025 | if (!bits) | ||
1026 | goto out_nofds; | ||
1027 | fds.in = (unsigned long *) bits; | ||
1028 | fds.out = (unsigned long *) (bits + size); | ||
1029 | fds.ex = (unsigned long *) (bits + 2*size); | ||
1030 | fds.res_in = (unsigned long *) (bits + 3*size); | ||
1031 | fds.res_out = (unsigned long *) (bits + 4*size); | ||
1032 | fds.res_ex = (unsigned long *) (bits + 5*size); | ||
1033 | |||
1034 | if ((ret = get_fd_set(n, inp->fds_bits, fds.in)) || | ||
1035 | (ret = get_fd_set(n, outp->fds_bits, fds.out)) || | ||
1036 | (ret = get_fd_set(n, exp->fds_bits, fds.ex))) | ||
1037 | goto out; | ||
1038 | zero_fd_set(n, fds.res_in); | ||
1039 | zero_fd_set(n, fds.res_out); | ||
1040 | zero_fd_set(n, fds.res_ex); | ||
1041 | |||
1042 | ret = do_select(n, &fds, &timeout); | ||
1043 | |||
1044 | /* OSF does not copy back the remaining time. */ | ||
1045 | |||
1046 | if (ret < 0) | ||
1047 | goto out; | ||
1048 | if (!ret) { | ||
1049 | ret = -ERESTARTNOHAND; | ||
1050 | if (signal_pending(current)) | ||
1051 | goto out; | ||
1052 | ret = 0; | ||
1053 | } | ||
1054 | |||
1055 | if (set_fd_set(n, inp->fds_bits, fds.res_in) || | ||
1056 | set_fd_set(n, outp->fds_bits, fds.res_out) || | ||
1057 | set_fd_set(n, exp->fds_bits, fds.res_ex)) | ||
1058 | ret = -EFAULT; | ||
1059 | |||
1060 | out: | ||
1061 | kfree(bits); | ||
1062 | out_nofds: | ||
1063 | return ret; | ||
1064 | } | ||
1065 | |||
/* OSF/1 layout of struct rusage: 32-bit timevals, otherwise the same
   long-sized fields as the native structure. */
struct rusage32 {
	struct timeval32 ru_utime;	/* user time used */
	struct timeval32 ru_stime;	/* system time used */
	long	ru_maxrss;		/* maximum resident set size */
	long	ru_ixrss;		/* integral shared memory size */
	long	ru_idrss;		/* integral unshared data size */
	long	ru_isrss;		/* integral unshared stack size */
	long	ru_minflt;		/* page reclaims */
	long	ru_majflt;		/* page faults */
	long	ru_nswap;		/* swaps */
	long	ru_inblock;		/* block input operations */
	long	ru_oublock;		/* block output operations */
	long	ru_msgsnd;		/* messages sent */
	long	ru_msgrcv;		/* messages received */
	long	ru_nsignals;		/* signals received */
	long	ru_nvcsw;		/* voluntary context switches */
	long	ru_nivcsw;		/* involuntary " */
};
1084 | |||
/*
 * OSF/1 getrusage(2).  Supports RUSAGE_SELF and RUSAGE_CHILDREN only;
 * times are converted from jiffies into 32-bit timevals, and every
 * field not tracked here is reported as zero (hence the memset).
 */
asmlinkage int
osf_getrusage(int who, struct rusage32 __user *ru)
{
	struct rusage32 r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;

	memset(&r, 0, sizeof(r));
	switch (who) {
	case RUSAGE_SELF:
		jiffies_to_timeval32(current->utime, &r.ru_utime);
		jiffies_to_timeval32(current->stime, &r.ru_stime);
		r.ru_minflt = current->min_flt;
		r.ru_majflt = current->maj_flt;
		break;
	case RUSAGE_CHILDREN:
		jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
		jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
		r.ru_minflt = current->signal->cmin_flt;
		r.ru_majflt = current->signal->cmaj_flt;
		break;
	}

	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
1111 | |||
/*
 * OSF/1 wait4(2): like sys_wait4() but reports resource usage in the
 * 32-bit-timeval "struct rusage32" layout.
 */
asmlinkage long
osf_wait4(pid_t pid, int __user *ustatus, int options,
	  struct rusage32 __user *ur)
{
	struct rusage r;
	long ret, err;
	mm_segment_t old_fs;

	if (!ur)
		return sys_wait4(pid, ustatus, options, NULL);

	/* Let sys_wait4() write its native rusage into a kernel buffer
	   by temporarily lifting the user-address limit check. */
	old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
	set_fs (old_fs);

	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
		return -EFAULT;

	/* Translate field by field; the __put_user() results are OR-ed
	   together so any fault yields -EFAULT below. */
	err = 0;
	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
	err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec);
	err |= __put_user(r.ru_maxrss, &ur->ru_maxrss);
	err |= __put_user(r.ru_ixrss, &ur->ru_ixrss);
	err |= __put_user(r.ru_idrss, &ur->ru_idrss);
	err |= __put_user(r.ru_isrss, &ur->ru_isrss);
	err |= __put_user(r.ru_minflt, &ur->ru_minflt);
	err |= __put_user(r.ru_majflt, &ur->ru_majflt);
	err |= __put_user(r.ru_nswap, &ur->ru_nswap);
	err |= __put_user(r.ru_inblock, &ur->ru_inblock);
	err |= __put_user(r.ru_oublock, &ur->ru_oublock);
	err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd);
	err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv);
	err |= __put_user(r.ru_nsignals, &ur->ru_nsignals);
	err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw);
	err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw);

	return err ? err : ret;
}
1154 | |||
1155 | /* | ||
1156 | * I don't know what the parameters are: the first one | ||
1157 | * seems to be a timeval pointer, and I suspect the second | ||
1158 | * one is the time remaining.. Ho humm.. No documentation. | ||
1159 | */ | ||
1160 | asmlinkage int | ||
1161 | osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain) | ||
1162 | { | ||
1163 | struct timeval tmp; | ||
1164 | unsigned long ticks; | ||
1165 | |||
1166 | if (get_tv32(&tmp, sleep)) | ||
1167 | goto fault; | ||
1168 | |||
1169 | ticks = tmp.tv_usec; | ||
1170 | ticks = (ticks + (1000000 / HZ) - 1) / (1000000 / HZ); | ||
1171 | ticks += tmp.tv_sec * HZ; | ||
1172 | |||
1173 | current->state = TASK_INTERRUPTIBLE; | ||
1174 | ticks = schedule_timeout(ticks); | ||
1175 | |||
1176 | if (remain) { | ||
1177 | tmp.tv_sec = ticks / HZ; | ||
1178 | tmp.tv_usec = ticks % HZ; | ||
1179 | if (put_tv32(remain, &tmp)) | ||
1180 | goto fault; | ||
1181 | } | ||
1182 | |||
1183 | return 0; | ||
1184 | fault: | ||
1185 | return -EFAULT; | ||
1186 | } | ||
1187 | |||
1188 | |||
/* OSF/1 layout of struct timex: the native field order but with a
   32-bit timeval, followed by reserved padding words. */
struct timex32 {
	unsigned int modes;	/* mode selector */
	long offset;		/* time offset (usec) */
	long freq;		/* frequency offset (scaled ppm) */
	long maxerror;		/* maximum error (usec) */
	long esterror;		/* estimated error (usec) */
	int status;		/* clock command/status */
	long constant;		/* pll time constant */
	long precision;		/* clock precision (usec) (read only) */
	long tolerance;		/* clock frequency tolerance (ppm)
				 * (read only)
				 */
	struct timeval32 time;	/* (read only) */
	long tick;		/* (modified) usecs between clock ticks */

	long ppsfreq;		/* pps frequency (scaled ppm) (ro) */
	long jitter;		/* pps jitter (us) (ro) */
	int shift;		/* interval duration (s) (shift) (ro) */
	long stabil;		/* pps stability (scaled ppm) (ro) */
	long jitcnt;		/* jitter limit exceeded (ro) */
	long calcnt;		/* calibration intervals (ro) */
	long errcnt;		/* calibration errors (ro) */
	long stbcnt;		/* stability limit exceeded (ro) */

	/* 12 reserved 32-bit words of padding. */
	int  :32; int  :32; int  :32; int  :32;
	int  :32; int  :32; int  :32; int  :32;
	int  :32; int  :32; int  :32; int  :32;
};
1217 | |||
1218 | asmlinkage int | ||
1219 | sys_old_adjtimex(struct timex32 __user *txc_p) | ||
1220 | { | ||
1221 | struct timex txc; | ||
1222 | int ret; | ||
1223 | |||
1224 | /* copy relevant bits of struct timex. */ | ||
1225 | if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || | ||
1226 | copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - | ||
1227 | offsetof(struct timex32, time))) | ||
1228 | return -EFAULT; | ||
1229 | |||
1230 | ret = do_adjtimex(&txc); | ||
1231 | if (ret < 0) | ||
1232 | return ret; | ||
1233 | |||
1234 | /* copy back to timex32 */ | ||
1235 | if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || | ||
1236 | (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - | ||
1237 | offsetof(struct timex32, tick))) || | ||
1238 | (put_tv32(&txc_p->time, &txc.time))) | ||
1239 | return -EFAULT; | ||
1240 | |||
1241 | return ret; | ||
1242 | } | ||
1243 | |||
1244 | /* Get an address range which is currently unmapped. Similar to the | ||
1245 | generic version except that we know how to honor ADDR_LIMIT_32BIT. */ | ||
1246 | |||
1247 | static unsigned long | ||
1248 | arch_get_unmapped_area_1(unsigned long addr, unsigned long len, | ||
1249 | unsigned long limit) | ||
1250 | { | ||
1251 | struct vm_area_struct *vma = find_vma(current->mm, addr); | ||
1252 | |||
1253 | while (1) { | ||
1254 | /* At this point: (!vma || addr < vma->vm_end). */ | ||
1255 | if (limit - len < addr) | ||
1256 | return -ENOMEM; | ||
1257 | if (!vma || addr + len <= vma->vm_start) | ||
1258 | return addr; | ||
1259 | addr = vma->vm_end; | ||
1260 | vma = vma->vm_next; | ||
1261 | } | ||
1262 | } | ||
1263 | |||
1264 | unsigned long | ||
1265 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
1266 | unsigned long len, unsigned long pgoff, | ||
1267 | unsigned long flags) | ||
1268 | { | ||
1269 | unsigned long limit; | ||
1270 | |||
1271 | /* "32 bit" actually means 31 bit, since pointers sign extend. */ | ||
1272 | if (current->personality & ADDR_LIMIT_32BIT) | ||
1273 | limit = 0x80000000; | ||
1274 | else | ||
1275 | limit = TASK_SIZE; | ||
1276 | |||
1277 | if (len > limit) | ||
1278 | return -ENOMEM; | ||
1279 | |||
1280 | /* First, see if the given suggestion fits. | ||
1281 | |||
1282 | The OSF/1 loader (/sbin/loader) relies on us returning an | ||
1283 | address larger than the requested if one exists, which is | ||
1284 | a terribly broken way to program. | ||
1285 | |||
1286 | That said, I can see the use in being able to suggest not | ||
1287 | merely specific addresses, but regions of memory -- perhaps | ||
1288 | this feature should be incorporated into all ports? */ | ||
1289 | |||
1290 | if (addr) { | ||
1291 | addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); | ||
1292 | if (addr != (unsigned long) -ENOMEM) | ||
1293 | return addr; | ||
1294 | } | ||
1295 | |||
1296 | /* Next, try allocating at TASK_UNMAPPED_BASE. */ | ||
1297 | addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), | ||
1298 | len, limit); | ||
1299 | if (addr != (unsigned long) -ENOMEM) | ||
1300 | return addr; | ||
1301 | |||
1302 | /* Finally, try allocating in low memory. */ | ||
1303 | addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); | ||
1304 | |||
1305 | return addr; | ||
1306 | } | ||
1307 | |||
1308 | #ifdef CONFIG_OSF4_COMPAT | ||
1309 | |||
1310 | /* Clear top 32 bits of iov_len in the user's buffer for | ||
1311 | compatibility with old versions of OSF/1 where iov_len | ||
1312 | was defined as int. */ | ||
1313 | static int | ||
1314 | osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) | ||
1315 | { | ||
1316 | unsigned long i; | ||
1317 | |||
1318 | for (i = 0 ; i < count ; i++) { | ||
1319 | int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; | ||
1320 | |||
1321 | if (put_user(0, iov_len_high)) | ||
1322 | return -EFAULT; | ||
1323 | } | ||
1324 | return 0; | ||
1325 | } | ||
1326 | |||
1327 | asmlinkage ssize_t | ||
1328 | osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count) | ||
1329 | { | ||
1330 | if (unlikely(personality(current->personality) == PER_OSF4)) | ||
1331 | if (osf_fix_iov_len(vector, count)) | ||
1332 | return -EFAULT; | ||
1333 | return sys_readv(fd, vector, count); | ||
1334 | } | ||
1335 | |||
1336 | asmlinkage ssize_t | ||
1337 | osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count) | ||
1338 | { | ||
1339 | if (unlikely(personality(current->personality) == PER_OSF4)) | ||
1340 | if (osf_fix_iov_len(vector, count)) | ||
1341 | return -EFAULT; | ||
1342 | return sys_writev(fd, vector, count); | ||
1343 | } | ||
1344 | |||
1345 | #endif | ||
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c new file mode 100644 index 000000000000..582a3519fb28 --- /dev/null +++ b/arch/alpha/kernel/pci-noop.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/pci-noop.c | ||
3 | * | ||
4 | * Stub PCI interfaces for Jensen-specific kernels. | ||
5 | */ | ||
6 | |||
7 | #include <linux/pci.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/bootmem.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/dma-mapping.h> | ||
14 | |||
15 | #include "proto.h" | ||
16 | |||
17 | |||
/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;	/* singly-linked list; tail pointer kept for O(1) append */
struct pci_controller *pci_isa_hose;				/* hose carrying ISA accesses, if any */
24 | |||
25 | |||
26 | struct pci_controller * __init | ||
27 | alloc_pci_controller(void) | ||
28 | { | ||
29 | struct pci_controller *hose; | ||
30 | |||
31 | hose = alloc_bootmem(sizeof(*hose)); | ||
32 | |||
33 | *hose_tail = hose; | ||
34 | hose_tail = &hose->next; | ||
35 | |||
36 | return hose; | ||
37 | } | ||
38 | |||
39 | struct resource * __init | ||
40 | alloc_resource(void) | ||
41 | { | ||
42 | struct resource *res; | ||
43 | |||
44 | res = alloc_bootmem(sizeof(*res)); | ||
45 | |||
46 | return res; | ||
47 | } | ||
48 | |||
49 | asmlinkage long | ||
50 | sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) | ||
51 | { | ||
52 | struct pci_controller *hose; | ||
53 | |||
54 | /* from hose or from bus.devfn */ | ||
55 | if (which & IOBASE_FROM_HOSE) { | ||
56 | for (hose = hose_head; hose; hose = hose->next) | ||
57 | if (hose->index == bus) | ||
58 | break; | ||
59 | if (!hose) | ||
60 | return -ENODEV; | ||
61 | } else { | ||
62 | /* Special hook for ISA access. */ | ||
63 | if (bus == 0 && dfn == 0) | ||
64 | hose = pci_isa_hose; | ||
65 | else | ||
66 | return -ENODEV; | ||
67 | } | ||
68 | |||
69 | switch (which & ~IOBASE_FROM_HOSE) { | ||
70 | case IOBASE_HOSE: | ||
71 | return hose->index; | ||
72 | case IOBASE_SPARSE_MEM: | ||
73 | return hose->sparse_mem_base; | ||
74 | case IOBASE_DENSE_MEM: | ||
75 | return hose->dense_mem_base; | ||
76 | case IOBASE_SPARSE_IO: | ||
77 | return hose->sparse_io_base; | ||
78 | case IOBASE_DENSE_IO: | ||
79 | return hose->dense_io_base; | ||
80 | case IOBASE_ROOT_BUS: | ||
81 | return hose->bus->number; | ||
82 | } | ||
83 | |||
84 | return -EOPNOTSUPP; | ||
85 | } | ||
86 | |||
87 | asmlinkage long | ||
88 | sys_pciconfig_read(unsigned long bus, unsigned long dfn, | ||
89 | unsigned long off, unsigned long len, void *buf) | ||
90 | { | ||
91 | if (!capable(CAP_SYS_ADMIN)) | ||
92 | return -EPERM; | ||
93 | else | ||
94 | return -ENODEV; | ||
95 | } | ||
96 | |||
97 | asmlinkage long | ||
98 | sys_pciconfig_write(unsigned long bus, unsigned long dfn, | ||
99 | unsigned long off, unsigned long len, void *buf) | ||
100 | { | ||
101 | if (!capable(CAP_SYS_ADMIN)) | ||
102 | return -EPERM; | ||
103 | else | ||
104 | return -ENODEV; | ||
105 | } | ||
106 | |||
/* Stubs for the routines in pci_iommu.c: */

/* Consistent allocations always fail: there is no PCI DMA here. */
void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	return NULL;
}

/* Nothing was ever allocated, so nothing to free. */
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
}

/* Single mappings "succeed" with a null bus address. */
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
	       int direction)
{
	return (dma_addr_t) 0;
}

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
}

/* Scatter-gather mappings map zero entries. */
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	return 0;
}

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
}

/* No mask is ever supported. */
int
pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
	return 0;
}
152 | |||
/* Generic DMA mapping functions: */

/*
 * Allocate a zeroed buffer straight from the page allocator and report
 * its bus address via virt_to_bus() -- no IOMMU on this configuration.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size,
		   dma_addr_t *dma_handle, int gfp)
{
	void *ret;

	/* Drop the ISA DMA zone request when there is no device or its
	   mask covers the full 32-bit range. */
	if (!dev || *dev->dma_mask >= 0xffffffffUL)
		gfp &= ~GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
172 | |||
/*
 * Trivial scatter-gather mapping: each entry's bus address is simply
 * the bus translation of its page's kernel virtual address.
 */
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++ ) {
		void *va;

		BUG_ON(!sg[i].page);
		va = page_address(sg[i].page) + sg[i].offset;
		sg_dma_address(sg + i) = (dma_addr_t)virt_to_bus(va);
		sg_dma_len(sg + i) = sg[i].length;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
192 | |||
/* Validate and install a new DMA mask for the device. */
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
203 | |||
/* No PCI, hence no BARs to map or unmap. */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c new file mode 100644 index 000000000000..1f36bbd0ed5d --- /dev/null +++ b/arch/alpha/kernel/pci.c | |||
@@ -0,0 +1,561 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/pci.c | ||
3 | * | ||
4 | * Extruded from code written by | ||
5 | * Dave Rusling (david.rusling@reo.mts.dec.com) | ||
6 | * David Mosberger (davidm@cs.arizona.edu) | ||
7 | */ | ||
8 | |||
9 | /* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */ | ||
10 | |||
11 | /* | ||
12 | * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> | ||
13 | * PCI-PCI bridges cleanup | ||
14 | */ | ||
15 | #include <linux/config.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/cache.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <asm/machvec.h> | ||
26 | |||
27 | #include "proto.h" | ||
28 | #include "pci_impl.h" | ||
29 | |||
30 | |||
/*
 * Some string constants used by the various core logics.
 */

const char *const pci_io_names[] = {
	"PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
	"PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
};

const char *const pci_mem_names[] = {
	"PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
	"PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
};

const char pci_hae0_name[] = "HAE0";

/* Indicate whether we respect the PCI setup left by console. */
/*
 * Make this long-lived  so that we know when shutting down
 * whether we probed only or not.
 */
int pci_probe_only;
53 | |||
/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;	/* singly-linked list; tail pointer kept for O(1) append */
struct pci_controller *pci_isa_hose;				/* hose carrying the ISA bridge, if any */
60 | |||
/*
 * Quirks.
 */

/* Force the Intel 82378's class code to bridge/ISA. */
static void __init
quirk_isa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);
71 | |||
/* Work around two separate misbehaviors of the Cy82C693. */
static void __init
quirk_cypress(struct pci_dev *dev)
{
	/* The Notorious Cy82C693 chip.  */

	/* The Cypress IDE controller doesn't support native mode, but it
	   has programmable addresses of IDE command/control registers.
	   This violates PCI specifications, confuses the IDE subsystem and
	   causes resource conflicts between the primary HD_CMD register and
	   the floppy controller.  Ugh.  Fix that.  */
	if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
		dev->resource[0].flags = 0;
		dev->resource[1].flags = 0;
	}

	/* The Cypress bridge responds on the PCI bus in the address range
	   0xffff0000-0xffffffff (conventional x86 BIOS ROM).  There is no
	   way to turn this off.  The bridge also supports several extended
	   BIOS ranges (disabled after power-up), and some consoles do turn
	   them on.  So if we use a large direct-map window, or a large SG
	   window, we must avoid the entire 0xfff00000-0xffffffff region.  */
	else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
		if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
			__direct_map_size = 0xfff00000UL - __direct_map_base;
		else {
			struct pci_controller *hose = dev->sysdata;
			struct pci_iommu_arena *pci = hose->sg_pci;
			if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
				pci->size = 0xfff00000UL - pci->dma_base;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);
105 | |||
/* Called for each device after PCI setup is done. */
/* Remember the ISA/EISA bridge and clamp its DMA mask to the ISA limit. */
static void __init
pcibios_fixup_final(struct pci_dev *dev)
{
	unsigned int class = dev->class >> 8;

	if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
		dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
		isa_bridge = dev;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
118 | |||
/* Just declaring that the power-of-ten prefixes are actually the
   power-of-two ones doesn't make it true :) */
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)

/*
 * Choose an aligned starting address for a device resource, honoring
 * the hose's minimum I/O/memory bases and the sparse-space aliasing
 * constraints documented below.
 */
void
pcibios_align_resource(void *data, struct resource *res,
		       unsigned long size, unsigned long align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = dev->sysdata;
	unsigned long alignto;
	unsigned long start = res->start;

	if (res->flags & IORESOURCE_IO) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->io_space->start < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + hose->io_space->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}
	else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->mem_space->start;

		/*
		 * The following holds at least for the Low Cost
		 * Alpha implementation of the PCI interface:
		 *
		 * In sparse memory address space, the first
		 * octant (16MB) of every 128MB segment is
		 * aliased to the very first 16 MB of the
		 * address space (i.e., it aliases the ISA
		 * memory address space).  Thus, we try to
		 * avoid allocating PCI devices in that range.
		 * Can be allocated in 2nd-7th octant only.
		 * Devices that need more than 112MB of
		 * address space must be accessed through
		 * dense memory space only!
		 */

		/* Align to multiple of size of minimum base. */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
		if (hose->sparse_mem_base && size <= 7 * 16*MB) {
			/* Don't start inside the aliased first octant... */
			if (((start / (16*MB)) & 0x7) == 0) {
				start &= ~(128*MB - 1);
				start += 16*MB;
				start = ALIGN(start, alignto);
			}
			/* ...and don't straddle a 128MB segment. */
			if (start/(128*MB) != (start + size - 1)/(128*MB)) {
				start &= ~(128*MB - 1);
				start += (128 + 16)*MB;
				start = ALIGN(start, alignto);
			}
		}
	}

	res->start = start;
}
#undef KB
#undef MB
#undef GB
188 | |||
/* Subsystem init: let the machine vector bring up its PCI core. */
static int __init
pcibios_init(void)
{
	if (alpha_mv.init_pci)
		alpha_mv.init_pci();
	return 0;
}

subsys_initcall(pcibios_init);
198 | |||
/* No "pci=" options are handled here; return the string unconsumed. */
char * __init
pcibios_setup(char *str)
{
	return str;
}
204 | |||
#ifdef ALPHA_RESTORE_SRM_SETUP
/* Head of the list of devices whose SRM-time config space was saved. */
static struct pdev_srm_saved_conf *srm_saved_configs;

/*
 * Record the SRM console's PCI configuration of "dev" so it can be
 * restored later.  Only done when running on SRM and when we are
 * actually reprogramming devices (i.e. not pci_probe_only).
 */
void __init
pdev_save_srm_config(struct pci_dev *dev)
{
	struct pdev_srm_saved_conf *tmp;
	static int printed = 0;

	if (!alpha_using_srm || pci_probe_only)
		return;

	if (!printed) {
		printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
		printed = 1;
	}

	/* Best-effort: a failed allocation just skips this device. */
	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__);
		return;
	}
	tmp->next = srm_saved_configs;
	tmp->dev = dev;

	pci_save_state(dev);

	srm_saved_configs = tmp;
}

/* Put every saved device back into its SRM console configuration. */
void
pci_restore_srm_config(void)
{
	struct pdev_srm_saved_conf *tmp;

	/* No need to restore if probed only. */
	if (pci_probe_only)
		return;

	/* Restore SRM config. */
	for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
		pci_restore_state(tmp->dev);
	}
}
#endif
250 | |||
/* Rebase a bus-relative resource by the root resource's offset. */
void __init
pcibios_fixup_resource(struct resource *res, struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
257 | |||
/* Rebase every assigned I/O and memory resource of "dev" into the
   address space of the hose owning "bus". */
void __init
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
	/* Update device resources. */
	struct pci_controller *hose = (struct pci_controller *)bus->sysdata;
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!dev->resource[i].start)
			continue;	/* unassigned resource */
		if (dev->resource[i].flags & IORESOURCE_IO)
			pcibios_fixup_resource(&dev->resource[i],
					       hose->io_space);
		else if (dev->resource[i].flags & IORESOURCE_MEM)
			pcibios_fixup_resource(&dev->resource[i],
					       hose->mem_space);
	}
}
276 | |||
/*
 * Per-bus fixup: install the hose windows on the root bus, pick up
 * bridge bases in probe-only mode, and rebase each device's resources
 * into the hose's address space.
 */
void __init
pcibios_fixup_bus(struct pci_bus *bus)
{
	/* Propagate hose info into the subordinate devices.  */

	struct pci_controller *hose = bus->sysdata;
	struct pci_dev *dev = bus->self;

	if (!dev) {
		/* Root bus. */
		u32 pci_mem_end;
		u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
		unsigned long end;

		bus->resource[0] = hose->io_space;
		bus->resource[1] = hose->mem_space;

		/* Adjust hose mem_space limit to prevent PCI allocations
		   in the iommu windows. */
		pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
		end = hose->mem_space->start + pci_mem_end;
		if (hose->mem_space->end > end)
			hose->mem_space->end = end;
	} else if (pci_probe_only &&
		   (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pdev_save_srm_config(dev);
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
	}
}
312 | |||
/* Write the routed IRQ into the device's interrupt-line register. */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
318 | |||
319 | /* Most Alphas have straight-forward swizzling needs. */ | ||
320 | |||
321 | u8 __init | ||
322 | common_swizzle(struct pci_dev *dev, u8 *pinp) | ||
323 | { | ||
324 | u8 pin = *pinp; | ||
325 | |||
326 | while (dev->bus->parent) { | ||
327 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); | ||
328 | /* Move up the chain of bridges. */ | ||
329 | dev = dev->bus->self; | ||
330 | } | ||
331 | *pinp = pin; | ||
332 | |||
333 | /* The slot is the slot of the last bridge. */ | ||
334 | return PCI_SLOT(dev->devfn); | ||
335 | } | ||
336 | |||
/* Convert a system-global resource back into bus-relative form by
   subtracting the owning hose's window offset. */
void __devinit
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res)
{
	struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_space->start;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_space->start;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
#endif
356 | |||
357 | int | ||
358 | pcibios_enable_device(struct pci_dev *dev, int mask) | ||
359 | { | ||
360 | u16 cmd, oldcmd; | ||
361 | int i; | ||
362 | |||
363 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
364 | oldcmd = cmd; | ||
365 | |||
366 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
367 | struct resource *res = &dev->resource[i]; | ||
368 | |||
369 | if (res->flags & IORESOURCE_IO) | ||
370 | cmd |= PCI_COMMAND_IO; | ||
371 | else if (res->flags & IORESOURCE_MEM) | ||
372 | cmd |= PCI_COMMAND_MEMORY; | ||
373 | } | ||
374 | |||
375 | if (cmd != oldcmd) { | ||
376 | printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", | ||
377 | pci_name(dev), cmd); | ||
378 | /* Enable the appropriate bits in the PCI command register. */ | ||
379 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
380 | } | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * If we set up a device for bus mastering, we need to check the latency | ||
386 | * timer as certain firmware forgets to set it properly, as seen | ||
387 | * on SX164 and LX164 with SRM. | ||
388 | */ | ||
389 | void | ||
390 | pcibios_set_master(struct pci_dev *dev) | ||
391 | { | ||
392 | u8 lat; | ||
393 | pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); | ||
394 | if (lat >= 16) return; | ||
395 | printk("PCI: Setting latency timer of device %s to 64\n", | ||
396 | pci_name(dev)); | ||
397 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); | ||
398 | } | ||
399 | |||
400 | static void __init | ||
401 | pcibios_claim_one_bus(struct pci_bus *b) | ||
402 | { | ||
403 | struct pci_dev *dev; | ||
404 | struct pci_bus *child_bus; | ||
405 | |||
406 | list_for_each_entry(dev, &b->devices, bus_list) { | ||
407 | int i; | ||
408 | |||
409 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
410 | struct resource *r = &dev->resource[i]; | ||
411 | |||
412 | if (r->parent || !r->start || !r->flags) | ||
413 | continue; | ||
414 | pci_claim_resource(dev, i); | ||
415 | } | ||
416 | } | ||
417 | |||
418 | list_for_each_entry(child_bus, &b->children, node) | ||
419 | pcibios_claim_one_bus(child_bus); | ||
420 | } | ||
421 | |||
422 | static void __init | ||
423 | pcibios_claim_console_setup(void) | ||
424 | { | ||
425 | struct pci_bus *b; | ||
426 | |||
427 | list_for_each_entry(b, &pci_root_buses, node) | ||
428 | pcibios_claim_one_bus(b); | ||
429 | } | ||
430 | |||
431 | void __init | ||
432 | common_init_pci(void) | ||
433 | { | ||
434 | struct pci_controller *hose; | ||
435 | struct pci_bus *bus; | ||
436 | int next_busno; | ||
437 | int need_domain_info = 0; | ||
438 | |||
439 | /* Scan all of the recorded PCI controllers. */ | ||
440 | for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { | ||
441 | bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose); | ||
442 | hose->bus = bus; | ||
443 | hose->need_domain_info = need_domain_info; | ||
444 | next_busno = bus->subordinate + 1; | ||
445 | /* Don't allow 8-bit bus number overflow inside the hose - | ||
446 | reserve some space for bridges. */ | ||
447 | if (next_busno > 224) { | ||
448 | next_busno = 0; | ||
449 | need_domain_info = 1; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | if (pci_probe_only) | ||
454 | pcibios_claim_console_setup(); | ||
455 | |||
456 | pci_assign_unassigned_resources(); | ||
457 | pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); | ||
458 | } | ||
459 | |||
460 | |||
461 | struct pci_controller * __init | ||
462 | alloc_pci_controller(void) | ||
463 | { | ||
464 | struct pci_controller *hose; | ||
465 | |||
466 | hose = alloc_bootmem(sizeof(*hose)); | ||
467 | |||
468 | *hose_tail = hose; | ||
469 | hose_tail = &hose->next; | ||
470 | |||
471 | return hose; | ||
472 | } | ||
473 | |||
474 | struct resource * __init | ||
475 | alloc_resource(void) | ||
476 | { | ||
477 | struct resource *res; | ||
478 | |||
479 | res = alloc_bootmem(sizeof(*res)); | ||
480 | |||
481 | return res; | ||
482 | } | ||
483 | |||
484 | |||
485 | /* Provide information on locations of various I/O regions in physical | ||
486 | memory. Do this on a per-card basis so that we choose the right hose. */ | ||
487 | |||
488 | asmlinkage long | ||
489 | sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) | ||
490 | { | ||
491 | struct pci_controller *hose; | ||
492 | struct pci_dev *dev; | ||
493 | |||
494 | /* from hose or from bus.devfn */ | ||
495 | if (which & IOBASE_FROM_HOSE) { | ||
496 | for(hose = hose_head; hose; hose = hose->next) | ||
497 | if (hose->index == bus) break; | ||
498 | if (!hose) return -ENODEV; | ||
499 | } else { | ||
500 | /* Special hook for ISA access. */ | ||
501 | if (bus == 0 && dfn == 0) { | ||
502 | hose = pci_isa_hose; | ||
503 | } else { | ||
504 | dev = pci_find_slot(bus, dfn); | ||
505 | if (!dev) | ||
506 | return -ENODEV; | ||
507 | hose = dev->sysdata; | ||
508 | } | ||
509 | } | ||
510 | |||
511 | switch (which & ~IOBASE_FROM_HOSE) { | ||
512 | case IOBASE_HOSE: | ||
513 | return hose->index; | ||
514 | case IOBASE_SPARSE_MEM: | ||
515 | return hose->sparse_mem_base; | ||
516 | case IOBASE_DENSE_MEM: | ||
517 | return hose->dense_mem_base; | ||
518 | case IOBASE_SPARSE_IO: | ||
519 | return hose->sparse_io_base; | ||
520 | case IOBASE_DENSE_IO: | ||
521 | return hose->dense_io_base; | ||
522 | case IOBASE_ROOT_BUS: | ||
523 | return hose->bus->number; | ||
524 | } | ||
525 | |||
526 | return -EOPNOTSUPP; | ||
527 | } | ||
528 | |||
/* Create an __iomem token from a PCI BAR.  Copied from lib/iomap.c with
   no changes, since we don't want the other things in that object file. */

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	/* Unassigned or zero-length BARs cannot be mapped. */
	if (!len || !start)
		return NULL;
	/* Cap the mapping at MAXLEN if the caller asked for less. */
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		/* Not checking IORESOURCE_CACHEABLE because alpha does
		   not distinguish between ioremap and ioremap_nocache. */
		return ioremap(start, len);
	}
	/* Neither I/O nor memory resource: nothing to map. */
	return NULL;
}
551 | |||
/* Destroy that token.  Not copied from lib/iomap.c. */

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	/* Only MMIO tokens carry an ioremap() mapping to tear down;
	   port-space tokens are left alone. */
	if (__is_mmio(addr))
		iounmap(addr);
}
559 | |||
560 | EXPORT_SYMBOL(pci_iomap); | ||
561 | EXPORT_SYMBOL(pci_iounmap); | ||
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h new file mode 100644 index 000000000000..f8b74995a002 --- /dev/null +++ b/arch/alpha/kernel/pci_impl.h | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/pci_impl.h | ||
3 | * | ||
4 | * This file contains declarations and inline functions for interfacing | ||
5 | * with the PCI initialization routines. | ||
6 | */ | ||
7 | |||
8 | struct pci_dev; | ||
9 | struct pci_controller; | ||
10 | struct pci_iommu_arena; | ||
11 | |||
12 | /* | ||
13 | * We can't just blindly use 64K for machines with EISA busses; they | ||
14 | * may also have PCI-PCI bridges present, and then we'd configure the | ||
15 | * bridge incorrectly. | ||
16 | * | ||
17 | * Also, we start at 0x8000 or 0x9000, in hopes to get all devices' | ||
18 | * IO space areas allocated *before* 0xC000; this is because certain | ||
19 | * BIOSes (Millennium for one) use PCI Config space "mechanism #2" | ||
20 | * accesses to probe the bus. If a device's registers appear at 0xC000, | ||
21 | * it may see an INx/OUTx at that address during BIOS emulation of the | ||
22 | * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense. | ||
23 | */ | ||
24 | |||
25 | #define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */ | ||
26 | #define DEFAULT_IO_BASE 0x8000 /* start at 8th slot */ | ||
27 | |||
28 | /* | ||
29 | * We try to make the DEFAULT_MEM_BASE addresses *always* have more than | ||
30 | * a single bit set. This is so that devices like the broken Myrinet card | ||
31 | * will always have a PCI memory address that will never match a IDSEL | ||
32 | * address in PCI Config space, which can cause problems with early rev cards. | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * An XL is AVANTI (APECS) family, *but* it has only 27 bits of ISA address | ||
37 | * that get passed through the PCI<->ISA bridge chip. Although this causes | ||
38 | * us to set the PCI->Mem window bases lower than normal, we still allocate | ||
39 | * PCI bus devices' memory addresses *below* the low DMA mapping window, | ||
40 | * and hope they fit below 64Mb (to avoid conflicts), and so that they can | ||
41 | * be accessed via SPARSE space. | ||
42 | * | ||
43 | * We accept the risk that a broken Myrinet card will be put into a true XL | ||
44 | * and thus can more easily run into the problem described below. | ||
45 | */ | ||
46 | #define XL_DEFAULT_MEM_BASE ((16+2)*1024*1024) /* 16M to 64M-1 is avail */ | ||
47 | |||
48 | /* | ||
49 | * APECS and LCA have only 34 bits for physical addresses, thus limiting PCI | ||
50 | * bus memory addresses for SPARSE access to be less than 128Mb. | ||
51 | */ | ||
52 | #define APECS_AND_LCA_DEFAULT_MEM_BASE ((16+2)*1024*1024) | ||
53 | |||
54 | /* | ||
55 | * Because MCPCIA and T2 core logic support more bits for | ||
56 | * physical addresses, they should allow an expanded range of SPARSE | ||
57 | * memory addresses. However, we do not use them all, in order to | ||
58 | * avoid the HAE manipulation that would be needed. | ||
59 | */ | ||
60 | #define MCPCIA_DEFAULT_MEM_BASE ((32+2)*1024*1024) | ||
61 | #define T2_DEFAULT_MEM_BASE ((16+1)*1024*1024) | ||
62 | |||
63 | /* | ||
64 | * Because CIA and PYXIS have more bits for physical addresses, | ||
65 | * they support an expanded range of SPARSE memory addresses. | ||
66 | */ | ||
67 | #define DEFAULT_MEM_BASE ((128+16)*1024*1024) | ||
68 | |||
69 | /* ??? Experimenting with no HAE for CIA. */ | ||
70 | #define CIA_DEFAULT_MEM_BASE ((32+2)*1024*1024) | ||
71 | |||
72 | #define IRONGATE_DEFAULT_MEM_BASE ((256*8-16)*1024*1024) | ||
73 | |||
74 | #define DEFAULT_AGP_APER_SIZE (64*1024*1024) | ||
75 | |||
76 | /* | ||
77 | * A small note about bridges and interrupts. The DECchip 21050 (and | ||
78 | * later) adheres to the PCI-PCI bridge specification. This says that | ||
79 | * the interrupts on the other side of a bridge are swizzled in the | ||
80 | * following manner: | ||
81 | * | ||
82 | * Dev Interrupt Interrupt | ||
83 | * Pin on Pin on | ||
84 | * Device Connector | ||
85 | * | ||
86 | * 4 A A | ||
87 | * B B | ||
88 | * C C | ||
89 | * D D | ||
90 | * | ||
91 | * 5 A B | ||
92 | * B C | ||
93 | * C D | ||
94 | * D A | ||
95 | * | ||
96 | * 6 A C | ||
97 | * B D | ||
98 | * C A | ||
99 | * D B | ||
100 | * | ||
101 | * 7 A D | ||
102 | * B A | ||
103 | * C B | ||
104 | * D C | ||
105 | * | ||
106 | * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A. | ||
107 | * Thus, each swizzle is ((pin-1) + (device#-4)) % 4 | ||
108 | * | ||
109 | * The following code swizzles for exactly one bridge. The routine | ||
110 | * common_swizzle below handles multiple bridges. But there are a | ||
111 | * couple boards that do strange things, so we define this here. | ||
112 | */ | ||
113 | |||
114 | static inline u8 bridge_swizzle(u8 pin, u8 slot) | ||
115 | { | ||
116 | return (((pin-1) + slot) % 4) + 1; | ||
117 | } | ||
118 | |||
119 | |||
120 | /* The following macro is used to implement the table-based irq mapping | ||
121 | function for all single-bus Alphas. */ | ||
122 | |||
123 | #define COMMON_TABLE_LOOKUP \ | ||
124 | ({ long _ctl_ = -1; \ | ||
125 | if (slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot) \ | ||
126 | _ctl_ = irq_tab[slot - min_idsel][pin]; \ | ||
127 | _ctl_; }) | ||
128 | |||
129 | |||
/* A PCI IOMMU allocation arena.  There are typically two of these
   regions per bus.  */
/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
   lives directly on the host bridge (no tlb?).  We don't support this
   machine, but if we ever did, we'd need to parameterize all this quite
   a bit further.  Probably with per-bus operation tables.  */

struct pci_iommu_arena
{
	spinlock_t lock;		/* protects ptes and next_entry */
	struct pci_controller *hose;	/* host bridge owning this window */
#define	IOMMU_INVALID_PTE 0x2	/* 32:63 bits MBZ */
#define	IOMMU_RESERVED_PTE 0xface
	unsigned long *ptes;		/* one pte per page of the window */
	dma_addr_t dma_base;		/* bus address where the window starts */
	unsigned int size;		/* window size in bytes */
	unsigned int next_entry;	/* pte index where the next search starts */
	unsigned int align_entry;	/* minimum allocation alignment, in ptes */
};
149 | |||
150 | #if defined(CONFIG_ALPHA_SRM) && \ | ||
151 | (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) | ||
152 | # define NEED_SRM_SAVE_RESTORE | ||
153 | #else | ||
154 | # undef NEED_SRM_SAVE_RESTORE | ||
155 | #endif | ||
156 | |||
157 | #if defined(CONFIG_ALPHA_GENERIC) || defined(NEED_SRM_SAVE_RESTORE) | ||
158 | # define ALPHA_RESTORE_SRM_SETUP | ||
159 | #else | ||
160 | # undef ALPHA_RESTORE_SRM_SETUP | ||
161 | #endif | ||
162 | |||
163 | #ifdef ALPHA_RESTORE_SRM_SETUP | ||
164 | /* Store PCI device configuration left by SRM here. */ | ||
165 | struct pdev_srm_saved_conf | ||
166 | { | ||
167 | struct pdev_srm_saved_conf *next; | ||
168 | struct pci_dev *dev; | ||
169 | }; | ||
170 | |||
171 | extern void pci_restore_srm_config(void); | ||
172 | #else | ||
173 | #define pdev_save_srm_config(dev) do {} while (0) | ||
174 | #define pci_restore_srm_config() do {} while (0) | ||
175 | #endif | ||
176 | |||
177 | /* The hose list. */ | ||
178 | extern struct pci_controller *hose_head, **hose_tail; | ||
179 | extern struct pci_controller *pci_isa_hose; | ||
180 | |||
181 | /* Indicate that we trust the console to configure things properly. */ | ||
182 | extern int pci_probe_only; | ||
183 | |||
184 | extern unsigned long alpha_agpgart_size; | ||
185 | |||
186 | extern void common_init_pci(void); | ||
187 | extern u8 common_swizzle(struct pci_dev *, u8 *); | ||
188 | extern struct pci_controller *alloc_pci_controller(void); | ||
189 | extern struct resource *alloc_resource(void); | ||
190 | |||
191 | extern struct pci_iommu_arena *iommu_arena_new_node(int, | ||
192 | struct pci_controller *, | ||
193 | dma_addr_t, unsigned long, | ||
194 | unsigned long); | ||
195 | extern struct pci_iommu_arena *iommu_arena_new(struct pci_controller *, | ||
196 | dma_addr_t, unsigned long, | ||
197 | unsigned long); | ||
198 | extern const char *const pci_io_names[]; | ||
199 | extern const char *const pci_mem_names[]; | ||
200 | extern const char pci_hae0_name[]; | ||
201 | |||
202 | extern unsigned long size_for_memory(unsigned long max); | ||
203 | |||
204 | extern int iommu_reserve(struct pci_iommu_arena *, long, long); | ||
205 | extern int iommu_release(struct pci_iommu_arena *, long, long); | ||
206 | extern int iommu_bind(struct pci_iommu_arena *, long, long, unsigned long *); | ||
207 | extern int iommu_unbind(struct pci_iommu_arena *, long, long); | ||
208 | |||
209 | |||
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c new file mode 100644 index 000000000000..7cb23f12ecbd --- /dev/null +++ b/arch/alpha/kernel/pci_iommu.c | |||
@@ -0,0 +1,971 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/pci_iommu.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/pci.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/bootmem.h> | ||
10 | |||
11 | #include <asm/io.h> | ||
12 | #include <asm/hwrpb.h> | ||
13 | |||
14 | #include "proto.h" | ||
15 | #include "pci_impl.h" | ||
16 | |||
17 | |||
18 | #define DEBUG_ALLOC 0 | ||
19 | #if DEBUG_ALLOC > 0 | ||
20 | # define DBGA(args...) printk(KERN_DEBUG args) | ||
21 | #else | ||
22 | # define DBGA(args...) | ||
23 | #endif | ||
24 | #if DEBUG_ALLOC > 1 | ||
25 | # define DBGA2(args...) printk(KERN_DEBUG args) | ||
26 | #else | ||
27 | # define DBGA2(args...) | ||
28 | #endif | ||
29 | |||
30 | #define DEBUG_NODIRECT 0 | ||
31 | #define DEBUG_FORCEDAC 0 | ||
32 | |||
33 | #define ISA_DMA_MASK 0x00ffffff | ||
34 | |||
35 | static inline unsigned long | ||
36 | mk_iommu_pte(unsigned long paddr) | ||
37 | { | ||
38 | return (paddr >> (PAGE_SHIFT-1)) | 1; | ||
39 | } | ||
40 | |||
41 | static inline long | ||
42 | calc_npages(long bytes) | ||
43 | { | ||
44 | return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
45 | } | ||
46 | |||
47 | |||
48 | /* Return the minimum of MAX or the first power of two larger | ||
49 | than main memory. */ | ||
50 | |||
51 | unsigned long | ||
52 | size_for_memory(unsigned long max) | ||
53 | { | ||
54 | unsigned long mem = max_low_pfn << PAGE_SHIFT; | ||
55 | if (mem < max) | ||
56 | max = 1UL << ceil_log2(mem); | ||
57 | return max; | ||
58 | } | ||
59 | |||
/* Create a new IOMMU arena descriptor and its pte table for a
   WINDOW_SIZE-byte DMA window at bus address BASE on controller HOSE,
   preferring bootmem from NUMA node NID with a system-wide fallback.
   ALIGN can over-align the pte table beyond the required minimum. */
struct pci_iommu_arena *
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	/* One unsigned-long pte per page of the window. */
	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena. */
	if (align < mem_size)
		align = mem_size;


#ifdef CONFIG_DISCONTIGMEM

	/* Try node-local bootmem first; fall back to any node if NID
	   has no memory or the allocation fails. */
	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       " falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       " falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs. */
	arena->align_entry = 1;

	return arena;
}
118 | |||
119 | struct pci_iommu_arena * | ||
120 | iommu_arena_new(struct pci_controller *hose, dma_addr_t base, | ||
121 | unsigned long window_size, unsigned long align) | ||
122 | { | ||
123 | return iommu_arena_new_node(0, hose, base, window_size, align); | ||
124 | } | ||
125 | |||
126 | /* Must be called with the arena lock held */ | ||
127 | static long | ||
128 | iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask) | ||
129 | { | ||
130 | unsigned long *ptes; | ||
131 | long i, p, nent; | ||
132 | |||
133 | /* Search forward for the first mask-aligned sequence of N free ptes */ | ||
134 | ptes = arena->ptes; | ||
135 | nent = arena->size >> PAGE_SHIFT; | ||
136 | p = (arena->next_entry + mask) & ~mask; | ||
137 | i = 0; | ||
138 | while (i < n && p+i < nent) { | ||
139 | if (ptes[p+i]) | ||
140 | p = (p + i + 1 + mask) & ~mask, i = 0; | ||
141 | else | ||
142 | i = i + 1; | ||
143 | } | ||
144 | |||
145 | if (i < n) { | ||
146 | /* Reached the end. Flush the TLB and restart the | ||
147 | search from the beginning. */ | ||
148 | alpha_mv.mv_pci_tbi(arena->hose, 0, -1); | ||
149 | |||
150 | p = 0, i = 0; | ||
151 | while (i < n && p+i < nent) { | ||
152 | if (ptes[p+i]) | ||
153 | p = (p + i + 1 + mask) & ~mask, i = 0; | ||
154 | else | ||
155 | i = i + 1; | ||
156 | } | ||
157 | |||
158 | if (i < n) | ||
159 | return -1; | ||
160 | } | ||
161 | |||
162 | /* Success. It's the responsibility of the caller to mark them | ||
163 | in use before releasing the lock */ | ||
164 | return p; | ||
165 | } | ||
166 | |||
167 | static long | ||
168 | iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align) | ||
169 | { | ||
170 | unsigned long flags; | ||
171 | unsigned long *ptes; | ||
172 | long i, p, mask; | ||
173 | |||
174 | spin_lock_irqsave(&arena->lock, flags); | ||
175 | |||
176 | /* Search for N empty ptes */ | ||
177 | ptes = arena->ptes; | ||
178 | mask = max(align, arena->align_entry) - 1; | ||
179 | p = iommu_arena_find_pages(arena, n, mask); | ||
180 | if (p < 0) { | ||
181 | spin_unlock_irqrestore(&arena->lock, flags); | ||
182 | return -1; | ||
183 | } | ||
184 | |||
185 | /* Success. Mark them all in use, ie not zero and invalid | ||
186 | for the iommu tlb that could load them from under us. | ||
187 | The chip specific bits will fill this in with something | ||
188 | kosher when we return. */ | ||
189 | for (i = 0; i < n; ++i) | ||
190 | ptes[p+i] = IOMMU_INVALID_PTE; | ||
191 | |||
192 | arena->next_entry = p + n; | ||
193 | spin_unlock_irqrestore(&arena->lock, flags); | ||
194 | |||
195 | return p; | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) | ||
200 | { | ||
201 | unsigned long *p; | ||
202 | long i; | ||
203 | |||
204 | p = arena->ptes + ofs; | ||
205 | for (i = 0; i < n; ++i) | ||
206 | p[i] = 0; | ||
207 | } | ||
208 | |||
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed. */

/* Core mapping routine: tries, in order, the direct-map window, DAC
   (64-bit) addressing if DAC_ALLOWED, and finally a scatter-gather
   arena allocation.  Returns 0 on failure. */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	/* A NULL pdev means an ISA device: use the ISA hose and the
	   24-bit ISA DMA mask. */
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window. */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier. */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	/* Use the PCI arena if it exists and fits under the device's
	   DMA mask; otherwise fall back to the ISA arena. */
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	/* Fill in one pte per page spanned by the buffer. */
	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	/* Bus address = arena base + pte offset + offset within page. */
	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
291 | |||
292 | dma_addr_t | ||
293 | pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir) | ||
294 | { | ||
295 | int dac_allowed; | ||
296 | |||
297 | if (dir == PCI_DMA_NONE) | ||
298 | BUG(); | ||
299 | |||
300 | dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; | ||
301 | return pci_map_single_1(pdev, cpu_addr, size, dac_allowed); | ||
302 | } | ||
303 | |||
304 | dma_addr_t | ||
305 | pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset, | ||
306 | size_t size, int dir) | ||
307 | { | ||
308 | int dac_allowed; | ||
309 | |||
310 | if (dir == PCI_DMA_NONE) | ||
311 | BUG(); | ||
312 | |||
313 | dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; | ||
314 | return pci_map_single_1(pdev, (char *)page_address(page) + offset, | ||
315 | size, dac_allowed); | ||
316 | } | ||
317 | |||
318 | /* Unmap a single streaming mode DMA translation. The DMA_ADDR and | ||
319 | SIZE must match what was provided for in a previous pci_map_single | ||
320 | call. All other usages are undefined. After this call, reads by | ||
321 | the cpu to the buffer are guaranteed to see whatever the device | ||
322 | wrote there. */ | ||
323 | |||
324 | void | ||
325 | pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, | ||
326 | int direction) | ||
327 | { | ||
328 | unsigned long flags; | ||
329 | struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; | ||
330 | struct pci_iommu_arena *arena; | ||
331 | long dma_ofs, npages; | ||
332 | |||
333 | if (direction == PCI_DMA_NONE) | ||
334 | BUG(); | ||
335 | |||
336 | if (dma_addr >= __direct_map_base | ||
337 | && dma_addr < __direct_map_base + __direct_map_size) { | ||
338 | /* Nothing to do. */ | ||
339 | |||
340 | DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n", | ||
341 | dma_addr, size, __builtin_return_address(0)); | ||
342 | |||
343 | return; | ||
344 | } | ||
345 | |||
346 | if (dma_addr > 0xffffffff) { | ||
347 | DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n", | ||
348 | dma_addr, size, __builtin_return_address(0)); | ||
349 | return; | ||
350 | } | ||
351 | |||
352 | arena = hose->sg_pci; | ||
353 | if (!arena || dma_addr < arena->dma_base) | ||
354 | arena = hose->sg_isa; | ||
355 | |||
356 | dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; | ||
357 | if (dma_ofs * PAGE_SIZE >= arena->size) { | ||
358 | printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx " | ||
359 | " base %lx size %x\n", dma_addr, arena->dma_base, | ||
360 | arena->size); | ||
361 | return; | ||
362 | BUG(); | ||
363 | } | ||
364 | |||
365 | npages = calc_npages((dma_addr & ~PAGE_MASK) + size); | ||
366 | |||
367 | spin_lock_irqsave(&arena->lock, flags); | ||
368 | |||
369 | iommu_arena_free(arena, dma_ofs, npages); | ||
370 | |||
371 | /* If we're freeing ptes above the `next_entry' pointer (they | ||
372 | may have snuck back into the TLB since the last wrap flush), | ||
373 | we need to flush the TLB before reallocating the latter. */ | ||
374 | if (dma_ofs >= arena->next_entry) | ||
375 | alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); | ||
376 | |||
377 | spin_unlock_irqrestore(&arena->lock, flags); | ||
378 | |||
379 | DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n", | ||
380 | dma_addr, size, npages, __builtin_return_address(0)); | ||
381 | } | ||
382 | |||
/* Unmap a page mapping made by pci_map_page.  Page mappings are
   created through the same pci_map_single_1 path as single-buffer
   mappings, so tear-down is identical. */
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
389 | |||
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined. */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	int gfp = GFP_ATOMIC;	/* must not sleep in this path */

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory. */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	/* Map with DAC disabled (last arg 0): consistent mappings must
	   yield a 32-bit bus address here. */
	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
430 | |||
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what as passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal. */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	/* Undo the mapping first, then release the pages. */
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
447 | |||
448 | |||
449 | /* Classify the elements of the scatterlist. Write dma_address | ||
450 | of each element with: | ||
451 | 0 : Followers all physically adjacent. | ||
452 | 1 : Followers all virtually adjacent. | ||
453 | -1 : Not leader, physically adjacent to previous. | ||
454 | -2 : Not leader, virtually adjacent to previous. | ||
455 | Write dma_length of each leader with the combined lengths of | ||
456 | the mergable followers. */ | ||
457 | |||
458 | #define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset) | ||
459 | #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) | ||
460 | |||
/* Partition the scatterlist [SG, END) into runs of mergable entries.
   Each run's first entry (the "leader") gets dma_address 0 (followers
   all physically adjacent) or 1 (followers virtually adjacent, i.e.
   mergable only through the IOMMU), and dma_length set to the run's
   combined length.  Followers get dma_address -1 (physically adjacent
   to previous) or -2 (virtually adjacent).  VIRT_OK permits virtual
   merging. */
static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
	unsigned long next_paddr;	/* expected paddr if next sg is contiguous */
	struct scatterlist *leader;
	long leader_flag, leader_length;

	/* The first entry always starts a run. */
	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (next_paddr == addr) {
			/* Physically contiguous with the previous entry. */
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			/* Both addresses page-aligned at the seam: mergable
			   through IOMMU mapping, if the caller allows it. */
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			/* Not mergable: close out the run, start a new one. */
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	/* Close out the final run. */
	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
499 | |||
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  Returns 0 when a direct or DAC map sufficed, 1 when
   the iommu was used, and -1 on allocation failure.  */

static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it. */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it. */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous. */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die. */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  Note the
		   reclassification with virt_ok=0 splits the run into
		   physically contiguous sub-runs before recursing. */
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes. */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		/* Coalesce the physically-adjacent followers (marked -1
		   by sg_classify) into one pte-filling pass.  */
		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
601 | |||
/* Map a scatterlist for PCI DMA.  Returns the number of coalesced
   entries written (which may be fewer than NENTS), or 0 on failure.  */
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  Virtual merging
	   is only permitted when a TLB-invalidate hook (i.e. an iommu)
	   exists on this machine vector. */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things. */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  Non-leaders (negative dma_address
	   markers from sg_classify) are skipped. */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg. */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now. */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
675 | |||
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;	/* bounds of the range of ptes freed */

	if (direction == PCI_DMA_NONE)
		BUG();

	/* No iommu on this machine: nothing was allocated at map time.  */
	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		/* dma_length == 0 is the end marker pci_map_sg wrote.  */
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do. */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do. */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		/* Track the union of all iommu ranges freed.  */
		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
755 | |||
756 | |||
757 | /* Return whether the given PCI device DMA address mask can be | ||
758 | supported properly. */ | ||
759 | |||
760 | int | ||
761 | pci_dma_supported(struct pci_dev *pdev, u64 mask) | ||
762 | { | ||
763 | struct pci_controller *hose; | ||
764 | struct pci_iommu_arena *arena; | ||
765 | |||
766 | /* If there exists a direct map, and the mask fits either | ||
767 | the entire direct mapped space or the total system memory as | ||
768 | shifted by the map base */ | ||
769 | if (__direct_map_size != 0 | ||
770 | && (__direct_map_base + __direct_map_size - 1 <= mask || | ||
771 | __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) | ||
772 | return 1; | ||
773 | |||
774 | /* Check that we have a scatter-gather arena that fits. */ | ||
775 | hose = pdev ? pdev->sysdata : pci_isa_hose; | ||
776 | arena = hose->sg_isa; | ||
777 | if (arena && arena->dma_base + arena->size - 1 <= mask) | ||
778 | return 1; | ||
779 | arena = hose->sg_pci; | ||
780 | if (arena && arena->dma_base + arena->size - 1 <= mask) | ||
781 | return 1; | ||
782 | |||
783 | /* As last resort try ZONE_DMA. */ | ||
784 | if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) | ||
785 | return 1; | ||
786 | |||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | |||
791 | /* | ||
792 | * AGP GART extensions to the IOMMU | ||
793 | */ | ||
794 | int | ||
795 | iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) | ||
796 | { | ||
797 | unsigned long flags; | ||
798 | unsigned long *ptes; | ||
799 | long i, p; | ||
800 | |||
801 | if (!arena) return -EINVAL; | ||
802 | |||
803 | spin_lock_irqsave(&arena->lock, flags); | ||
804 | |||
805 | /* Search for N empty ptes. */ | ||
806 | ptes = arena->ptes; | ||
807 | p = iommu_arena_find_pages(arena, pg_count, align_mask); | ||
808 | if (p < 0) { | ||
809 | spin_unlock_irqrestore(&arena->lock, flags); | ||
810 | return -1; | ||
811 | } | ||
812 | |||
813 | /* Success. Mark them all reserved (ie not zero and invalid) | ||
814 | for the iommu tlb that could load them from under us. | ||
815 | They will be filled in with valid bits by _bind() */ | ||
816 | for (i = 0; i < pg_count; ++i) | ||
817 | ptes[p+i] = IOMMU_RESERVED_PTE; | ||
818 | |||
819 | arena->next_entry = p + pg_count; | ||
820 | spin_unlock_irqrestore(&arena->lock, flags); | ||
821 | |||
822 | return p; | ||
823 | } | ||
824 | |||
825 | int | ||
826 | iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) | ||
827 | { | ||
828 | unsigned long *ptes; | ||
829 | long i; | ||
830 | |||
831 | if (!arena) return -EINVAL; | ||
832 | |||
833 | ptes = arena->ptes; | ||
834 | |||
835 | /* Make sure they're all reserved first... */ | ||
836 | for(i = pg_start; i < pg_start + pg_count; i++) | ||
837 | if (ptes[i] != IOMMU_RESERVED_PTE) | ||
838 | return -EBUSY; | ||
839 | |||
840 | iommu_arena_free(arena, pg_start, pg_count); | ||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | int | ||
845 | iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, | ||
846 | unsigned long *physaddrs) | ||
847 | { | ||
848 | unsigned long flags; | ||
849 | unsigned long *ptes; | ||
850 | long i, j; | ||
851 | |||
852 | if (!arena) return -EINVAL; | ||
853 | |||
854 | spin_lock_irqsave(&arena->lock, flags); | ||
855 | |||
856 | ptes = arena->ptes; | ||
857 | |||
858 | for(j = pg_start; j < pg_start + pg_count; j++) { | ||
859 | if (ptes[j] != IOMMU_RESERVED_PTE) { | ||
860 | spin_unlock_irqrestore(&arena->lock, flags); | ||
861 | return -EBUSY; | ||
862 | } | ||
863 | } | ||
864 | |||
865 | for(i = 0, j = pg_start; i < pg_count; i++, j++) | ||
866 | ptes[j] = mk_iommu_pte(physaddrs[i]); | ||
867 | |||
868 | spin_unlock_irqrestore(&arena->lock, flags); | ||
869 | |||
870 | return 0; | ||
871 | } | ||
872 | |||
873 | int | ||
874 | iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) | ||
875 | { | ||
876 | unsigned long *p; | ||
877 | long i; | ||
878 | |||
879 | if (!arena) return -EINVAL; | ||
880 | |||
881 | p = arena->ptes + pg_start; | ||
882 | for(i = 0; i < pg_count; i++) | ||
883 | p[i] = IOMMU_RESERVED_PTE; | ||
884 | |||
885 | return 0; | ||
886 | } | ||
887 | |||
888 | /* True if the machine supports DAC addressing, and DEV can | ||
889 | make use of it given MASK. */ | ||
890 | |||
891 | int | ||
892 | pci_dac_dma_supported(struct pci_dev *dev, u64 mask) | ||
893 | { | ||
894 | dma64_addr_t dac_offset = alpha_mv.pci_dac_offset; | ||
895 | int ok = 1; | ||
896 | |||
897 | /* If this is not set, the machine doesn't support DAC at all. */ | ||
898 | if (dac_offset == 0) | ||
899 | ok = 0; | ||
900 | |||
901 | /* The device has to be able to address our DAC bit. */ | ||
902 | if ((dac_offset & dev->dma_mask) != dac_offset) | ||
903 | ok = 0; | ||
904 | |||
905 | /* If both conditions above are met, we are fine. */ | ||
906 | DBGA("pci_dac_dma_supported %s from %p\n", | ||
907 | ok ? "yes" : "no", __builtin_return_address(0)); | ||
908 | |||
909 | return ok; | ||
910 | } | ||
911 | |||
912 | dma64_addr_t | ||
913 | pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, | ||
914 | unsigned long offset, int direction) | ||
915 | { | ||
916 | return (alpha_mv.pci_dac_offset | ||
917 | + __pa(page_address(page)) | ||
918 | + (dma64_addr_t) offset); | ||
919 | } | ||
920 | |||
921 | struct page * | ||
922 | pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) | ||
923 | { | ||
924 | unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset; | ||
925 | return virt_to_page(__va(paddr)); | ||
926 | } | ||
927 | |||
928 | unsigned long | ||
929 | pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) | ||
930 | { | ||
931 | return (dma_addr & ~PAGE_MASK); | ||
932 | } | ||
933 | |||
934 | |||
/* Helper for generic DMA-mapping functions: map a struct device to
   the pci_dev whose DMA properties apply, or NULL for a plain ISA
   bus master (dma_mask 0xffffff assumed by the caller).  */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
960 | |||
961 | int | ||
962 | dma_set_mask(struct device *dev, u64 mask) | ||
963 | { | ||
964 | if (!dev->dma_mask || | ||
965 | !pci_dma_supported(alpha_gendev_to_pci(dev), mask)) | ||
966 | return -EIO; | ||
967 | |||
968 | *dev->dma_mask = mask; | ||
969 | |||
970 | return 0; | ||
971 | } | ||
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c new file mode 100644 index 000000000000..4933f3ce5833 --- /dev/null +++ b/arch/alpha/kernel/process.c | |||
@@ -0,0 +1,528 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/process.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This file handles the architecture-dependent parts of process handling. | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/stddef.h> | ||
20 | #include <linux/unistd.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/user.h> | ||
24 | #include <linux/a.out.h> | ||
25 | #include <linux/utsname.h> | ||
26 | #include <linux/time.h> | ||
27 | #include <linux/major.h> | ||
28 | #include <linux/stat.h> | ||
29 | #include <linux/mman.h> | ||
30 | #include <linux/elfcore.h> | ||
31 | #include <linux/reboot.h> | ||
32 | #include <linux/tty.h> | ||
33 | #include <linux/console.h> | ||
34 | |||
35 | #include <asm/reg.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #include <asm/system.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/hwrpb.h> | ||
41 | #include <asm/fpu.h> | ||
42 | |||
43 | #include "proto.h" | ||
44 | #include "pci_impl.h" | ||
45 | |||
/* Fallback idle loop body: no power saving, just keep the compiler
   from optimizing the caller's polling loop away.  */
void default_idle(void)
{
	barrier();
}
50 | |||
/* Per-CPU idle loop: spin in the idle routine until a reschedule is
   needed, then call the scheduler, forever.  */
void
cpu_idle(void)
{
	while (1) {
		void (*idle)(void) = default_idle;
		/* FIXME -- EV6 and LCA45 know how to power down
		   the CPU.  */

		while (!need_resched())
			idle();
		schedule();
	}
}
64 | |||
65 | |||
/* Arguments marshalled to common_shutdown_1 when it is broadcast to
   every CPU via on_each_cpu().  */
struct halt_info {
	int mode;		/* LINUX_REBOOT_CMD_* value */
	char *restart_cmd;	/* optional command for a restart */
};
70 | |||
/* Per-CPU shutdown worker, run on every CPU by common_shutdown().
   Secondaries record themselves halted and stop here; the boot CPU
   updates the hwrpb per-cpu flags for the firmware, restores SRM
   state, and halts (or returns to loop, for MILO).  */
static void
common_shutdown_1(void *generic_ptr)
{
	struct halt_info *how = (struct halt_info *)generic_ptr;
	struct percpu_struct *cpup;
	unsigned long *pflags, flags;
	int cpuid = smp_processor_id();

	/* No point in taking interrupts anymore. */
	local_irq_disable();

	/* Locate this CPU's slot in the hwrpb per-processor area.  */
	cpup = (struct percpu_struct *)
			((unsigned long)hwrpb + hwrpb->processor_offset
			 + hwrpb->processor_size * cpuid);
	pflags = &cpup->flags;
	flags = *pflags;

	/* Clear reason to "default"; clear "bootstrap in progress". */
	flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
	/* Secondaries halt here. */
	if (cpuid != boot_cpuid) {
		flags |= 0x00040000UL; /* "remain halted" */
		*pflags = flags;
		clear_bit(cpuid, &cpu_present_mask);
		halt();
	}
#endif

	if (how->mode == LINUX_REBOOT_CMD_RESTART) {
		if (!how->restart_cmd) {
			flags |= 0x00020000UL; /* "cold bootstrap" */
		} else {
			/* For SRM, we could probably set environment
			   variables to get this to work.  We'd have to
			   delay this until after srm_paging_stop unless
			   we ever got srm_fixup working.

			   At the moment, SRM will use the last boot device,
			   but the file and flags will be the defaults, when
			   doing a "warm" bootstrap.  */
			flags |= 0x00030000UL; /* "warm bootstrap" */
		}
	} else {
		flags |= 0x00040000UL; /* "remain halted" */
	}
	*pflags = flags;

#ifdef CONFIG_SMP
	/* Wait for the secondaries to halt. */
	cpu_clear(boot_cpuid, cpu_possible_map);
	while (cpus_weight(cpu_possible_map))
		barrier();
#endif

	/* If booted from SRM, reset some of the original environment. */
	if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
		/* This has the effect of resetting the VGA video origin.  */
		take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif
		pci_restore_srm_config();
		set_hae(srm_hae);
	}

	if (alpha_mv.kill_arch)
		alpha_mv.kill_arch(how->mode);

	if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
		/* Unfortunately, since MILO doesn't currently understand
		   the hwrpb bits above, we can't reliably halt the
		   processor and keep it halted.  So just loop.  */
		return;
	}

	if (alpha_using_srm)
		srm_paging_stop();

	halt();
}
152 | |||
153 | static void | ||
154 | common_shutdown(int mode, char *restart_cmd) | ||
155 | { | ||
156 | struct halt_info args; | ||
157 | args.mode = mode; | ||
158 | args.restart_cmd = restart_cmd; | ||
159 | on_each_cpu(common_shutdown_1, &args, 1, 0); | ||
160 | } | ||
161 | |||
/* Reboot the machine, optionally with a firmware restart command.  */
void
machine_restart(char *restart_cmd)
{
	common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
}

EXPORT_SYMBOL(machine_restart);
169 | |||
/* Halt the machine without powering it off.  */
void
machine_halt(void)
{
	common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
}

EXPORT_SYMBOL(machine_halt);
177 | |||
/* Power the machine off (as far as the platform allows).  */
void
machine_power_off(void)
{
	common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
}

EXPORT_SYMBOL(machine_power_off);
185 | |||
/* Used by sysrq-p, among others.  I don't believe r9-r15 are ever
   saved in the context it's used.  */

void
show_regs(struct pt_regs *regs)
{
	dik_show_regs(regs, NULL);
}
194 | |||
195 | /* | ||
196 | * Re-start a thread when doing execve() | ||
197 | */ | ||
198 | void | ||
199 | start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | ||
200 | { | ||
201 | set_fs(USER_DS); | ||
202 | regs->pc = pc; | ||
203 | regs->ps = 8; | ||
204 | wrusp(sp); | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * Free current thread data structures etc.. | ||
209 | */ | ||
210 | void | ||
211 | exit_thread(void) | ||
212 | { | ||
213 | } | ||
214 | |||
/* Reset per-thread arch state for a fresh exec.  */
void
flush_thread(void)
{
	/* Arrange for each exec'ed process to start off with a clean slate
	   with respect to the FPU.  This is all exceptions disabled.  */
	current_thread_info()->ieee_state = 0;
	wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));

	/* Clean slate for TLS.  */
	current_thread_info()->pcb.unique = 0;
}
226 | |||
/* Called when a task is reaped; no arch-specific state to free.  */
void
release_thread(struct task_struct *dead_task)
{
}
231 | |||
232 | /* | ||
233 | * "alpha_clone()".. By the time we get here, the | ||
234 | * non-volatile registers have also been saved on the | ||
235 | * stack. We do some ugly pointer stuff here.. (see | ||
236 | * also copy_thread) | ||
237 | * | ||
238 | * Notice that "fork()" is implemented in terms of clone, | ||
239 | * with parameters (SIGCHLD, 0). | ||
240 | */ | ||
241 | int | ||
242 | alpha_clone(unsigned long clone_flags, unsigned long usp, | ||
243 | int __user *parent_tid, int __user *child_tid, | ||
244 | unsigned long tls_value, struct pt_regs *regs) | ||
245 | { | ||
246 | if (!usp) | ||
247 | usp = rdusp(); | ||
248 | |||
249 | return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid); | ||
250 | } | ||
251 | |||
252 | int | ||
253 | alpha_vfork(struct pt_regs *regs) | ||
254 | { | ||
255 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), | ||
256 | regs, 0, NULL, NULL); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Copy an alpha thread.. | ||
261 | * | ||
262 | * Note the "stack_offset" stuff: when returning to kernel mode, we need | ||
263 | * to have some extra stack-space for the kernel stack that still exists | ||
264 | * after the "ret_from_fork". When returning to user mode, we only want | ||
265 | * the space needed by the syscall stack frame (ie "struct pt_regs"). | ||
266 | * Use the passed "regs" pointer to determine how much space we need | ||
267 | * for a kernel fork(). | ||
268 | */ | ||
269 | |||
270 | int | ||
271 | copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | ||
272 | unsigned long unused, | ||
273 | struct task_struct * p, struct pt_regs * regs) | ||
274 | { | ||
275 | extern void ret_from_fork(void); | ||
276 | |||
277 | struct thread_info *childti = p->thread_info; | ||
278 | struct pt_regs * childregs; | ||
279 | struct switch_stack * childstack, *stack; | ||
280 | unsigned long stack_offset, settls; | ||
281 | |||
282 | stack_offset = PAGE_SIZE - sizeof(struct pt_regs); | ||
283 | if (!(regs->ps & 8)) | ||
284 | stack_offset = (PAGE_SIZE-1) & (unsigned long) regs; | ||
285 | childregs = (struct pt_regs *) | ||
286 | (stack_offset + PAGE_SIZE + (long) childti); | ||
287 | |||
288 | *childregs = *regs; | ||
289 | settls = regs->r20; | ||
290 | childregs->r0 = 0; | ||
291 | childregs->r19 = 0; | ||
292 | childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ | ||
293 | regs->r20 = 0; | ||
294 | stack = ((struct switch_stack *) regs) - 1; | ||
295 | childstack = ((struct switch_stack *) childregs) - 1; | ||
296 | *childstack = *stack; | ||
297 | childstack->r26 = (unsigned long) ret_from_fork; | ||
298 | childti->pcb.usp = usp; | ||
299 | childti->pcb.ksp = (unsigned long) childstack; | ||
300 | childti->pcb.flags = 1; /* set FEN, clear everything else */ | ||
301 | |||
302 | /* Set a new TLS for the child thread? Peek back into the | ||
303 | syscall arguments that we saved on syscall entry. Oops, | ||
304 | except we'd have clobbered it with the parent/child set | ||
305 | of r20. Read the saved copy. */ | ||
306 | /* Note: if CLONE_SETTLS is not set, then we must inherit the | ||
307 | value from the parent, which will have been set by the block | ||
308 | copy in dup_task_struct. This is non-intuitive, but is | ||
309 | required for proper operation in the case of a threaded | ||
310 | application calling fork. */ | ||
311 | if (clone_flags & CLONE_SETTLS) | ||
312 | childti->pcb.unique = settls; | ||
313 | |||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Fill in the user structure for an ECOFF core dump. | ||
319 | */ | ||
320 | void | ||
321 | dump_thread(struct pt_regs * pt, struct user * dump) | ||
322 | { | ||
323 | /* switch stack follows right below pt_regs: */ | ||
324 | struct switch_stack * sw = ((struct switch_stack *) pt) - 1; | ||
325 | |||
326 | dump->magic = CMAGIC; | ||
327 | dump->start_code = current->mm->start_code; | ||
328 | dump->start_data = current->mm->start_data; | ||
329 | dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); | ||
330 | dump->u_tsize = ((current->mm->end_code - dump->start_code) | ||
331 | >> PAGE_SHIFT); | ||
332 | dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) | ||
333 | >> PAGE_SHIFT); | ||
334 | dump->u_ssize = (current->mm->start_stack - dump->start_stack | ||
335 | + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
336 | |||
337 | /* | ||
338 | * We store the registers in an order/format that is | ||
339 | * compatible with DEC Unix/OSF/1 as this makes life easier | ||
340 | * for gdb. | ||
341 | */ | ||
342 | dump->regs[EF_V0] = pt->r0; | ||
343 | dump->regs[EF_T0] = pt->r1; | ||
344 | dump->regs[EF_T1] = pt->r2; | ||
345 | dump->regs[EF_T2] = pt->r3; | ||
346 | dump->regs[EF_T3] = pt->r4; | ||
347 | dump->regs[EF_T4] = pt->r5; | ||
348 | dump->regs[EF_T5] = pt->r6; | ||
349 | dump->regs[EF_T6] = pt->r7; | ||
350 | dump->regs[EF_T7] = pt->r8; | ||
351 | dump->regs[EF_S0] = sw->r9; | ||
352 | dump->regs[EF_S1] = sw->r10; | ||
353 | dump->regs[EF_S2] = sw->r11; | ||
354 | dump->regs[EF_S3] = sw->r12; | ||
355 | dump->regs[EF_S4] = sw->r13; | ||
356 | dump->regs[EF_S5] = sw->r14; | ||
357 | dump->regs[EF_S6] = sw->r15; | ||
358 | dump->regs[EF_A3] = pt->r19; | ||
359 | dump->regs[EF_A4] = pt->r20; | ||
360 | dump->regs[EF_A5] = pt->r21; | ||
361 | dump->regs[EF_T8] = pt->r22; | ||
362 | dump->regs[EF_T9] = pt->r23; | ||
363 | dump->regs[EF_T10] = pt->r24; | ||
364 | dump->regs[EF_T11] = pt->r25; | ||
365 | dump->regs[EF_RA] = pt->r26; | ||
366 | dump->regs[EF_T12] = pt->r27; | ||
367 | dump->regs[EF_AT] = pt->r28; | ||
368 | dump->regs[EF_SP] = rdusp(); | ||
369 | dump->regs[EF_PS] = pt->ps; | ||
370 | dump->regs[EF_PC] = pt->pc; | ||
371 | dump->regs[EF_GP] = pt->gp; | ||
372 | dump->regs[EF_A0] = pt->r16; | ||
373 | dump->regs[EF_A1] = pt->r17; | ||
374 | dump->regs[EF_A2] = pt->r18; | ||
375 | memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Fill in the user structure for a ELF core dump. | ||
380 | */ | ||
381 | void | ||
382 | dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) | ||
383 | { | ||
384 | /* switch stack follows right below pt_regs: */ | ||
385 | struct switch_stack * sw = ((struct switch_stack *) pt) - 1; | ||
386 | |||
387 | dest[ 0] = pt->r0; | ||
388 | dest[ 1] = pt->r1; | ||
389 | dest[ 2] = pt->r2; | ||
390 | dest[ 3] = pt->r3; | ||
391 | dest[ 4] = pt->r4; | ||
392 | dest[ 5] = pt->r5; | ||
393 | dest[ 6] = pt->r6; | ||
394 | dest[ 7] = pt->r7; | ||
395 | dest[ 8] = pt->r8; | ||
396 | dest[ 9] = sw->r9; | ||
397 | dest[10] = sw->r10; | ||
398 | dest[11] = sw->r11; | ||
399 | dest[12] = sw->r12; | ||
400 | dest[13] = sw->r13; | ||
401 | dest[14] = sw->r14; | ||
402 | dest[15] = sw->r15; | ||
403 | dest[16] = pt->r16; | ||
404 | dest[17] = pt->r17; | ||
405 | dest[18] = pt->r18; | ||
406 | dest[19] = pt->r19; | ||
407 | dest[20] = pt->r20; | ||
408 | dest[21] = pt->r21; | ||
409 | dest[22] = pt->r22; | ||
410 | dest[23] = pt->r23; | ||
411 | dest[24] = pt->r24; | ||
412 | dest[25] = pt->r25; | ||
413 | dest[26] = pt->r26; | ||
414 | dest[27] = pt->r27; | ||
415 | dest[28] = pt->r28; | ||
416 | dest[29] = pt->gp; | ||
417 | dest[30] = rdusp(); | ||
418 | dest[31] = pt->pc; | ||
419 | |||
420 | /* Once upon a time this was the PS value. Which is stupid | ||
421 | since that is always 8 for usermode. Usurped for the more | ||
422 | useful value of the thread's UNIQUE field. */ | ||
423 | dest[32] = ti->pcb.unique; | ||
424 | } | ||
425 | |||
426 | int | ||
427 | dump_elf_task(elf_greg_t *dest, struct task_struct *task) | ||
428 | { | ||
429 | struct thread_info *ti; | ||
430 | struct pt_regs *pt; | ||
431 | |||
432 | ti = task->thread_info; | ||
433 | pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1; | ||
434 | |||
435 | dump_elf_thread(dest, pt, ti); | ||
436 | |||
437 | return 1; | ||
438 | } | ||
439 | |||
440 | int | ||
441 | dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task) | ||
442 | { | ||
443 | struct thread_info *ti; | ||
444 | struct pt_regs *pt; | ||
445 | struct switch_stack *sw; | ||
446 | |||
447 | ti = task->thread_info; | ||
448 | pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1; | ||
449 | sw = (struct switch_stack *)pt - 1; | ||
450 | |||
451 | memcpy(dest, sw->fp, 32 * 8); | ||
452 | |||
453 | return 1; | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * sys_execve() executes a new program. | ||
458 | */ | ||
459 | asmlinkage int | ||
460 | do_sys_execve(char __user *ufilename, char __user * __user *argv, | ||
461 | char __user * __user *envp, struct pt_regs *regs) | ||
462 | { | ||
463 | int error; | ||
464 | char *filename; | ||
465 | |||
466 | filename = getname(ufilename); | ||
467 | error = PTR_ERR(filename); | ||
468 | if (IS_ERR(filename)) | ||
469 | goto out; | ||
470 | error = do_execve(filename, argv, envp, regs); | ||
471 | putname(filename); | ||
472 | out: | ||
473 | return error; | ||
474 | } | ||
475 | |||
476 | /* | ||
477 | * Return saved PC of a blocked thread. This assumes the frame | ||
478 | * pointer is the 6th saved long on the kernel stack and that the | ||
479 | * saved return address is the first long in the frame. This all | ||
480 | * holds provided the thread blocked through a call to schedule() ($15 | ||
481 | * is the frame pointer in schedule() and $15 is saved at offset 48 by | ||
482 | * entry.S:do_switch_stack). | ||
483 | * | ||
484 | * Under heavy swap load I've seen this lose in an ugly way. So do | ||
485 | * some extra sanity checking on the ranges we expect these pointers | ||
486 | * to be in so that we can fail gracefully. This is just for ps after | ||
487 | * all. -- r~ | ||
488 | */ | ||
489 | |||
490 | unsigned long | ||
491 | thread_saved_pc(task_t *t) | ||
492 | { | ||
493 | unsigned long base = (unsigned long)t->thread_info; | ||
494 | unsigned long fp, sp = t->thread_info->pcb.ksp; | ||
495 | |||
496 | if (sp > base && sp+6*8 < base + 16*1024) { | ||
497 | fp = ((unsigned long*)sp)[6]; | ||
498 | if (fp > sp && fp < base + 16*1024) | ||
499 | return *(unsigned long *)fp; | ||
500 | } | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
/* Return the "wait channel" (blocking PC) of P, or 0 if it is
   running/current.  Used by /proc for ps-style reporting.  */
unsigned long
get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	/*
	 * This one depends on the frame size of schedule().  Do a
	 * "disass schedule" in gdb to find the frame size.  Also, the
	 * code assumes that sleep_on() follows immediately after
	 * interruptible_sleep_on() and that add_timer() follows
	 * immediately after interruptible_sleep().  Ugly, isn't it?
	 * Maybe adding a wchan field to task_struct would be better,
	 * after all...
	 */

	pc = thread_saved_pc(p);
	/* If blocked inside the scheduler, walk one more frame out to
	   find the real caller's PC (magic offsets 6 and 12 match the
	   schedule() frame layout -- see comment above).  */
	if (in_sched_functions(pc)) {
		schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
		return ((unsigned long *)schedule_frame)[12];
	}
	return pc;
}
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h new file mode 100644 index 000000000000..e1560fb15610 --- /dev/null +++ b/arch/alpha/kernel/proto.h | |||
@@ -0,0 +1,210 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/interrupt.h> | ||
3 | |||
4 | |||
5 | /* Prototypes of functions used across modules here in this directory. */ | ||
6 | |||
/* Shorthand cast macros for volatile pointer types (used for device
   register style accesses throughout this directory).  */
#define vucp	volatile unsigned char *
#define vusp	volatile unsigned short *
#define vip	volatile int *
#define vuip	volatile unsigned int *
#define vulp	volatile unsigned long *
12 | |||
13 | struct pt_regs; | ||
14 | struct task_struct; | ||
15 | struct pci_dev; | ||
16 | struct pci_controller; | ||
17 | |||
18 | /* core_apecs.c */ | ||
19 | extern struct pci_ops apecs_pci_ops; | ||
20 | extern void apecs_init_arch(void); | ||
21 | extern void apecs_pci_clr_err(void); | ||
22 | extern void apecs_machine_check(u64, u64, struct pt_regs *); | ||
23 | extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
24 | |||
25 | /* core_cia.c */ | ||
26 | extern struct pci_ops cia_pci_ops; | ||
27 | extern void cia_init_pci(void); | ||
28 | extern void cia_init_arch(void); | ||
29 | extern void pyxis_init_arch(void); | ||
30 | extern void cia_kill_arch(int); | ||
31 | extern void cia_machine_check(u64, u64, struct pt_regs *); | ||
32 | extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
33 | |||
34 | /* core_irongate.c */ | ||
35 | extern struct pci_ops irongate_pci_ops; | ||
36 | extern int irongate_pci_clr_err(void); | ||
37 | extern void irongate_init_arch(void); | ||
38 | extern void irongate_machine_check(u64, u64, struct pt_regs *); | ||
39 | #define irongate_pci_tbi ((void *)0) | ||
40 | |||
41 | /* core_lca.c */ | ||
42 | extern struct pci_ops lca_pci_ops; | ||
43 | extern void lca_init_arch(void); | ||
44 | extern void lca_machine_check(u64, u64, struct pt_regs *); | ||
45 | extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
46 | |||
47 | /* core_marvel.c */ | ||
48 | extern struct pci_ops marvel_pci_ops; | ||
49 | extern void marvel_init_arch(void); | ||
50 | extern void marvel_kill_arch(int); | ||
51 | extern void marvel_machine_check(u64, u64, struct pt_regs *); | ||
52 | extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
53 | extern int marvel_pa_to_nid(unsigned long); | ||
54 | extern int marvel_cpuid_to_nid(int); | ||
55 | extern unsigned long marvel_node_mem_start(int); | ||
56 | extern unsigned long marvel_node_mem_size(int); | ||
57 | extern struct _alpha_agp_info *marvel_agp_info(void); | ||
58 | struct io7 *marvel_find_io7(int pe); | ||
59 | struct io7 *marvel_next_io7(struct io7 *prev); | ||
60 | void io7_clear_errors(struct io7 *io7); | ||
61 | |||
62 | /* core_mcpcia.c */ | ||
63 | extern struct pci_ops mcpcia_pci_ops; | ||
64 | extern void mcpcia_init_arch(void); | ||
65 | extern void mcpcia_init_hoses(void); | ||
66 | extern void mcpcia_machine_check(u64, u64, struct pt_regs *); | ||
67 | extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
68 | |||
69 | /* core_polaris.c */ | ||
70 | extern struct pci_ops polaris_pci_ops; | ||
71 | extern int polaris_read_config_dword(struct pci_dev *, int, u32 *); | ||
72 | extern int polaris_write_config_dword(struct pci_dev *, int, u32); | ||
73 | extern void polaris_init_arch(void); | ||
74 | extern void polaris_machine_check(u64, u64, struct pt_regs *); | ||
75 | #define polaris_pci_tbi ((void *)0) | ||
76 | |||
77 | /* core_t2.c */ | ||
78 | extern struct pci_ops t2_pci_ops; | ||
79 | extern void t2_init_arch(void); | ||
80 | extern void t2_kill_arch(int); | ||
81 | extern void t2_machine_check(u64, u64, struct pt_regs *); | ||
82 | extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
83 | |||
84 | /* core_titan.c */ | ||
85 | extern struct pci_ops titan_pci_ops; | ||
86 | extern void titan_init_arch(void); | ||
87 | extern void titan_kill_arch(int); | ||
88 | extern void titan_machine_check(u64, u64, struct pt_regs *); | ||
89 | extern void titan_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
90 | extern struct _alpha_agp_info *titan_agp_info(void); | ||
91 | |||
92 | /* core_tsunami.c */ | ||
93 | extern struct pci_ops tsunami_pci_ops; | ||
94 | extern void tsunami_init_arch(void); | ||
95 | extern void tsunami_kill_arch(int); | ||
96 | extern void tsunami_machine_check(u64, u64, struct pt_regs *); | ||
97 | extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
98 | |||
99 | /* core_wildfire.c */ | ||
100 | extern struct pci_ops wildfire_pci_ops; | ||
101 | extern void wildfire_init_arch(void); | ||
102 | extern void wildfire_kill_arch(int); | ||
103 | extern void wildfire_machine_check(u64, u64, struct pt_regs *); | ||
104 | extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); | ||
105 | extern int wildfire_pa_to_nid(unsigned long); | ||
106 | extern int wildfire_cpuid_to_nid(int); | ||
107 | extern unsigned long wildfire_node_mem_start(int); | ||
108 | extern unsigned long wildfire_node_mem_size(int); | ||
109 | |||
110 | /* setup.c */ | ||
111 | extern unsigned long srm_hae; | ||
112 | extern int boot_cpuid; | ||
113 | #ifdef CONFIG_VERBOSE_MCHECK | ||
114 | extern unsigned long alpha_verbose_mcheck; | ||
115 | #endif | ||
116 | |||
117 | /* srmcons.c */ | ||
118 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM) | ||
119 | extern void register_srm_console(void); | ||
120 | extern void unregister_srm_console(void); | ||
121 | #else | ||
122 | #define register_srm_console() | ||
123 | #define unregister_srm_console() | ||
124 | #endif | ||
125 | |||
126 | /* smp.c */ | ||
127 | extern void setup_smp(void); | ||
128 | extern void handle_ipi(struct pt_regs *); | ||
129 | extern void smp_percpu_timer_interrupt(struct pt_regs *); | ||
130 | |||
131 | /* bios32.c */ | ||
132 | /* extern void reset_for_srm(void); */ | ||
133 | |||
134 | /* time.c */ | ||
135 | extern irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs); | ||
136 | extern void common_init_rtc(void); | ||
137 | extern unsigned long est_cycle_freq; | ||
138 | |||
139 | /* smc37c93x.c */ | ||
140 | extern void SMC93x_Init(void); | ||
141 | |||
142 | /* smc37c669.c */ | ||
143 | extern void SMC669_Init(int); | ||
144 | |||
145 | /* es1888.c */ | ||
146 | extern void es1888_init(void); | ||
147 | |||
148 | /* ns87312.c */ | ||
149 | extern void ns87312_enable_ide(long ide_base); | ||
150 | |||
151 | /* ../lib/fpreg.c */ | ||
152 | extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); | ||
153 | extern unsigned long alpha_read_fp_reg (unsigned long reg); | ||
154 | |||
155 | /* head.S */ | ||
156 | extern void wrmces(unsigned long mces); | ||
157 | extern void cserve_ena(unsigned long); | ||
158 | extern void cserve_dis(unsigned long); | ||
159 | extern void __smp_callin(unsigned long); | ||
160 | |||
161 | /* entry.S */ | ||
162 | extern void entArith(void); | ||
163 | extern void entIF(void); | ||
164 | extern void entInt(void); | ||
165 | extern void entMM(void); | ||
166 | extern void entSys(void); | ||
167 | extern void entUna(void); | ||
168 | extern void entDbg(void); | ||
169 | |||
170 | /* ptrace.c */ | ||
171 | extern int ptrace_set_bpt (struct task_struct *child); | ||
172 | extern int ptrace_cancel_bpt (struct task_struct *child); | ||
173 | |||
174 | /* traps.c */ | ||
175 | extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15); | ||
176 | extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *); | ||
177 | |||
178 | /* sys_titan.c */ | ||
179 | extern void titan_dispatch_irqs(u64, struct pt_regs *); | ||
180 | |||
181 | /* ../mm/init.c */ | ||
182 | extern void switch_to_system_map(void); | ||
183 | extern void srm_paging_stop(void); | ||
184 | |||
185 | /* ../mm/remap.c */ | ||
186 | extern int __alpha_remap_area_pages(unsigned long, unsigned long, | ||
187 | unsigned long, unsigned long); | ||
188 | |||
/* irq.c */

#ifdef CONFIG_SMP
/* SMP: machine-check bookkeeping is kept per-cpu in cpu_data[].  */
#define mcheck_expected(cpu)	(cpu_data[cpu].mcheck_expected)
#define mcheck_taken(cpu)	(cpu_data[cpu].mcheck_taken)
#define mcheck_extra(cpu)	(cpu_data[cpu].mcheck_extra)
#else
/* UP: a single global instance.  NOTE(review): the aligned(8) on the
   first byte presumably matters to an asm-side accessor -- confirm.  */
extern struct mcheck_info
{
	unsigned char expected __attribute__((aligned(8)));
	unsigned char taken;
	unsigned char extra;
} __mcheck_info;

/* The comma expression evaluates and discards `cpu` (keeping the call
   shape identical to the SMP variants) and yields the global field.  */
#define mcheck_expected(cpu)	(*((void)(cpu), &__mcheck_info.expected))
#define mcheck_taken(cpu)	(*((void)(cpu), &__mcheck_info.taken))
#define mcheck_extra(cpu)	(*((void)(cpu), &__mcheck_info.extra))
#endif
207 | |||
208 | extern void process_mcheck_info(unsigned long vector, unsigned long la_ptr, | ||
209 | struct pt_regs *regs, const char *machine, | ||
210 | int expected); | ||
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c new file mode 100644 index 000000000000..d00583161574 --- /dev/null +++ b/arch/alpha/kernel/ptrace.c | |||
@@ -0,0 +1,415 @@ | |||
1 | /* ptrace.c */ | ||
2 | /* By Ross Biro 1/23/92 */ | ||
3 | /* edited by Linus Torvalds */ | ||
4 | /* mangled further by Bob Manson (manson@santafe.edu) */ | ||
5 | /* more mutilation by David Mosberger (davidm@azstarnet.com) */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <linux/smp_lock.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/ptrace.h> | ||
14 | #include <linux/user.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/security.h> | ||
17 | |||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/fpu.h> | ||
22 | |||
23 | #include "proto.h" | ||
24 | |||
/* Compile-time debug switch: define DEBUG as a mask of the DBG_* bits
   below to enable the matching DBG() printk output.  Left undefined
   by default (the #undef immediately cancels the #define above it).  */
#define DEBUG DBG_MEM
#undef DEBUG

#ifdef DEBUG
enum {
	DBG_MEM		= (1<<0),	/* peek/poke traffic */
	DBG_BPT		= (1<<1),	/* breakpoint set/cancel */
	DBG_MEM_ALL	= (1<<2)
};
#define DBG(fac,args)	{if ((fac) & DEBUG) printk args;}
#else
#define DBG(fac,args)
#endif

#define BREAKINST	0x00000080	/* call_pal bpt */
40 | |||
41 | /* | ||
42 | * does not yet catch signals sent when the child dies. | ||
43 | * in exit.c or in signal.c. | ||
44 | */ | ||
45 | |||
46 | /* | ||
47 | * Processes always block with the following stack-layout: | ||
48 | * | ||
49 | * +================================+ <---- task + 2*PAGE_SIZE | ||
50 | * | PALcode saved frame (ps, pc, | ^ | ||
51 | * | gp, a0, a1, a2) | | | ||
52 | * +================================+ | struct pt_regs | ||
53 | * | | | | ||
54 | * | frame generated by SAVE_ALL | | | ||
55 | * | | v | ||
56 | * +================================+ | ||
57 | * | | ^ | ||
58 | * | frame saved by do_switch_stack | | struct switch_stack | ||
59 | * | | v | ||
60 | * +================================+ | ||
61 | */ | ||
62 | |||
/*
 * The following table maps a register index into the stack offset at
 * which the register is saved.  Register indices are 0-31 for integer
 * regs, 32-63 for fp regs, and 64 for the pc.  Notice that sp and
 * zero have no stack-slot and need to be treated specially (see
 * get_reg/put_reg below).
 */
enum {
	REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
};

/* PT_REG slots live in struct pt_regs (saved on kernel entry);
   SW_REG slots live in struct switch_stack (saved by do_switch_stack).
   The two -1 entries are sp (r30) and zero (r31), which have no
   stack slot.  */
static int regoff[] = {
	PT_REG(	 r0), PT_REG(  r1), PT_REG(  r2), PT_REG(  r3),
	PT_REG(	 r4), PT_REG(  r5), PT_REG(  r6), PT_REG(  r7),
	PT_REG(	 r8), SW_REG(  r9), SW_REG( r10), SW_REG( r11),
	SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15),
	PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19),
	PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23),
	PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27),
	PT_REG( r28), PT_REG(  gp),
		   -1,		 -1,
	SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]),
	SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]),
	SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]),
	SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]),
	SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]),
	SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]),
	SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]),
	SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]),
	PT_REG( pc)
};

/* Scratch word backing reads/writes of slot-less register indices
   (r31 and out-of-range numbers); zeroed before each use.  */
static unsigned long zero;
95 | |||
96 | /* | ||
97 | * Get address of register REGNO in task TASK. | ||
98 | */ | ||
99 | static unsigned long * | ||
100 | get_reg_addr(struct task_struct * task, unsigned long regno) | ||
101 | { | ||
102 | unsigned long *addr; | ||
103 | |||
104 | if (regno == 30) { | ||
105 | addr = &task->thread_info->pcb.usp; | ||
106 | } else if (regno == 65) { | ||
107 | addr = &task->thread_info->pcb.unique; | ||
108 | } else if (regno == 31 || regno > 65) { | ||
109 | zero = 0; | ||
110 | addr = &zero; | ||
111 | } else { | ||
112 | addr = (void *)task->thread_info + regoff[regno]; | ||
113 | } | ||
114 | return addr; | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Get contents of register REGNO in task TASK. | ||
119 | */ | ||
120 | static unsigned long | ||
121 | get_reg(struct task_struct * task, unsigned long regno) | ||
122 | { | ||
123 | /* Special hack for fpcr -- combine hardware and software bits. */ | ||
124 | if (regno == 63) { | ||
125 | unsigned long fpcr = *get_reg_addr(task, regno); | ||
126 | unsigned long swcr | ||
127 | = task->thread_info->ieee_state & IEEE_SW_MASK; | ||
128 | swcr = swcr_update_status(swcr, fpcr); | ||
129 | return fpcr | swcr; | ||
130 | } | ||
131 | return *get_reg_addr(task, regno); | ||
132 | } | ||
133 | |||
/*
 * Write contents of register REGNO in task TASK.  Always returns 0.
 *
 * The fpcr (reg 63) is special: DATA's software IEEE bits are stashed
 * in thread_info->ieee_state, and DATA is rewritten as the dynamic
 * rounding bits plus the hardware encoding of the software control
 * bits before being stored into the saved fpcr slot.
 */
static int
put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
	if (regno == 63) {
		/* Replace the software IEEE bits with those in DATA. */
		task->thread_info->ieee_state
		  = ((task->thread_info->ieee_state & ~IEEE_SW_MASK)
		     | (data & IEEE_SW_MASK));
		data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
	}
	*get_reg_addr(task, regno) = data;
	return 0;
}
149 | |||
150 | static inline int | ||
151 | read_int(struct task_struct *task, unsigned long addr, int * data) | ||
152 | { | ||
153 | int copied = access_process_vm(task, addr, data, sizeof(int), 0); | ||
154 | return (copied == sizeof(int)) ? 0 : -EIO; | ||
155 | } | ||
156 | |||
157 | static inline int | ||
158 | write_int(struct task_struct *task, unsigned long addr, int data) | ||
159 | { | ||
160 | int copied = access_process_vm(task, addr, &data, sizeof(int), 1); | ||
161 | return (copied == sizeof(int)) ? 0 : -EIO; | ||
162 | } | ||
163 | |||
/*
 * Set breakpoint(s) for single-stepping.
 *
 * Decodes the instruction at the child's PC and plants a BREAKINST
 * (call_pal bpt) at every possible successor address: both arms of a
 * branch, the register target of a jump, or simply PC+4 for an
 * ordinary instruction.  The displaced instructions are stashed in
 * thread_info so ptrace_cancel_bpt() can restore them.  Returns 0,
 * or a negative errno from the child-memory accessors.
 */
int
ptrace_set_bpt(struct task_struct * child)
{
	int displ, i, res, reg_b, nsaved = 0;
	unsigned int insn, op_code;
	unsigned long pc;

	pc = get_reg(child, REG_PC);
	res = read_int(child, pc, (int *) &insn);
	if (res < 0)
		return res;

	op_code = insn >> 26;	/* major opcode: top 6 bits */
	if (op_code >= 0x30) {
		/*
		 * It's a branch: instead of trying to figure out
		 * whether the branch will be taken or not, we'll put
		 * a breakpoint at either location.  This is simpler,
		 * more reliable, and probably not a whole lot slower
		 * than the alternative approach of emulating the
		 * branch (emulation can be tricky for fp branches).
		 */
		/* Sign-extend the 21-bit displacement field and scale
		   to bytes: <<11 puts its sign bit at bit 31, >>9
		   (arithmetic) nets a signed <<2.  */
		displ = ((s32)(insn << 11)) >> 9;
		child->thread_info->bpt_addr[nsaved++] = pc + 4;
		if (displ)		/* guard against unoptimized code */
			child->thread_info->bpt_addr[nsaved++]
			  = pc + 4 + displ;
		DBG(DBG_BPT, ("execing branch\n"));
	} else if (op_code == 0x1a) {
		/* Jump group: target address is in register Rb. */
		reg_b = (insn >> 16) & 0x1f;
		child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b);
		DBG(DBG_BPT, ("execing jump\n"));
	} else {
		child->thread_info->bpt_addr[nsaved++] = pc + 4;
		DBG(DBG_BPT, ("execing normal insn\n"));
	}

	/* install breakpoints: save each original instruction, then
	   overwrite it with BREAKINST.  */
	for (i = 0; i < nsaved; ++i) {
		res = read_int(child, child->thread_info->bpt_addr[i],
			       (int *) &insn);
		if (res < 0)
			return res;
		child->thread_info->bpt_insn[i] = insn;
		DBG(DBG_BPT, ("	 -> next_pc=%lx\n",
			      child->thread_info->bpt_addr[i]));
		res = write_int(child, child->thread_info->bpt_addr[i],
				BREAKINST);
		if (res < 0)
			return res;
	}
	child->thread_info->bpt_nsaved = nsaved;
	return 0;
}
221 | |||
222 | /* | ||
223 | * Ensure no single-step breakpoint is pending. Returns non-zero | ||
224 | * value if child was being single-stepped. | ||
225 | */ | ||
226 | int | ||
227 | ptrace_cancel_bpt(struct task_struct * child) | ||
228 | { | ||
229 | int i, nsaved = child->thread_info->bpt_nsaved; | ||
230 | |||
231 | child->thread_info->bpt_nsaved = 0; | ||
232 | |||
233 | if (nsaved > 2) { | ||
234 | printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); | ||
235 | nsaved = 2; | ||
236 | } | ||
237 | |||
238 | for (i = 0; i < nsaved; ++i) { | ||
239 | write_int(child, child->thread_info->bpt_addr[i], | ||
240 | child->thread_info->bpt_insn[i]); | ||
241 | } | ||
242 | return (nsaved != 0); | ||
243 | } | ||
244 | |||
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set, i.e. remove any pending
 * single-step breakpoints and restore the displaced instructions.
 */
void ptrace_disable(struct task_struct *child)
{ 
	ptrace_cancel_bpt(child);
}
254 | |||
/*
 * The ptrace syscall entry point (given the full register frame so
 * the PEEK cases can clear regs->r0 as a "no error" marker for the
 * caller).  request/pid/addr/data are the usual ptrace(2) arguments.
 * Returns the request's result or a negative errno.
 */
asmlinkage long
do_sys_ptrace(long request, long pid, long addr, long data,
	      struct pt_regs *regs)
{
	struct task_struct *child;
	unsigned long tmp;
	size_t copied;
	long ret;

	lock_kernel();
	DBG(DBG_MEM, ("request=%ld pid=%ld addr=0x%lx data=0x%lx\n",
		      request, pid, addr, data));
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out_notsk;
		/* Let the security module veto the self-trace. */
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out_notsk;
		/* set the ptrace bit in the process ptrace flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out_notsk;
	}
	if (pid == 1)		/* you may not mess with init */
		goto out_notsk;

	/* Look up the target and take a reference so it cannot be
	   freed while we operate on it.  */
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out_notsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out;
	}

	/* All remaining requests require the child to be stopped and
	   traced by us (PTRACE_KILL is allowed on a dying child).  */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out;

	switch (request) {
	/* When I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;

		regs->r0 = 0;	/* special return: no errors */
		ret = tmp;
		break;

	/* Read register number ADDR. */
	case PTRACE_PEEKUSR:
		regs->r0 = 0;	/* special return: no errors */
		ret = get_reg(child, addr);
		DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
		break;

	/* When I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		tmp = data;
		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
		ret = (copied == sizeof(tmp)) ? 0 : -EIO;
		break;

	case PTRACE_POKEUSR: /* write the specified register */
		DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
		ret = put_reg(child, addr, data);
		break;

	case PTRACE_SYSCALL:
		/* continue and stop at next (return from) syscall */
	case PTRACE_CONT:    /* restart after signal. */
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;	/* signal delivered on resume */
		/* make sure single-step breakpoint is gone. */
		ptrace_cancel_bpt(child);
		wake_up_process(child);
		ret = 0;
		break;

	/*
	 * Make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)
			break;
		child->exit_code = SIGKILL;
		/* make sure single-step breakpoint is gone. */
		ptrace_cancel_bpt(child);
		wake_up_process(child);
		goto out;

	case PTRACE_SINGLESTEP:  /* execute single instruction. */
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		/* Mark single stepping; -1 is presumably consumed on
		   resume to plant the breakpoints (see ptrace_set_bpt
		   and ptrace_cancel_bpt) -- TODO confirm the caller.  */
		child->thread_info->bpt_nsaved = -1;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		wake_up_process(child);
		/* give it a chance to run. */
		ret = 0;
		goto out;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		goto out;

	default:
		ret = ptrace_request(child, request, addr, data);
		goto out;
	}
 out:
	put_task_struct(child);
 out_notsk:
	unlock_kernel();
	return ret;
}
393 | |||
394 | asmlinkage void | ||
395 | syscall_trace(void) | ||
396 | { | ||
397 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
398 | return; | ||
399 | if (!(current->ptrace & PT_PTRACED)) | ||
400 | return; | ||
401 | /* The 0x80 provides a way for the tracing parent to distinguish | ||
402 | between a syscall stop and SIGTRAP delivery */ | ||
403 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | ||
404 | ? 0x80 : 0)); | ||
405 | |||
406 | /* | ||
407 | * This isn't the same as continuing with a signal, but it will do | ||
408 | * for normal use. strace only continues with a signal if the | ||
409 | * stopping signal is not SIGTRAP. -brl | ||
410 | */ | ||
411 | if (current->exit_code) { | ||
412 | send_sig(current->exit_code, current, 1); | ||
413 | current->exit_code = 0; | ||
414 | } | ||
415 | } | ||
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c new file mode 100644 index 000000000000..8c8aaa205eae --- /dev/null +++ b/arch/alpha/kernel/semaphore.c | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * Alpha semaphore implementation. | ||
3 | * | ||
4 | * (C) Copyright 1996 Linus Torvalds | ||
5 | * (C) Copyright 1999, 2000 Richard Henderson | ||
6 | */ | ||
7 | |||
8 | #include <linux/errno.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/init.h> | ||
11 | |||
12 | /* | ||
13 | * This is basically the PPC semaphore scheme ported to use | ||
14 | * the Alpha ll/sc sequences, so see the PPC code for | ||
15 | * credits. | ||
16 | */ | ||
17 | |||
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * Implemented with an ldl_l/stl_c (load-locked/store-conditional)
 * sequence; a failed store-conditional retries via the out-of-line
 * branch in subsection 2, and the trailing mb orders the update
 * against subsequent memory accesses.
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	/* tmp starts at 0 (tied to input operand "1" below) so the
	   cmovgt yields MAX(old_count, 0).  */
	long old_count, tmp = 0;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%2\n"
	"	cmovgt	%0,%0,%1\n"
	"	addl	%1,%3,%1\n"
	"	stl_c	%1,%2\n"
	"	beq	%1,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "Ir" (incr), "1" (tmp), "m" (sem->count));

	return old_count;
}
46 | |||
47 | /* | ||
48 | * Perform the "down" function. Return zero for semaphore acquired, | ||
49 | * return negative for signalled out of the function. | ||
50 | * | ||
51 | * If called from down, the return is ignored and the wait loop is | ||
52 | * not interruptible. This means that a task waiting on a semaphore | ||
53 | * using "down()" cannot be killed until someone does an "up()" on | ||
54 | * the semaphore. | ||
55 | * | ||
56 | * If called from down_interruptible, the return value gets checked | ||
57 | * upon return. If the return value is negative then the task continues | ||
58 | * with the negative value in the return register (it can be tested by | ||
59 | * the caller). | ||
60 | * | ||
61 | * Either form may be used in conjunction with "up()". | ||
62 | */ | ||
63 | |||
/*
 * Slow path for down(): sleep (uninterruptibly) until the semaphore
 * is released.  See the block comment above for the down() vs
 * down_interruptible() contract.
 */
void __sched
__down_failed(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif

	/* Publish the state change before queueing ourselves, so a
	   concurrent wakeup cannot find us on the queue while still
	   appearing TASK_RUNNING.  */
	tsk->state = TASK_UNINTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		/* Re-arm the sleep state before retesting the count. */
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down acquired(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif
}
104 | |||
/*
 * Interruptible variant of __down_failed: sleep until the semaphore
 * is released or a signal arrives.  Returns 0 if acquired, -EINTR if
 * interrupted by a signal.
 */
int __sched
__down_failed_interruptible(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	long ret = 0;

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif

	/* Publish the state change before queueing (same ordering
	   concern as in __down_failed).  */
	tsk->state = TASK_INTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			ret = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	/* Pass the wakeup along: another sleeper may proceed now. */
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down %s(%p)\n",
	       current->comm, current->pid,
	       (ret < 0 ? "interrupted" : "acquired"), sem);
#endif
	return ret;
}
147 | |||
/*
 * Slow path for up(): fix up the count and wake one exclusive waiter.
 */
void
__up_wakeup(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
162 | |||
/*
 * Acquire the semaphore, sleeping uninterruptibly if needed.  The
 * atomic fast path is __down() -- presumably defined in the arch
 * header, falling back to __down_failed() above on contention; TODO
 * confirm.
 */
void __sched
down(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__down(sem);
}
176 | |||
/*
 * Acquire the semaphore, sleeping interruptibly.  Returns 0 on
 * success or -EINTR if interrupted by a signal (propagated from
 * __down_failed_interruptible via the arch fast path
 * __down_interruptible -- TODO confirm).
 */
int __sched
down_interruptible(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	return __down_interruptible(sem);
}
190 | |||
/*
 * Try to acquire the semaphore without sleeping.  Returns 0 when the
 * semaphore was acquired, non-zero when it was not (mirrors the arch
 * fast path __down_trylock's convention).
 */
int
down_trylock(struct semaphore *sem)
{
	int failed;

#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	failed = __down_trylock(sem);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down_trylock %s from %p\n",
	       current->comm, current->pid,
	       failed ? "failed" : "acquired",
	       __builtin_return_address(0));
#endif

	return failed;
}
211 | |||
/*
 * Release the semaphore.  The increment is done by the arch fast
 * path __up() -- presumably falling back to __up_wakeup() above when
 * sleepers must be woken; TODO confirm.
 */
void
up(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): up(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__up(sem);
}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c new file mode 100644 index 000000000000..b4e5f8ff2b25 --- /dev/null +++ b/arch/alpha/kernel/setup.c | |||
@@ -0,0 +1,1486 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/setup.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */ | ||
8 | |||
9 | /* | ||
10 | * Bootup setup stuff. | ||
11 | */ | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/stddef.h> | ||
17 | #include <linux/unistd.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/user.h> | ||
21 | #include <linux/a.out.h> | ||
22 | #include <linux/tty.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/config.h> /* CONFIG_ALPHA_LCA etc */ | ||
25 | #include <linux/mc146818rtc.h> | ||
26 | #include <linux/console.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/ioport.h> | ||
31 | #include <linux/bootmem.h> | ||
32 | #include <linux/pci.h> | ||
33 | #include <linux/seq_file.h> | ||
34 | #include <linux/root_dev.h> | ||
35 | #include <linux/initrd.h> | ||
36 | #include <linux/eisa.h> | ||
37 | #ifdef CONFIG_MAGIC_SYSRQ | ||
38 | #include <linux/sysrq.h> | ||
39 | #include <linux/reboot.h> | ||
40 | #endif | ||
41 | #include <linux/notifier.h> | ||
42 | #include <asm/setup.h> | ||
43 | #include <asm/io.h> | ||
44 | |||
/* Panic notifier: alpha_panic_event (static, defined later in this file)
   is hooked onto the global panic chain by setup_arch().  */
extern struct notifier_block *panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
	alpha_panic_event,	/* notifier callback */
	NULL,			/* next in chain */
	INT_MAX /* try to do it first */
};
52 | |||
53 | #include <asm/uaccess.h> | ||
54 | #include <asm/pgtable.h> | ||
55 | #include <asm/system.h> | ||
56 | #include <asm/hwrpb.h> | ||
57 | #include <asm/dma.h> | ||
58 | #include <asm/io.h> | ||
59 | #include <asm/mmu_context.h> | ||
60 | #include <asm/console.h> | ||
61 | |||
62 | #include "proto.h" | ||
63 | #include "pci_impl.h" | ||
64 | |||
65 | |||
struct hwrpb_struct *hwrpb;	/* kernel VA of the firmware HWRPB */
unsigned long srm_hae;		/* SRM's HAE value, saved in setup_arch() */

/* CPU cache geometry; presumably filled in by determine_cpu_caches()
   (declared below) -- confirm against its definition.  */
int alpha_l1i_cacheshape;
int alpha_l1d_cacheshape;
int alpha_l2_cacheshape;
int alpha_l3_cacheshape;

#ifdef CONFIG_VERBOSE_MCHECK
/* 0=minimum, 1=verbose, 2=all */
/* These can be overridden via the command line, i.e. "verbose_mcheck=2" */
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif

/* Which processor we booted from.  */
int boot_cpuid;

/*
 * Using SRM callbacks for initial console output. This works from
 * setup_arch() time through the end of time_init(), as those places
 * are under our (Alpha) control.
 *
 * "srmcons" specified in the boot command arguments allows us to
 * see kernel messages during the period of time before the true
 * console device is "registered" during console_init().
 * As of this version (2.5.59), console_init() will call
 * disable_early_printk() as the last action before initializing
 * the console drivers. That's the last possible time srmcons can be
 * unregistered without interfering with console behavior.
 *
 * By default, OFF; set it with a bootcommand arg of "srmcons" or
 * "console=srm". The meaning of these two args is:
 *     "srmcons" - early callback prints
 *     "console=srm" - full callback based console, including early prints
 */
int srmcons_output = 0;

/* Enforce a memory size limit; useful for testing. By default, none. */
unsigned long mem_size_limit = 0;

/* Set AGP GART window size (0 means disabled). */
unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;

#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;	/* copy of the detected machine vector */
int alpha_using_srm;			/* nonzero when booted from SRM, not MILO */
#endif

/* Element count of a fixed-size array (ARRAY_SIZE equivalent).  */
#define N(a) (sizeof(a)/sizeof(a[0]))

static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
					       unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
static void get_sysnames(unsigned long, unsigned long, unsigned long,
			 char **, char **);
static void determine_cpu_caches (unsigned int);

/* Boot command line; filled in and parsed by setup_arch().  */
static char command_line[COMMAND_LINE_SIZE];

/*
 * The format of "screen_info" is strange, and due to early
 * i386-setup code. This is just enough to make the console
 * code think we're on a VGA color display.
 */

struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};

/*
 * The direct map I/O window, if any.  This should be the same
 * for all busses, since it's used by virt_to_bus.
 */

unsigned long __direct_map_base;
unsigned long __direct_map_size;

/*
 * Declare all of the machine vectors.
 */

/* GCC 2.7.2 (on alpha at least) is lame.  It does not support either
   __attribute__((weak)) or #pragma weak.  Bypass it and talk directly
   to the assembler.  */

/* Weak reference: the symbol resolves to NULL unless the matching
   platform support file is linked into this kernel, which is how the
   generic kernel probes for available vectors in get_sysvec().  */
#define WEAK(X) \
	extern struct alpha_machine_vector X; \
	asm(".weak "#X)

WEAK(alcor_mv);
WEAK(alphabook1_mv);
WEAK(avanti_mv);
WEAK(cabriolet_mv);
WEAK(clipper_mv);
WEAK(dp264_mv);
WEAK(eb164_mv);
WEAK(eb64p_mv);
WEAK(eb66_mv);
WEAK(eb66p_mv);
WEAK(eiger_mv);
WEAK(jensen_mv);
WEAK(lx164_mv);
WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
WEAK(mikasa_primo_mv);
WEAK(monet_mv);
WEAK(nautilus_mv);
WEAK(noname_mv);
WEAK(noritake_mv);
WEAK(noritake_primo_mv);
WEAK(p2k_mv);
WEAK(pc164_mv);
WEAK(privateer_mv);
WEAK(rawhide_mv);
WEAK(ruffian_mv);
WEAK(rx164_mv);
WEAK(sable_mv);
WEAK(sable_gamma_mv);
WEAK(shark_mv);
WEAK(sx164_mv);
WEAK(takara_mv);
WEAK(titan_mv);
WEAK(webbrick_mv);
WEAK(wildfire_mv);
WEAK(xl_mv);
WEAK(xlt_mv);

#undef WEAK
201 | |||
202 | /* | ||
203 | * I/O resources inherited from PeeCees. Except for perhaps the | ||
204 | * turbochannel alphas, everyone has these on some sort of SuperIO chip. | ||
205 | * | ||
206 | * ??? If this becomes less standard, move the struct out into the | ||
207 | * machine vector. | ||
208 | */ | ||
209 | |||
/*
 * Claim the legacy PC-style SuperIO port ranges (RTC, DMA, PICs,
 * timer, keyboard) in the primary hose's I/O space -- or the global
 * ioport_resource if no hose exists -- so drivers can't collide
 * with them.
 */
static void __init
reserve_std_resources(void)
{
	/* .start/.end are inclusive port numbers; the RTC entry is a
	   placeholder patched below because Jensen places it oddly.  */
	static struct resource standard_io_resources[] = {
		{ .name = "rtc", .start = -1, .end = -1 },
        	{ .name = "dma1", .start = 0x00, .end = 0x1f },
        	{ .name = "pic1", .start = 0x20, .end = 0x3f },
        	{ .name = "timer", .start = 0x40, .end = 0x5f },
        	{ .name = "keyboard", .start = 0x60, .end = 0x6f },
        	{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
        	{ .name = "pic2", .start = 0xa0, .end = 0xbf },
        	{ .name = "dma2", .start = 0xc0, .end = 0xdf },
        };

	struct resource *io = &ioport_resource;
	size_t i;

	/* Prefer hose 0's I/O space when PCI hoses are present.  */
	if (hose_head) {
		struct pci_controller *hose;
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == 0) {
				io = hose->io_space;
				break;
			}
	}

	/* Fix up for the Jensen's queer RTC placement.  */
	standard_io_resources[0].start = RTC_PORT(0);
	standard_io_resources[0].end = RTC_PORT(0) + 0x10;

	for (i = 0; i < N(standard_io_resources); ++i)
		request_resource(io, standard_io_resources+i);
}
243 | |||
/* Page-frame-number helpers, local to this file (#undef'd after use):
   PFN_UP rounds a byte address up to a PFN, PFN_DOWN truncates, and
   PFN_PHYS converts a PFN back to a physical address.  */
#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
#define PFN_MAX		PFN_DOWN(0x80000000)
/* Walk every memory-cluster descriptor in the HWRPB MDDT.  */
#define for_each_mem_cluster(memdesc, cluster, i)		\
	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)
251 | |||
252 | static unsigned long __init | ||
253 | get_mem_size_limit(char *s) | ||
254 | { | ||
255 | unsigned long end = 0; | ||
256 | char *from = s; | ||
257 | |||
258 | end = simple_strtoul(from, &from, 0); | ||
259 | if ( *from == 'K' || *from == 'k' ) { | ||
260 | end = end << 10; | ||
261 | from++; | ||
262 | } else if ( *from == 'M' || *from == 'm' ) { | ||
263 | end = end << 20; | ||
264 | from++; | ||
265 | } else if ( *from == 'G' || *from == 'g' ) { | ||
266 | end = end << 30; | ||
267 | from++; | ||
268 | } | ||
269 | return end >> PAGE_SHIFT; /* Return the PFN of the limit. */ | ||
270 | } | ||
271 | |||
272 | #ifdef CONFIG_BLK_DEV_INITRD | ||
/*
 * Relocate the initial ramdisk into bootmem below mem_limit (a
 * physical address).
 *
 * On success returns the new virtual start address and updates
 * initrd_start/initrd_end.  On failure both are cleared (disabling
 * the initrd) and NULL is returned.
 *
 * NOTE(review): if __alloc_bootmem succeeds but the block still ends
 * above mem_limit, the allocation is abandoned without being freed --
 * confirm whether that case can occur here.
 */
void * __init
move_initrd(unsigned long mem_limit)
{
	void *start;
	unsigned long size;

	size = initrd_end - initrd_start;
	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
	}
	/* Regions may overlap, hence memmove rather than memcpy.  */
	memmove(start, (void *)initrd_start, size);
	initrd_start = (unsigned long)start;
	initrd_end = initrd_start + size;
	printk("initrd moved to %p\n", start);
	return start;
}
291 | #endif | ||
292 | |||
293 | #ifndef CONFIG_DISCONTIGMEM | ||
/*
 * setup_memory - contiguous-memory (non-DISCONTIGMEM) bootmem init.
 *
 * Scans the HWRPB memory-cluster table to find max_low_pfn, applies
 * the (default 32GB) memory limit, finds a contiguous free region for
 * the bootmem bitmap that avoids the kernel image, frees all usable
 * clusters into bootmem, then reserves the bitmap itself and (if
 * configured) the initial ramdisk.
 *
 * @kernel_end: first virtual address past the kernel image.
 */
static void __init
setup_memory(void *kernel_end)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	/* First pass: report clusters and compute max_low_pfn from the
	   highest usable one.  */
	for_each_mem_cluster(memdesc, cluster, i) {
		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		end = cluster->start_pfn + cluster->numpages;
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	/*
	 * Except for the NUMA systems (wildfire, marvel) all of the
	 * Alpha systems we run on support 32GB of memory or less.
	 * Since the NUMA systems introduce large holes in memory addressing,
	 * we can get into a situation where there is not enough contiguous
	 * memory for the memory map.
	 *
	 * Limit memory to the first 32GB to limit the NUMA systems to
	 * memory on their first node (wildfire) or 2 (marvel) to avoid
	 * not being able to produce the memory map.  In order to access
	 * all of the memory on the NUMA systems, build with discontiguous
	 * memory support.
	 *
	 * If the user specified a memory limit, let that memory limit stand.
	 */
	if (!mem_size_limit)
		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;

	if (mem_size_limit && max_low_pfn >= mem_size_limit)
	{
		printk("setup: forcing memory size to %ldK (from %ldK).\n",
		       mem_size_limit << (PAGE_SHIFT - 10),
		       max_low_pfn    << (PAGE_SHIFT - 10));
		max_low_pfn = mem_size_limit;
	}

	/* Find the bounds of kernel memory.  */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;	/* sentinel: same bits as ~0UL, tested below */

 try_again:
	if (max_low_pfn <= end_kernel_pfn)
		panic("not enough memory to boot");

	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap.  */
	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);

	/* Now find a good region where to allocate the bootmap.  Each
	   candidate cluster is clipped against max_low_pfn and against
	   the kernel image [start_kernel_pfn, end_kernel_pfn).  */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

	/* No room found: halve the memory we try to cover and retry,
	   which shrinks the bitmap until it fits somewhere.  */
	if (bootmap_start == ~0UL) {
		max_low_pfn >>= 1;
		goto try_again;
	}

	/* Allocate the bootmap and mark the whole MM as reserved.  */
	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);

	/* Mark the free regions.  Same clipping as above, but this time
	   the pieces on either side of the kernel image are released.  */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem(PFN_PHYS(start),
					     (PFN_PHYS(start_kernel_pfn)
					      - PFN_PHYS(start)));
				printk("freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
		printk("freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory.  */
	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		/* If the initrd lies above the memory we kept, try to
		   move it down; otherwise just reserve it in place.  */
		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			reserve_bootmem(virt_to_phys((void *)initrd_start),
					INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
451 | #else | ||
452 | extern void setup_memory(void *); | ||
453 | #endif /* !CONFIG_DISCONTIGMEM */ | ||
454 | |||
455 | int __init | ||
456 | page_is_ram(unsigned long pfn) | ||
457 | { | ||
458 | struct memclust_struct * cluster; | ||
459 | struct memdesc_struct * memdesc; | ||
460 | unsigned long i; | ||
461 | |||
462 | memdesc = (struct memdesc_struct *) | ||
463 | (hwrpb->mddt_offset + (unsigned long) hwrpb); | ||
464 | for_each_mem_cluster(memdesc, cluster, i) | ||
465 | { | ||
466 | if (pfn >= cluster->start_pfn && | ||
467 | pfn < cluster->start_pfn + cluster->numpages) { | ||
468 | return (cluster->usage & 3) ? 0 : 1; | ||
469 | } | ||
470 | } | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | #undef PFN_UP | ||
476 | #undef PFN_DOWN | ||
477 | #undef PFN_PHYS | ||
478 | #undef PFN_MAX | ||
479 | |||
/*
 * setup_arch - Alpha architecture-specific boot setup.
 *
 * Maps the HWRPB, selects and installs the machine vector, parses the
 * boot command line, sets up the HAE, initializes bootmem via
 * setup_memory(), runs platform init_arch, and reserves standard
 * resources.  *cmdline_p is pointed at the (restored) command line.
 * Called once during early boot -- presumably from the generic
 * start_kernel() path; confirm against init/main.c.
 */
void __init
setup_arch(char **cmdline_p)
{
	extern char _end[];

	struct alpha_machine_vector *vec = NULL;
	struct percpu_struct *cpu;
	char *type_name, *var_name, *p;
	void *kernel_end = _end; /* end of kernel */
	char *args = command_line;

	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
	boot_cpuid = hard_smp_processor_id();

	/*
	 * Pre-process the system type to make sure it will be valid.
	 *
	 * This may restore real CABRIO and EB66+ family names, ie
	 * EB64+ and EB66.
	 *
	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
	 * and AS1200 (DIGITAL Server 5000 series) have the type as
	 * the negative of the real one.
	 */
	if ((long)hwrpb->sys_type < 0) {
		hwrpb->sys_type = -((long)hwrpb->sys_type);
		hwrpb_update_checksum(hwrpb);
	}

	/* Register a call for panic conditions. */
	notifier_chain_register(&panic_notifier_list, &alpha_panic_block);

#ifdef CONFIG_ALPHA_GENERIC
	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the latter by looking for "MILO" in the system serial nr. */
	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
#endif

	/* If we are using SRM, we want to allow callbacks
	   as early as possible, so do this NOW, and then
	   they should work immediately thereafter.
	*/
	kernel_end = callback_init(kernel_end);

	/*
	 * Locate the command line.
	 */
	/* Hack for Jensen... since we're restricted to 8 or 16 chars for
	   boot flags depending on the boot mode, we need some shorthand.
	   This should do for installation.  */
	if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
		strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
	} else {
		strlcpy(command_line, COMMAND_LINE, sizeof command_line);
	}
	strcpy(saved_command_line, command_line);
	*cmdline_p = command_line;

	/*
	 * Process command-line arguments.  strsep() destroys
	 * command_line as it goes; the saved copy is restored below.
	 */
	while ((p = strsep(&args, " \t")) != NULL) {
		if (!*p) continue;
		if (strncmp(p, "alpha_mv=", 9) == 0) {
			vec = get_sysvec_byname(p+9);
			continue;
		}
		if (strncmp(p, "cycle=", 6) == 0) {
			est_cycle_freq = simple_strtol(p+6, NULL, 0);
			continue;
		}
		if (strncmp(p, "mem=", 4) == 0) {
			mem_size_limit = get_mem_size_limit(p+4);
			continue;
		}
		if (strncmp(p, "srmcons", 7) == 0) {
			srmcons_output |= 1;
			continue;
		}
		if (strncmp(p, "console=srm", 11) == 0) {
			srmcons_output |= 2;
			continue;
		}
		if (strncmp(p, "gartsize=", 9) == 0) {
			alpha_agpgart_size =
				get_mem_size_limit(p+9) << PAGE_SHIFT;
			continue;
		}
#ifdef CONFIG_VERBOSE_MCHECK
		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
			continue;
		}
#endif
	}

	/* Replace the command line, now that we've killed it with strsep. */
	strcpy(command_line, saved_command_line);

	/* If we want SRM console printk echoing early, do it now. */
	if (alpha_using_srm && srmcons_output) {
		register_srm_console();

		/*
		 * If "console=srm" was specified, clear the srmcons_output
		 * flag now so that time.c won't unregister_srm_console
		 */
		if (srmcons_output & 2)
			srmcons_output = 0;
	}

#ifdef CONFIG_MAGIC_SYSRQ
	/* If we're using SRM, make sysrq-b halt back to the prom,
	   not auto-reboot.  */
	if (alpha_using_srm) {
		struct sysrq_key_op *op = __sysrq_get_key_op('b');
		op->handler = (void *) machine_halt;
	}
#endif

	/*
	 * Identify and reconfigure for the current system.
	 */
	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &type_name, &var_name);
	/* A variation name of "0" means "no variation".  */
	if (*var_name == '0')
		var_name = "";

	/* Command-line "alpha_mv=" takes precedence over detection.  */
	if (!vec) {
		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
				 cpu->type);
	}

	if (!vec) {
		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
		      type_name, (*var_name ? " variation " : ""), var_name,
		      hwrpb->sys_type, hwrpb->sys_variation);
	}
	if (vec != &alpha_mv) {
		alpha_mv = *vec;
	}

	printk("Booting "
#ifdef CONFIG_ALPHA_GENERIC
	       "GENERIC "
#endif
	       "on %s%s%s using machine vector %s from %s\n",
	       type_name, (*var_name ? " variation " : ""),
	       var_name, alpha_mv.vector_name,
	       (alpha_using_srm ? "SRM" : "MILO"));

	printk("Major Options: "
#ifdef CONFIG_SMP
	       "SMP "
#endif
#ifdef CONFIG_ALPHA_EV56
	       "EV56 "
#endif
#ifdef CONFIG_ALPHA_EV67
	       "EV67 "
#endif
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
	       "LEGACY_START "
#endif
#ifdef CONFIG_VERBOSE_MCHECK
	       "VERBOSE_MCHECK "
#endif

#ifdef CONFIG_DISCONTIGMEM
	       "DISCONTIGMEM "
#ifdef CONFIG_NUMA
	       "NUMA "
#endif
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
	       "DEBUG_SPINLOCK "
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	       "MAGIC_SYSRQ "
#endif
	       "\n");

	printk("Command line: %s\n", command_line);

	/*
	 * Sync up the HAE.
	 * Save the SRM's current value for restoration.
	 */
	srm_hae = *alpha_mv.hae_register;
	__set_hae(alpha_mv.hae_cache);

	/* Reset enable correctable error reports.  */
	wrmces(0x7);

	/* Find our memory.  */
	setup_memory(kernel_end);

	/* First guess at cpu cache sizes.  Do this before init_arch.  */
	determine_cpu_caches(cpu->type);

	/* Initialize the machine.  Usually has to do with setting up
	   DMA windows and the like.  */
	if (alpha_mv.init_arch)
		alpha_mv.init_arch();

	/* Reserve standard resources.  */
	reserve_std_resources();

	/*
	 * Give us a default console.  TGA users will see nothing until
	 * chr_dev_init is called, rather late in the boot sequence.
	 */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Default root filesystem to sda2.  */
	ROOT_DEV = Root_SDA2;

#ifdef CONFIG_EISA
	/* FIXME:  only set this when we actually have EISA in this box? */
	EISA_bus = 1;
#endif

	/*
	 * Check ASN in HWRPB for validity, report if bad.
	 * FIXME: how was this failing?  Should we trust it instead,
	 * and copy the value into alpha_mv.max_asn?
	 */

	if (hwrpb->max_asn != MAX_ASN) {
		printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
	}

	/*
	 * Identify the flock of penguins.
	 */

#ifdef CONFIG_SMP
	setup_smp();
#endif
	paging_init();
}
731 | |||
732 | void __init | ||
733 | disable_early_printk(void) | ||
734 | { | ||
735 | if (alpha_using_srm && srmcons_output) { | ||
736 | unregister_srm_console(); | ||
737 | srmcons_output = 0; | ||
738 | } | ||
739 | } | ||
740 | |||
/* Human-readable platform names.  systype_names is indexed directly by
   hwrpb->sys_type; the per-family *_indices tables map the member-ID
   bit-field of sys_variation to a name/vector slot (see get_sysvec()
   below; presumably get_sysnames() uses the name tables the same way
   -- confirm against its definition).  */
static char sys_unknown[] = "Unknown";
static char systype_names[][16] = {
	"0",
	"ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
	"Pelican", "Morgan", "Sable", "Medulla", "Noname",
	"Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
	"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
	"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
	"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
	"Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
};

/* Types biased by ST_UNOFFICIAL_BIAS (100) and ST_API_BIAS (200).  */
static char unofficial_names[][8] = {"100", "Ruffian"};

static char api_names[][16] = {"200", "Nautilus"};

static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};

static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};

static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
static int eb64p_indices[] = {0,0,1,2};

static char eb66_names[][8] = {"EB66", "EB66+"};
static int eb66_indices[] = {0,0,1};

static char marvel_names[][16] = {
	"Marvel/EV7"
};
static int marvel_indices[] = { 0 };

static char rawhide_names[][16] = {
	"Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
};
static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};

static char titan_names[][16] = {
	"DEFAULT", "Privateer", "Falcon", "Granite"
};
static int titan_indices[] = {0,1,2,2,3};

static char tsunami_names[][16] = {
	"0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
	"Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
	"Flying Clipper", "Shark"
};
static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
790 | |||
791 | static struct alpha_machine_vector * __init | ||
792 | get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu) | ||
793 | { | ||
794 | static struct alpha_machine_vector *systype_vecs[] __initdata = | ||
795 | { | ||
796 | NULL, /* 0 */ | ||
797 | NULL, /* ADU */ | ||
798 | NULL, /* Cobra */ | ||
799 | NULL, /* Ruby */ | ||
800 | NULL, /* Flamingo */ | ||
801 | NULL, /* Mannequin */ | ||
802 | &jensen_mv, | ||
803 | NULL, /* Pelican */ | ||
804 | NULL, /* Morgan */ | ||
805 | NULL, /* Sable -- see below. */ | ||
806 | NULL, /* Medulla */ | ||
807 | &noname_mv, | ||
808 | NULL, /* Turbolaser */ | ||
809 | &avanti_mv, | ||
810 | NULL, /* Mustang */ | ||
811 | NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */ | ||
812 | NULL, /* Tradewind */ | ||
813 | NULL, /* Mikasa -- see below. */ | ||
814 | NULL, /* EB64 */ | ||
815 | NULL, /* EB66 -- see variation. */ | ||
816 | NULL, /* EB64+ -- see variation. */ | ||
817 | &alphabook1_mv, | ||
818 | &rawhide_mv, | ||
819 | NULL, /* K2 */ | ||
820 | &lynx_mv, /* Lynx */ | ||
821 | &xl_mv, | ||
822 | NULL, /* EB164 -- see variation. */ | ||
823 | NULL, /* Noritake -- see below. */ | ||
824 | NULL, /* Cortex */ | ||
825 | NULL, /* 29 */ | ||
826 | &miata_mv, | ||
827 | NULL, /* XXM */ | ||
828 | &takara_mv, | ||
829 | NULL, /* Yukon */ | ||
830 | NULL, /* Tsunami -- see variation. */ | ||
831 | &wildfire_mv, /* Wildfire */ | ||
832 | NULL, /* CUSCO */ | ||
833 | &eiger_mv, /* Eiger */ | ||
834 | NULL, /* Titan */ | ||
835 | NULL, /* Marvel */ | ||
836 | }; | ||
837 | |||
838 | static struct alpha_machine_vector *unofficial_vecs[] __initdata = | ||
839 | { | ||
840 | NULL, /* 100 */ | ||
841 | &ruffian_mv, | ||
842 | }; | ||
843 | |||
844 | static struct alpha_machine_vector *api_vecs[] __initdata = | ||
845 | { | ||
846 | NULL, /* 200 */ | ||
847 | &nautilus_mv, | ||
848 | }; | ||
849 | |||
850 | static struct alpha_machine_vector *alcor_vecs[] __initdata = | ||
851 | { | ||
852 | &alcor_mv, &xlt_mv, &xlt_mv | ||
853 | }; | ||
854 | |||
855 | static struct alpha_machine_vector *eb164_vecs[] __initdata = | ||
856 | { | ||
857 | &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv | ||
858 | }; | ||
859 | |||
860 | static struct alpha_machine_vector *eb64p_vecs[] __initdata = | ||
861 | { | ||
862 | &eb64p_mv, | ||
863 | &cabriolet_mv, | ||
864 | &cabriolet_mv /* AlphaPCI64 */ | ||
865 | }; | ||
866 | |||
867 | static struct alpha_machine_vector *eb66_vecs[] __initdata = | ||
868 | { | ||
869 | &eb66_mv, | ||
870 | &eb66p_mv | ||
871 | }; | ||
872 | |||
873 | static struct alpha_machine_vector *marvel_vecs[] __initdata = | ||
874 | { | ||
875 | &marvel_ev7_mv, | ||
876 | }; | ||
877 | |||
878 | static struct alpha_machine_vector *titan_vecs[] __initdata = | ||
879 | { | ||
880 | &titan_mv, /* default */ | ||
881 | &privateer_mv, /* privateer */ | ||
882 | &titan_mv, /* falcon */ | ||
883 | &privateer_mv, /* granite */ | ||
884 | }; | ||
885 | |||
886 | static struct alpha_machine_vector *tsunami_vecs[] __initdata = | ||
887 | { | ||
888 | NULL, | ||
889 | &dp264_mv, /* dp264 */ | ||
890 | &dp264_mv, /* warhol */ | ||
891 | &dp264_mv, /* windjammer */ | ||
892 | &monet_mv, /* monet */ | ||
893 | &clipper_mv, /* clipper */ | ||
894 | &dp264_mv, /* goldrush */ | ||
895 | &webbrick_mv, /* webbrick */ | ||
896 | &dp264_mv, /* catamaran */ | ||
897 | NULL, /* brisbane? */ | ||
898 | NULL, /* melbourne? */ | ||
899 | NULL, /* flying clipper? */ | ||
900 | &shark_mv, /* shark */ | ||
901 | }; | ||
902 | |||
903 | /* ??? Do we need to distinguish between Rawhides? */ | ||
904 | |||
905 | struct alpha_machine_vector *vec; | ||
906 | |||
907 | /* Search the system tables first... */ | ||
908 | vec = NULL; | ||
909 | if (type < N(systype_vecs)) { | ||
910 | vec = systype_vecs[type]; | ||
911 | } else if ((type > ST_API_BIAS) && | ||
912 | (type - ST_API_BIAS) < N(api_vecs)) { | ||
913 | vec = api_vecs[type - ST_API_BIAS]; | ||
914 | } else if ((type > ST_UNOFFICIAL_BIAS) && | ||
915 | (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) { | ||
916 | vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; | ||
917 | } | ||
918 | |||
919 | /* If we've not found one, try for a variation. */ | ||
920 | |||
921 | if (!vec) { | ||
922 | /* Member ID is a bit-field. */ | ||
923 | unsigned long member = (variation >> 10) & 0x3f; | ||
924 | |||
925 | cpu &= 0xffffffff; /* make it usable */ | ||
926 | |||
927 | switch (type) { | ||
928 | case ST_DEC_ALCOR: | ||
929 | if (member < N(alcor_indices)) | ||
930 | vec = alcor_vecs[alcor_indices[member]]; | ||
931 | break; | ||
932 | case ST_DEC_EB164: | ||
933 | if (member < N(eb164_indices)) | ||
934 | vec = eb164_vecs[eb164_indices[member]]; | ||
935 | /* PC164 may show as EB164 variation with EV56 CPU, | ||
936 | but, since no true EB164 had anything but EV5... */ | ||
937 | if (vec == &eb164_mv && cpu == EV56_CPU) | ||
938 | vec = &pc164_mv; | ||
939 | break; | ||
940 | case ST_DEC_EB64P: | ||
941 | if (member < N(eb64p_indices)) | ||
942 | vec = eb64p_vecs[eb64p_indices[member]]; | ||
943 | break; | ||
944 | case ST_DEC_EB66: | ||
945 | if (member < N(eb66_indices)) | ||
946 | vec = eb66_vecs[eb66_indices[member]]; | ||
947 | break; | ||
948 | case ST_DEC_MARVEL: | ||
949 | if (member < N(marvel_indices)) | ||
950 | vec = marvel_vecs[marvel_indices[member]]; | ||
951 | break; | ||
952 | case ST_DEC_TITAN: | ||
953 | vec = titan_vecs[0]; /* default */ | ||
954 | if (member < N(titan_indices)) | ||
955 | vec = titan_vecs[titan_indices[member]]; | ||
956 | break; | ||
957 | case ST_DEC_TSUNAMI: | ||
958 | if (member < N(tsunami_indices)) | ||
959 | vec = tsunami_vecs[tsunami_indices[member]]; | ||
960 | break; | ||
961 | case ST_DEC_1000: | ||
962 | if (cpu == EV5_CPU || cpu == EV56_CPU) | ||
963 | vec = &mikasa_primo_mv; | ||
964 | else | ||
965 | vec = &mikasa_mv; | ||
966 | break; | ||
967 | case ST_DEC_NORITAKE: | ||
968 | if (cpu == EV5_CPU || cpu == EV56_CPU) | ||
969 | vec = &noritake_primo_mv; | ||
970 | else | ||
971 | vec = &noritake_mv; | ||
972 | break; | ||
973 | case ST_DEC_2100_A500: | ||
974 | if (cpu == EV5_CPU || cpu == EV56_CPU) | ||
975 | vec = &sable_gamma_mv; | ||
976 | else | ||
977 | vec = &sable_mv; | ||
978 | break; | ||
979 | } | ||
980 | } | ||
981 | return vec; | ||
982 | } | ||
983 | |||
984 | static struct alpha_machine_vector * __init | ||
985 | get_sysvec_byname(const char *name) | ||
986 | { | ||
987 | static struct alpha_machine_vector *all_vecs[] __initdata = | ||
988 | { | ||
989 | &alcor_mv, | ||
990 | &alphabook1_mv, | ||
991 | &avanti_mv, | ||
992 | &cabriolet_mv, | ||
993 | &clipper_mv, | ||
994 | &dp264_mv, | ||
995 | &eb164_mv, | ||
996 | &eb64p_mv, | ||
997 | &eb66_mv, | ||
998 | &eb66p_mv, | ||
999 | &eiger_mv, | ||
1000 | &jensen_mv, | ||
1001 | &lx164_mv, | ||
1002 | &lynx_mv, | ||
1003 | &miata_mv, | ||
1004 | &mikasa_mv, | ||
1005 | &mikasa_primo_mv, | ||
1006 | &monet_mv, | ||
1007 | &nautilus_mv, | ||
1008 | &noname_mv, | ||
1009 | &noritake_mv, | ||
1010 | &noritake_primo_mv, | ||
1011 | &p2k_mv, | ||
1012 | &pc164_mv, | ||
1013 | &privateer_mv, | ||
1014 | &rawhide_mv, | ||
1015 | &ruffian_mv, | ||
1016 | &rx164_mv, | ||
1017 | &sable_mv, | ||
1018 | &sable_gamma_mv, | ||
1019 | &shark_mv, | ||
1020 | &sx164_mv, | ||
1021 | &takara_mv, | ||
1022 | &webbrick_mv, | ||
1023 | &wildfire_mv, | ||
1024 | &xl_mv, | ||
1025 | &xlt_mv | ||
1026 | }; | ||
1027 | |||
1028 | size_t i; | ||
1029 | |||
1030 | for (i = 0; i < N(all_vecs); ++i) { | ||
1031 | struct alpha_machine_vector *mv = all_vecs[i]; | ||
1032 | if (strcasecmp(mv->vector_name, name) == 0) | ||
1033 | return mv; | ||
1034 | } | ||
1035 | return NULL; | ||
1036 | } | ||
1037 | |||
1038 | static void | ||
1039 | get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu, | ||
1040 | char **type_name, char **variation_name) | ||
1041 | { | ||
1042 | unsigned long member; | ||
1043 | |||
1044 | /* If not in the tables, make it UNKNOWN, | ||
1045 | else set type name to family */ | ||
1046 | if (type < N(systype_names)) { | ||
1047 | *type_name = systype_names[type]; | ||
1048 | } else if ((type > ST_API_BIAS) && | ||
1049 | (type - ST_API_BIAS) < N(api_names)) { | ||
1050 | *type_name = api_names[type - ST_API_BIAS]; | ||
1051 | } else if ((type > ST_UNOFFICIAL_BIAS) && | ||
1052 | (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) { | ||
1053 | *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; | ||
1054 | } else { | ||
1055 | *type_name = sys_unknown; | ||
1056 | *variation_name = sys_unknown; | ||
1057 | return; | ||
1058 | } | ||
1059 | |||
1060 | /* Set variation to "0"; if variation is zero, done. */ | ||
1061 | *variation_name = systype_names[0]; | ||
1062 | if (variation == 0) { | ||
1063 | return; | ||
1064 | } | ||
1065 | |||
1066 | member = (variation >> 10) & 0x3f; /* member ID is a bit-field */ | ||
1067 | |||
1068 | cpu &= 0xffffffff; /* make it usable */ | ||
1069 | |||
1070 | switch (type) { /* select by family */ | ||
1071 | default: /* default to variation "0" for now */ | ||
1072 | break; | ||
1073 | case ST_DEC_EB164: | ||
1074 | if (member < N(eb164_indices)) | ||
1075 | *variation_name = eb164_names[eb164_indices[member]]; | ||
1076 | /* PC164 may show as EB164 variation, but with EV56 CPU, | ||
1077 | so, since no true EB164 had anything but EV5... */ | ||
1078 | if (eb164_indices[member] == 0 && cpu == EV56_CPU) | ||
1079 | *variation_name = eb164_names[1]; /* make it PC164 */ | ||
1080 | break; | ||
1081 | case ST_DEC_ALCOR: | ||
1082 | if (member < N(alcor_indices)) | ||
1083 | *variation_name = alcor_names[alcor_indices[member]]; | ||
1084 | break; | ||
1085 | case ST_DEC_EB64P: | ||
1086 | if (member < N(eb64p_indices)) | ||
1087 | *variation_name = eb64p_names[eb64p_indices[member]]; | ||
1088 | break; | ||
1089 | case ST_DEC_EB66: | ||
1090 | if (member < N(eb66_indices)) | ||
1091 | *variation_name = eb66_names[eb66_indices[member]]; | ||
1092 | break; | ||
1093 | case ST_DEC_MARVEL: | ||
1094 | if (member < N(marvel_indices)) | ||
1095 | *variation_name = marvel_names[marvel_indices[member]]; | ||
1096 | break; | ||
1097 | case ST_DEC_RAWHIDE: | ||
1098 | if (member < N(rawhide_indices)) | ||
1099 | *variation_name = rawhide_names[rawhide_indices[member]]; | ||
1100 | break; | ||
1101 | case ST_DEC_TITAN: | ||
1102 | *variation_name = titan_names[0]; /* default */ | ||
1103 | if (member < N(titan_indices)) | ||
1104 | *variation_name = titan_names[titan_indices[member]]; | ||
1105 | break; | ||
1106 | case ST_DEC_TSUNAMI: | ||
1107 | if (member < N(tsunami_indices)) | ||
1108 | *variation_name = tsunami_names[tsunami_indices[member]]; | ||
1109 | break; | ||
1110 | } | ||
1111 | } | ||
1112 | |||
/*
 * A change was made to the HWRPB via an ECO and the following code
 * tracks a part of the ECO.  In HWRPB versions less than 5, the ECO
 * was not implemented in the console firmware.  If it's revision 5 or
 * greater we can get the name of the platform as an ASCII string from
 * the HWRPB.  That's what this function does.  It checks the revision
 * level and if the string is in the HWRPB it returns the address of
 * the string--a pointer to the name of the platform.
 *
 * Returns:
 *	- Pointer to an ASCII string if it's in the HWRPB
 *	- Pointer to a blank string if the data is not in the HWRPB.
 */
1126 | |||
1127 | static char * | ||
1128 | platform_string(void) | ||
1129 | { | ||
1130 | struct dsr_struct *dsr; | ||
1131 | static char unk_system_string[] = "N/A"; | ||
1132 | |||
1133 | /* Go to the console for the string pointer. | ||
1134 | * If the rpb_vers is not 5 or greater the rpb | ||
1135 | * is old and does not have this data in it. | ||
1136 | */ | ||
1137 | if (hwrpb->revision < 5) | ||
1138 | return (unk_system_string); | ||
1139 | else { | ||
1140 | /* The Dynamic System Recognition struct | ||
1141 | * has the system platform name starting | ||
1142 | * after the character count of the string. | ||
1143 | */ | ||
1144 | dsr = ((struct dsr_struct *) | ||
1145 | ((char *)hwrpb + hwrpb->dsr_offset)); | ||
1146 | return ((char *)dsr + (dsr->sysname_off + | ||
1147 | sizeof(long))); | ||
1148 | } | ||
1149 | } | ||
1150 | |||
1151 | static int | ||
1152 | get_nr_processors(struct percpu_struct *cpubase, unsigned long num) | ||
1153 | { | ||
1154 | struct percpu_struct *cpu; | ||
1155 | unsigned long i; | ||
1156 | int count = 0; | ||
1157 | |||
1158 | for (i = 0; i < num; i++) { | ||
1159 | cpu = (struct percpu_struct *) | ||
1160 | ((char *)cpubase + i*hwrpb->processor_size); | ||
1161 | if ((cpu->flags & 0x1cc) == 0x1cc) | ||
1162 | count++; | ||
1163 | } | ||
1164 | return count; | ||
1165 | } | ||
1166 | |||
/* Print one cache line for /proc/cpuinfo.  'shape' is CSHAPE-encoded:
   -1 = cache not present, 0 = unknown, else size/way/line packed.  */
static void
show_cache_size (struct seq_file *f, const char *which, int shape)
{
	if (shape == -1) {
		seq_printf (f, "%s\t\t: n/a\n", which);
		return;
	}
	if (shape == 0) {
		seq_printf (f, "%s\t\t: unknown\n", which);
		return;
	}
	seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
		    which, shape >> 10, shape & 15,
		    1 << ((shape >> 4) & 15));
}
1179 | |||
/*
 * seq_file "show" callback for /proc/cpuinfo.  Only CPU #0's per-cpu
 * slot is ever handed in (see c_start/c_next below), so one entry is
 * printed regardless of CPU count.
 */
static int
show_cpuinfo(struct seq_file *f, void *slot)
{
	/* Unaligned-access statistics maintained elsewhere in the
	   kernel; per the printout below, [0] = kernel, [1] = user.  */
	extern struct unaligned_stat {
		unsigned long count, va, pc;
	} unaligned[2];

	/* Indexed by (HWRPB cpu type code - 1).  */
	static char cpu_names[][8] = {
		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
		"EV68CX", "EV7", "EV79", "EV69"
	};

	struct percpu_struct *cpu = slot;
	unsigned int cpu_index;
	char *cpu_name;
	char *systype_name;
	char *sysvariation_name;
	int nr_processors;

	/* HWRPB type codes are 1-based; the name table is 0-based.
	   Out-of-range codes fall through as "Unknown".  */
	cpu_index = (unsigned) (cpu->type - 1);
	cpu_name = "Unknown";
	if (cpu_index < N(cpu_names))
		cpu_name = cpu_names[cpu_index];

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &systype_name, &sysvariation_name);

	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);

	/* 'est_cycle_freq ? : ...' is the GNU ?: extension: prefer the
	   measured frequency, falling back to the HWRPB value.  The
	   intr_freq value is stored scaled by 4096; the second operand
	   recovers two fractional digits.  BogoMIPS uses the standard
	   loops_per_jiffy formula.  */
	seq_printf(f, "cpu\t\t\t: Alpha\n"
		      "cpu model\t\t: %s\n"
		      "cpu variation\t\t: %ld\n"
		      "cpu revision\t\t: %ld\n"
		      "cpu serial number\t: %s\n"
		      "system type\t\t: %s\n"
		      "system variation\t: %s\n"
		      "system revision\t\t: %ld\n"
		      "system serial number\t: %s\n"
		      "cycle frequency [Hz]\t: %lu %s\n"
		      "timer frequency [Hz]\t: %lu.%02lu\n"
		      "page size [bytes]\t: %ld\n"
		      "phys. address bits\t: %ld\n"
		      "max. addr. space #\t: %ld\n"
		      "BogoMIPS\t\t: %lu.%02lu\n"
		      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "platform string\t\t: %s\n"
		      "cpus detected\t\t: %d\n",
		       cpu_name, cpu->variation, cpu->revision,
		       (char*)cpu->serial_no,
		       systype_name, sysvariation_name, hwrpb->sys_revision,
		       (char*)hwrpb->ssn,
		       est_cycle_freq ? : hwrpb->cycle_freq,
		       est_cycle_freq ? "est." : "",
		       hwrpb->intr_freq / 4096,
		       (100 * hwrpb->intr_freq / 4096) % 100,
		       hwrpb->pagesize,
		       hwrpb->pa_bits,
		       hwrpb->max_asn,
		       loops_per_jiffy / (500000/HZ),
		       (loops_per_jiffy / (5000/HZ)) % 100,
		       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
		       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
		       platform_string(), nr_processors);

#ifdef CONFIG_SMP
	seq_printf(f, "cpus active\t\t: %d\n"
		      "cpu active mask\t\t: %016lx\n",
		   num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
#endif

	/* Cache shapes were filled in by determine_cpu_caches().  */
	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
	show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
	show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
	show_cache_size (f, "L3 cache", alpha_l3_cacheshape);

	return 0;
}
1259 | |||
/*
 * Time 'size / stride' serially-dependent loads spanning 'size' bytes
 * starting at 'addr', using the rpcc cycle counter, and return the
 * average per-load latency in cycles.  Used by external_cache_probe()
 * to detect the point where a block no longer fits in a cache.
 */
static int __init
read_mem_block(int *addr, int stride, int size)
{
	long nloads = size / stride, cnt, tmp;

	/* %0 = cycle count, %1 = loads remaining, %2 = current address,
	   %3 = loaded value / final rpcc reading.  */
	__asm__ __volatile__(
	"	rpcc %0\n"
	"1:	ldl %3,0(%2)\n"
	"	subq %1,1,%1\n"
	/* Next two XORs introduce an explicit data dependency between
	   consecutive loads in the loop, which will give us true load
	   latency. */
	"	xor %3,%2,%2\n"
	"	xor %3,%2,%2\n"
	"	addq %2,%4,%2\n"
	"	bne %1,1b\n"
	"	rpcc %3\n"
	"	subl %3,%0,%0\n"
	: "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
	: "r" (stride), "1" (nloads), "2" (addr));

	/* Total elapsed cycles divided by number of loads.  */
	return cnt / (size / stride);
}
1283 | |||
/* Pack a cache description into a single int: total size in bytes with
   the low 8 bits dropped, log2(line size) in bits 4..7, associativity
   in bits 0..3.  Decoded for display by show_cache_size().  */
#define CSHAPE(totalsize, linesize, assoc) \
  ((totalsize & ~0xff) | (linesize << 4) | assoc)

/* ??? EV5 supports up to 64M, but did the systems with more than
   16M of BCACHE ever exist? */
#define MAX_BCACHE_SIZE	16*1024*1024
1290 | |||
/* Note that the offchip caches are direct mapped on all Alphas. */
/*
 * Probe for a board-level (off-chip) cache by timing loads over
 * progressively larger blocks: once a block exceeds the cache, the
 * average load latency jumps sharply.  Returns the CSHAPE-encoded
 * cache description, or -1 if no Bcache is found.
 *
 * minsize: smallest cache size (bytes) to consider.
 * width:   log2 of the cache line size; also sets the probe stride.
 */
static int __init
external_cache_probe(int minsize, int width)
{
	int cycles, prev_cycles = 1000000;	/* sentinel: first pass never "exceeds" */
	int stride = 1 << width;
	long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;

	/* Never probe beyond the end of usable low memory.  */
	if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
		maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT);

	/* Get the first block cached. */
	read_mem_block(__va(0), stride, size);

	while (size < maxsize) {
		/* Get an average load latency in cycles. */
		cycles = read_mem_block(__va(0), stride, size);
		if (cycles > prev_cycles * 2) {
			/* Fine, we exceed the cache. */
			printk("%ldK Bcache detected; load hit latency %d "
			       "cycles, load miss latency %d cycles\n",
			       size >> 11, prev_cycles, cycles);
			/* The last size that still fit was size/2.  */
			return CSHAPE(size >> 1, width, 1);
		}
		/* Try to get the next block cached. */
		read_mem_block(__va(size), stride, size);
		prev_cycles = cycles;
		size <<= 1;
	}
	return -1;		/* No BCACHE found. */
}
1322 | |||
/*
 * Determine the cache shapes for this CPU family and record them in
 * the alpha_*_cacheshape globals (CSHAPE-encoded; -1 = no such cache,
 * 0 = nothing known about this CPU type).  On-chip sizes are fixed
 * per family; off-chip caches are measured via external_cache_probe()
 * where the configuration registers cannot be read.
 */
static void __init
determine_cpu_caches (unsigned int cpu_type)
{
	int L1I, L1D, L2, L3;

	switch (cpu_type) {
	case EV4_CPU:
	case EV45_CPU:
	  {
		/* EV4: 8K I+D; EV45: 16K I+D; unified direct-mapped
		   on-chip caches with 32-byte (2^5) lines.  */
		if (cpu_type == EV4_CPU)
			L1I = CSHAPE(8*1024, 5, 1);
		else
			L1I = CSHAPE(16*1024, 5, 1);
		L1D = L1I;
		L3 = -1;

		/* BIU_CTL is a write-only Abox register.  PALcode has a
		   shadow copy, and may be available from some versions
		   of the CSERVE PALcall.  If we can get it, then

			unsigned long biu_ctl, size;
			size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
			L2 = CSHAPE (size, 5, 1);

		   Unfortunately, we can't rely on that.
		*/
		L2 = external_cache_probe(128*1024, 5);
		break;
	  }

	case LCA4_CPU:
	  {
		unsigned long car, size;

		L1I = L1D = CSHAPE(8*1024, 5, 1);
		L3 = -1;

		/* The cache configuration register is readable here.  */
		car = *(vuip) phys_to_virt (0x120000078UL);
		size = 64*1024 * (1 << ((car >> 5) & 7));
		/* No typo -- 8 byte cacheline size.  Whodathunk.  */
		L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
		break;
	  }

	case EV5_CPU:
	case EV56_CPU:
	  {
		unsigned long sc_ctl, width;

		L1I = L1D = CSHAPE(8*1024, 5, 1);

		/* Check the line size of the Scache. */
		sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
		width = sc_ctl & 0x1000 ? 6 : 5;
		L2 = CSHAPE (96*1024, width, 3);

		/* BC_CONTROL and BC_CONFIG are write-only IPRs.  PALcode
		   has a shadow copy, and may be available from some versions
		   of the CSERVE PALcall.  If we can get it, then

			unsigned long bc_control, bc_config, size;
			size = 1024*1024 * (1 << ((bc_config & 7) - 1));
			L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);

		   Unfortunately, we can't rely on that.
		*/
		L3 = external_cache_probe(1024*1024, width);
		break;
	  }

	case PCA56_CPU:
	case PCA57_CPU:
	  {
		unsigned long cbox_config, size;

		if (cpu_type == PCA56_CPU) {
			L1I = CSHAPE(16*1024, 6, 1);
			L1D = CSHAPE(8*1024, 5, 1);
		} else {
			L1I = CSHAPE(32*1024, 6, 2);
			L1D = CSHAPE(16*1024, 5, 1);
		}
		L3 = -1;

		cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
		size = 512*1024 * (1 << ((cbox_config >> 12) & 3));

		/* The register-derived size is computed above but the
		   probe is used instead (see the disabled branch).  */
#if 0
		L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
#else
		L2 = external_cache_probe(512*1024, 6);
#endif
		break;
	  }

	case EV6_CPU:
	case EV67_CPU:
	case EV68CB_CPU:
	case EV68AL_CPU:
	case EV68CX_CPU:
	case EV69_CPU:
		L1I = L1D = CSHAPE(64*1024, 6, 2);
		L2 = external_cache_probe(1024*1024, 6);
		L3 = -1;
		break;

	case EV7_CPU:
	case EV79_CPU:
		L1I = L1D = CSHAPE(64*1024, 6, 2);
		L2 = CSHAPE(7*1024*1024/4, 6, 7);
		L3 = -1;
		break;

	default:
		/* Nothing known about this cpu type. */
		L1I = L1D = L2 = L3 = 0;
		break;
	}

	alpha_l1i_cacheshape = L1I;
	alpha_l1d_cacheshape = L1D;
	alpha_l2_cacheshape = L2;
	alpha_l3_cacheshape = L3;
}
1447 | |||
1448 | /* | ||
1449 | * We show only CPU #0 info. | ||
1450 | */ | ||
1451 | static void * | ||
1452 | c_start(struct seq_file *f, loff_t *pos) | ||
1453 | { | ||
1454 | return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset; | ||
1455 | } | ||
1456 | |||
/* Only one record is ever produced, so there is never a "next".  */
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	return NULL;
}
1462 | |||
/* Nothing to clean up after iteration.  */
static void
c_stop(struct seq_file *f, void *v)
{
}
1467 | |||
/* seq_file operations backing /proc/cpuinfo on Alpha.  */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
1474 | |||
1475 | |||
/*
 * Panic-notifier callback.  When running under SRM with the serial
 * console, hard-halt back to the firmware so the panic message stays
 * on screen instead of the machine rebooting past it.
 */
static int
alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
#if 1
	/* FIXME FIXME FIXME */
	/* If we are using SRM and serial console, just hard halt here. */
	if (alpha_using_srm && srmcons_output)
		__halt();
#endif
	return NOTIFY_DONE;
}
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c new file mode 100644 index 000000000000..08fe8071a7f8 --- /dev/null +++ b/arch/alpha/kernel/signal.c | |||
@@ -0,0 +1,672 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/signal.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * | ||
6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson | ||
7 | */ | ||
8 | |||
9 | #include <linux/sched.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/signal.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/ptrace.h> | ||
15 | #include <linux/unistd.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/stddef.h> | ||
20 | #include <linux/tty.h> | ||
21 | #include <linux/binfmts.h> | ||
22 | #include <linux/bitops.h> | ||
23 | |||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/sigcontext.h> | ||
26 | #include <asm/ucontext.h> | ||
27 | |||
28 | #include "proto.h" | ||
29 | |||
30 | |||
/* Set non-zero for verbose signal-delivery debugging.  */
#define DEBUG_SIG 0

/* Mask of signals user code may block: everything except SIGKILL and
   SIGSTOP.  */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* Assembly return path (entry.S); sigreturn resumes through it.  */
asmlinkage void ret_from_sys_call(void);
static int do_signal(sigset_t *, struct pt_regs *, struct switch_stack *,
		     unsigned long, unsigned long);
38 | |||
39 | |||
/*
 * The OSF/1 sigprocmask calling sequence is different from the
 * C sigprocmask() sequence..
 *
 * how:
 * 1 - SIG_BLOCK
 * 2 - SIG_UNBLOCK
 * 3 - SIG_SETMASK
 *
 * We change the range to -1 .. 1 in order to let gcc easily
 * use the conditional move instructions.
 *
 * Note that we don't need to acquire the kernel lock for SMP
 * operation, as all of this is local to this thread.
 */
asmlinkage unsigned long
do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs)
{
	unsigned long oldmask = -EINVAL;

	/* Unsigned trick: accepts exactly how in {1, 2, 3}.  */
	if ((unsigned long)how-1 <= 2) {
		long sign = how-2;	/* -1 .. 1 */
		unsigned long block, unblock;

		newmask &= _BLOCKABLE;
		spin_lock_irq(&current->sighand->siglock);
		oldmask = current->blocked.sig[0];

		/* Compute both candidate masks, then select branchlessly:
		   sign < 0 (SIG_BLOCK)   -> old | new
		   sign == 0 (SIG_UNBLOCK) -> old & ~new
		   sign > 0 (SIG_SETMASK)  -> new as given.  */
		unblock = oldmask & ~newmask;
		block = oldmask | newmask;
		if (!sign)
			block = unblock;
		if (sign <= 0)
			newmask = block;
		if (_NSIG_WORDS > 1 && sign > 0)
			sigemptyset(&current->blocked);
		current->blocked.sig[0] = newmask;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);

		regs->r0 = 0;		/* special no error return */
	}
	/* Old mask on success, -EINVAL for a bad 'how'.  */
	return oldmask;
}
84 | |||
85 | asmlinkage int | ||
86 | osf_sigaction(int sig, const struct osf_sigaction __user *act, | ||
87 | struct osf_sigaction __user *oact) | ||
88 | { | ||
89 | struct k_sigaction new_ka, old_ka; | ||
90 | int ret; | ||
91 | |||
92 | if (act) { | ||
93 | old_sigset_t mask; | ||
94 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
95 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
96 | __get_user(new_ka.sa.sa_flags, &act->sa_flags)) | ||
97 | return -EFAULT; | ||
98 | __get_user(mask, &act->sa_mask); | ||
99 | siginitset(&new_ka.sa.sa_mask, mask); | ||
100 | new_ka.ka_restorer = NULL; | ||
101 | } | ||
102 | |||
103 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
104 | |||
105 | if (!ret && oact) { | ||
106 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
107 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
108 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) | ||
109 | return -EFAULT; | ||
110 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
111 | } | ||
112 | |||
113 | return ret; | ||
114 | } | ||
115 | |||
116 | asmlinkage long | ||
117 | sys_rt_sigaction(int sig, const struct sigaction __user *act, | ||
118 | struct sigaction __user *oact, | ||
119 | size_t sigsetsize, void __user *restorer) | ||
120 | { | ||
121 | struct k_sigaction new_ka, old_ka; | ||
122 | int ret; | ||
123 | |||
124 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
125 | if (sigsetsize != sizeof(sigset_t)) | ||
126 | return -EINVAL; | ||
127 | |||
128 | if (act) { | ||
129 | new_ka.ka_restorer = restorer; | ||
130 | if (copy_from_user(&new_ka.sa, act, sizeof(*act))) | ||
131 | return -EFAULT; | ||
132 | } | ||
133 | |||
134 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
135 | |||
136 | if (!ret && oact) { | ||
137 | if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) | ||
138 | return -EFAULT; | ||
139 | } | ||
140 | |||
141 | return ret; | ||
142 | } | ||
143 | |||
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
{
	sigset_t oldset;

	/* SIGKILL and SIGSTOP can never be blocked.  */
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Indicate EINTR on return from any possible signal handler,
	   which will not come back through here, but via sigreturn.  */
	regs->r0 = EINTR;
	regs->r19 = 1;

	/* Sleep until a signal arrives, then deliver it against the
	   saved (pre-suspend) mask.  */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&oldset, regs, sw, 0, 0))
			return -EINTR;
	}
}
171 | |||
/* RT variant of sigsuspend: the mask is a full user-space sigset_t
   instead of a single word.  */
asmlinkage int
do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
		 struct pt_regs *regs, struct switch_stack *sw)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&set, uset, sizeof(set)))
		return -EFAULT;

	/* SIGKILL and SIGSTOP can never be blocked.  */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Indicate EINTR on return from any possible signal handler,
	   which will not come back through here, but via sigreturn.  */
	regs->r0 = EINTR;
	regs->r19 = 1;

	/* Sleep until a signal arrives, then deliver it against the
	   saved (pre-suspend) mask.  */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&oldset, regs, sw, 0, 0))
			return -EINTR;
	}
}
203 | |||
/* Install and/or query the alternate signal stack.  The current user
   stack pointer is needed to decide whether we are already running on
   the alternate stack.  */
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
	return do_sigaltstack(uss, uoss, rdusp());
}
209 | |||
/*
 * Do a signal return; undo the signal stack.
 */

#if _NSIG_WORDS > 1
# error "Non SA_SIGINFO frame needs rearranging"
#endif

/* User-stack frame built for a non-RT signal handler.  */
struct sigframe
{
	struct sigcontext sc;
	/* Space for trampoline instructions — presumably the sigreturn
	   callsys built from the INSN_* opcodes below; setup code is
	   outside this view.  */
	unsigned int retcode[3];
};

/* User-stack frame built for an SA_SIGINFO (RT) signal handler.  */
struct rt_sigframe
{
	struct siginfo info;
	struct ucontext uc;
	/* Trampoline space, as in struct sigframe above.  */
	unsigned int retcode[3];
};
230 | |||
/* If this changes, userland unwinders that Know Things about our signal
   frame will break.  Do not undertake lightly.  It also implies an ABI
   change wrt the size of siginfo_t, which may cause some pain.  */
/* Pre-C11 compile-time assertion: the array size is negative (a
   compile error) unless uc_mcontext sits at offset 176.  */
extern char compile_time_assert
	[offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1];

/* Alpha opcodes used to assemble the signal-return trampoline:
   mov $30,$16 (stack ptr to first arg), ldi $0,#n (syscall number),
   and the callsys PALcall.  */
#define INSN_MOV_R30_R16	0x47fe0410
#define INSN_LDI_R0		0x201f0000
#define INSN_CALLSYS		0x00000083
240 | |||
/*
 * Restore user register state from a sigcontext.  Scratch/argument
 * registers are restored through the pt_regs trap frame; the
 * callee-saved registers r9-r15 and the FP state go through the
 * switch_stack.  Returns non-zero if any user-space access faulted.
 */
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		   struct switch_stack *sw)
{
	unsigned long usp;
	long i, err = __get_user(regs->pc, &sc->sc_pc);

	/* Resume user mode through the common syscall-return path.  */
	sw->r26 = (unsigned long) ret_from_sys_call;

	err |= __get_user(regs->r0, sc->sc_regs+0);
	err |= __get_user(regs->r1, sc->sc_regs+1);
	err |= __get_user(regs->r2, sc->sc_regs+2);
	err |= __get_user(regs->r3, sc->sc_regs+3);
	err |= __get_user(regs->r4, sc->sc_regs+4);
	err |= __get_user(regs->r5, sc->sc_regs+5);
	err |= __get_user(regs->r6, sc->sc_regs+6);
	err |= __get_user(regs->r7, sc->sc_regs+7);
	err |= __get_user(regs->r8, sc->sc_regs+8);
	/* r9-r15 live in the switch_stack, not the trap frame.  */
	err |= __get_user(sw->r9, sc->sc_regs+9);
	err |= __get_user(sw->r10, sc->sc_regs+10);
	err |= __get_user(sw->r11, sc->sc_regs+11);
	err |= __get_user(sw->r12, sc->sc_regs+12);
	err |= __get_user(sw->r13, sc->sc_regs+13);
	err |= __get_user(sw->r14, sc->sc_regs+14);
	err |= __get_user(sw->r15, sc->sc_regs+15);
	err |= __get_user(regs->r16, sc->sc_regs+16);
	err |= __get_user(regs->r17, sc->sc_regs+17);
	err |= __get_user(regs->r18, sc->sc_regs+18);
	err |= __get_user(regs->r19, sc->sc_regs+19);
	err |= __get_user(regs->r20, sc->sc_regs+20);
	err |= __get_user(regs->r21, sc->sc_regs+21);
	err |= __get_user(regs->r22, sc->sc_regs+22);
	err |= __get_user(regs->r23, sc->sc_regs+23);
	err |= __get_user(regs->r24, sc->sc_regs+24);
	err |= __get_user(regs->r25, sc->sc_regs+25);
	err |= __get_user(regs->r26, sc->sc_regs+26);
	err |= __get_user(regs->r27, sc->sc_regs+27);
	err |= __get_user(regs->r28, sc->sc_regs+28);
	err |= __get_user(regs->gp, sc->sc_regs+29);	/* r29 is gp */
	/* r30 is the user stack pointer; install via wrusp.  */
	err |= __get_user(usp, sc->sc_regs+30);
	wrusp(usp);

	/* 31 FP registers, plus the FPCR stored as fp[31].  */
	for (i = 0; i < 31; i++)
		err |= __get_user(sw->fp[i], sc->sc_fpregs+i);
	err |= __get_user(sw->fp[31], &sc->sc_fpcr);

	return err;
}
289 | |||
/* Note that this syscall is also used by setcontext(3) to install
   a given sigcontext.  This because it's impossible to set *all*
   registers and transfer control from userland.  */

/*
 * Non-RT sigreturn: restore the blocked-signal mask and full register
 * state from the sigcontext the signal frame points at.  Any bad
 * user pointer forces SIGSEGV instead of returning garbage state.
 */
asmlinkage void
do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
	     struct switch_stack *sw)
{
	sigset_t set;

	/* Verify that it's a good sigcontext before using it */
	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;
	if (__get_user(set.sig[0], &sc->sc_mask))
		goto give_sigsegv;

	/* Reinstate the saved mask; SIGKILL/SIGSTOP stay unblockable.  */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, regs, sw))
		goto give_sigsegv;

	/* Send SIGTRAP if we're single-stepping: */
	if (ptrace_cancel_bpt (current)) {
		siginfo_t info;

		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
	}
	return;

give_sigsegv:
	force_sig(SIGSEGV, current);
}
331 | |||
/* rt-signal counterpart of do_sigreturn(): restores state from the
   ucontext_t embedded in an rt_sigframe built by setup_rt_frame().
   Note the full sigset is copied here, not just sig[0].  */
asmlinkage void
do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
		struct switch_stack *sw)
{
	sigset_t set;

	/* Verify that it's a good ucontext_t before using it */
	if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc)))
		goto give_sigsegv;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto give_sigsegv;

	/* SIGKILL/SIGSTOP can never be blocked; strip them before
	   installing the mask from userspace.  */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Reload GPRs, usp and FP state from the saved mcontext.  */
	if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
		goto give_sigsegv;

	/* Send SIGTRAP if we're single-stepping: */
	if (ptrace_cancel_bpt (current)) {
		siginfo_t info;

		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
	}
	return;

give_sigsegv:
	force_sig(SIGSEGV, current);
}
369 | |||
370 | |||
371 | /* | ||
372 | * Set up a signal frame. | ||
373 | */ | ||
374 | |||
375 | static inline void __user * | ||
376 | get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | ||
377 | { | ||
378 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) | ||
379 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
380 | |||
381 | return (void __user *)((sp - frame_size) & -32ul); | ||
382 | } | ||
383 | |||
/* Dump the current machine state into a user-space sigcontext:
   caller-saved regs come from pt_regs, callee-saved regs (r9-r15)
   and the FP file from the kernel-side switch_stack.  "mask" is the
   signal mask to record, "sp" the user sp at interruption time.
   Returns non-zero if any user store faulted.  */
static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 struct switch_stack *sw, unsigned long mask, unsigned long sp)
{
	long i, err = 0;

	err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack);
	err |= __put_user(mask, &sc->sc_mask);
	err |= __put_user(regs->pc, &sc->sc_pc);
	err |= __put_user(8, &sc->sc_ps);	/* PS = 8: user mode */

	err |= __put_user(regs->r0 , sc->sc_regs+0);
	err |= __put_user(regs->r1 , sc->sc_regs+1);
	err |= __put_user(regs->r2 , sc->sc_regs+2);
	err |= __put_user(regs->r3 , sc->sc_regs+3);
	err |= __put_user(regs->r4 , sc->sc_regs+4);
	err |= __put_user(regs->r5 , sc->sc_regs+5);
	err |= __put_user(regs->r6 , sc->sc_regs+6);
	err |= __put_user(regs->r7 , sc->sc_regs+7);
	err |= __put_user(regs->r8 , sc->sc_regs+8);
	/* r9-r15 are callee-saved and live in the switch_stack.  */
	err |= __put_user(sw->r9   , sc->sc_regs+9);
	err |= __put_user(sw->r10  , sc->sc_regs+10);
	err |= __put_user(sw->r11  , sc->sc_regs+11);
	err |= __put_user(sw->r12  , sc->sc_regs+12);
	err |= __put_user(sw->r13  , sc->sc_regs+13);
	err |= __put_user(sw->r14  , sc->sc_regs+14);
	err |= __put_user(sw->r15  , sc->sc_regs+15);
	err |= __put_user(regs->r16, sc->sc_regs+16);
	err |= __put_user(regs->r17, sc->sc_regs+17);
	err |= __put_user(regs->r18, sc->sc_regs+18);
	err |= __put_user(regs->r19, sc->sc_regs+19);
	err |= __put_user(regs->r20, sc->sc_regs+20);
	err |= __put_user(regs->r21, sc->sc_regs+21);
	err |= __put_user(regs->r22, sc->sc_regs+22);
	err |= __put_user(regs->r23, sc->sc_regs+23);
	err |= __put_user(regs->r24, sc->sc_regs+24);
	err |= __put_user(regs->r25, sc->sc_regs+25);
	err |= __put_user(regs->r26, sc->sc_regs+26);
	err |= __put_user(regs->r27, sc->sc_regs+27);
	err |= __put_user(regs->r28, sc->sc_regs+28);
	err |= __put_user(regs->gp , sc->sc_regs+29);
	err |= __put_user(sp, sc->sc_regs+30);
	err |= __put_user(0, sc->sc_regs+31);	/* r31 always reads as zero */

	for (i = 0; i < 31; i++)
		err |= __put_user(sw->fp[i], sc->sc_fpregs+i);
	err |= __put_user(0, sc->sc_fpregs+31);	/* f31 always reads as zero */
	err |= __put_user(sw->fp[31], &sc->sc_fpcr);	/* fp[31] holds the FPCR */

	err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0);
	err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1);
	err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2);

	return err;
}
439 | |||
/* Build a non-rt signal frame on the user stack and redirect the
   task to the handler.  On failure the task gets a forced SIGSEGV.  */
static void
setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
	    struct pt_regs *regs, struct switch_stack * sw)
{
	unsigned long oldsp, r26, err = 0;
	struct sigframe __user *frame;

	oldsp = rdusp();
	frame = get_sigframe(ka, oldsp, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	err |= setup_sigcontext(&frame->sc, regs, sw, set->sig[0], oldsp);
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->ka_restorer) {
		r26 = (unsigned long) ka->ka_restorer;
	} else {
		/* Write a three-instruction trampoline on the stack:
		   mov sp,a0; ldi v0,#__NR_sigreturn; callsys.  */
		err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0);
		err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1);
		err |= __put_user(INSN_CALLSYS, frame->retcode+2);
		imb();	/* we just wrote instructions: sync the icache */
		r26 = (unsigned long) frame->retcode;
	}

	/* Check that everything was written properly. */
	if (err)
		goto give_sigsegv;

	/* "Return" to the handler */
	regs->r26 = r26;	/* ra: trampoline / restorer */
	/* r27 is the procedure value (pv); handlers expect pv == pc.  */
	regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->r16 = sig;	/* a0: signal number */
	regs->r17 = 0;		/* a1: exception code */
	regs->r18 = (unsigned long) &frame->sc;	/* a2: sigcontext pointer */
	wrusp((unsigned long) frame);

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
		current->comm, current->pid, frame, regs->pc, regs->r26);
#endif

	return;

give_sigsegv:
	force_sigsegv(sig, current);
}
490 | |||
/* Build an rt (SA_SIGINFO) signal frame: like setup_frame(), but the
   frame carries a full siginfo and a ucontext_t, and the trampoline
   invokes rt_sigreturn instead of sigreturn.  */
static void
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs, struct switch_stack * sw)
{
	unsigned long oldsp, r26, err = 0;
	struct rt_sigframe __user *frame;

	oldsp = rdusp();
	frame = get_sigframe(ka, oldsp, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(oldsp), &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, sw,
				set->sig[0], oldsp);
	/* Full mask this time, not just sig[0].  */
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->ka_restorer) {
		r26 = (unsigned long) ka->ka_restorer;
	} else {
		/* Write a three-instruction trampoline on the stack:
		   mov sp,a0; ldi v0,#__NR_rt_sigreturn; callsys.  */
		err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0);
		err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn,
				  frame->retcode+1);
		err |= __put_user(INSN_CALLSYS, frame->retcode+2);
		imb();	/* we just wrote instructions: sync the icache */
		r26 = (unsigned long) frame->retcode;
	}

	if (err)
		goto give_sigsegv;

	/* "Return" to the handler */
	regs->r26 = r26;	/* ra: trampoline / restorer */
	/* r27 is the procedure value (pv); handlers expect pv == pc.  */
	regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->r16 = sig;	/* a0: signal number */
	regs->r17 = (unsigned long) &frame->info;	/* a1: siginfo pointer */
	regs->r18 = (unsigned long) &frame->uc;		/* a2: ucontext pointer */
	wrusp((unsigned long) frame);

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
		current->comm, current->pid, frame, regs->pc, regs->r26);
#endif

	return;

give_sigsegv:
	force_sigsegv(sig, current);
}
552 | |||
553 | |||
554 | /* | ||
555 | * OK, we're invoking a handler. | ||
556 | */ | ||
/* Deliver one signal: build the right kind of frame, then apply the
   handler's one-shot and blocking semantics.  */
static inline void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs * regs, struct switch_stack *sw)
{
	/* SA_SIGINFO handlers get the extended (siginfo + ucontext)
	   frame; everything else gets the classic sigcontext frame.  */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(sig, ka, info, oldset, regs, sw);
	else
		setup_frame(sig, ka, oldset, regs, sw);

	/* One-shot handler: reset to default after first delivery.  */
	if (ka->sa.sa_flags & SA_RESETHAND)
		ka->sa.sa_handler = SIG_DFL;

	/* Unless SA_NODEFER, block sa_mask plus the signal itself
	   while the handler runs.  */
	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
577 | |||
/* A signal interrupted a system call: decide between failing with
   EINTR and replaying the call.  r0/r19 are the original v0/a3 values
   to reinstate for a replay.
   Note the deliberately unusual switch: the ERESTARTNOHAND case label
   sits *inside* the ERESTARTSYS "no SA_RESTART" block, so both share
   the EINTR path, while ERESTARTSYS *with* SA_RESTART falls through
   to the replay path.
   NOTE(review): r0 is set to positive EINTR, not -EINTR — presumably
   because alpha flags syscall errors via a3 rather than a negative v0;
   confirm against entry.S.  */
static inline void
syscall_restart(unsigned long r0, unsigned long r19,
		struct pt_regs *regs, struct k_sigaction *ka)
{
	switch (regs->r0) {
	case ERESTARTSYS:
		if (!(ka->sa.sa_flags & SA_RESTART)) {
		case ERESTARTNOHAND:
			regs->r0 = EINTR;
			break;
		}
		/* fallthrough */
	case ERESTARTNOINTR:
		regs->r0 = r0;	/* reset v0 and a3 and replay syscall */
		regs->r19 = r19;
		regs->pc -= 4;	/* back up over the 4-byte callsys insn */
		break;
	case ERESTART_RESTARTBLOCK:
		current_thread_info()->restart_block.fn = do_no_restart_syscall;
		regs->r0 = EINTR;
		break;
	}
}
601 | |||
602 | |||
603 | /* | ||
604 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
605 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
606 | * mistake. | ||
607 | * | ||
608 | * Note that we go through the signals twice: once to check the signals that | ||
609 | * the kernel can handle, and then we build all the user-level signal handling | ||
610 | * stack-frames in one go after that. | ||
611 | * | ||
612 | * "r0" and "r19" are the registers we need to restore for system call | ||
613 | * restart. "r0" is also used as an indicator whether we can restart at | ||
614 | * all (if we get here from anything but a syscall return, it will be 0) | ||
615 | */ | ||
/* Deliver at most one pending signal.  Returns 1 if a handler frame
   was set up, 0 otherwise.  When no signal is delivered but the
   syscall was interrupted (r0 != 0), arrange for it to be restarted
   transparently.  */
static int
do_signal(sigset_t *oldset, struct pt_regs * regs, struct switch_stack * sw,
	  unsigned long r0, unsigned long r19)
{
	siginfo_t info;
	int signr;
	/* Remove any ptrace single-step breakpoint and remember whether
	   one was set, so it can be re-armed below.  */
	unsigned long single_stepping = ptrace_cancel_bpt(current);
	struct k_sigaction ka;

	if (!oldset)
		oldset = &current->blocked;

	/* This lets the debugger run, ... */
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	/* ... so re-check the single stepping. */
	single_stepping |= ptrace_cancel_bpt(current);

	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		if (r0) syscall_restart(r0, r19, regs, &ka);
		handle_signal(signr, &ka, &info, oldset, regs, sw);
		if (single_stepping)
			ptrace_set_bpt(current);	/* re-set bpt */
		return 1;
	}

	/* No handler ran; if we interrupted a restartable syscall,
	   restart it ourselves.  */
	if (r0) {
		switch (regs->r0) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			/* Reset v0 and a3 and replay syscall. */
			regs->r0 = r0;
			regs->r19 = r19;
			regs->pc -= 4;	/* back up over the callsys insn */
			break;
		case ERESTART_RESTARTBLOCK:
			/* Force v0 to the restart syscall and reply. */
			regs->r0 = __NR_restart_syscall;
			regs->pc -= 4;
			break;
		}
	}
	if (single_stepping)
		ptrace_set_bpt(current);	/* re-set breakpoint */

	return 0;
}
664 | |||
665 | void | ||
666 | do_notify_resume(sigset_t *oldset, struct pt_regs *regs, | ||
667 | struct switch_stack *sw, unsigned long r0, | ||
668 | unsigned long r19, unsigned long thread_info_flags) | ||
669 | { | ||
670 | if (thread_info_flags & _TIF_SIGPENDING) | ||
671 | do_signal(oldset, regs, sw, r0, r19); | ||
672 | } | ||
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c new file mode 100644 index 000000000000..fd467b207f0f --- /dev/null +++ b/arch/alpha/kernel/smc37c669.c | |||
@@ -0,0 +1,2554 @@ | |||
1 | /* | ||
2 | * SMC 37C669 initialization code | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | |||
6 | #include <linux/slab.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | |||
12 | #include <asm/hwrpb.h> | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/segment.h> | ||
15 | |||
/* Compile-time device-probe debug tracing: flip the 0 to 1 to enable.
   Call as DBG_DEVS(("fmt", args)) — double parens, printk-style.  */
#if 0
# define DBG_DEVS(args)         printk args
#else
# define DBG_DEVS(args)
#endif

/* Size units.  */
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)

#define SMC_DEBUG 0
27 | |||
28 | /* File: smcc669_def.h | ||
29 | * | ||
30 | * Copyright (C) 1997 by | ||
31 | * Digital Equipment Corporation, Maynard, Massachusetts. | ||
32 | * All rights reserved. | ||
33 | * | ||
34 | * This software is furnished under a license and may be used and copied | ||
35 | * only in accordance of the terms of such license and with the | ||
36 | * inclusion of the above copyright notice. This software or any other | ||
37 | * copies thereof may not be provided or otherwise made available to any | ||
38 | * other person. No title to and ownership of the software is hereby | ||
39 | * transferred. | ||
40 | * | ||
41 | * The information in this software is subject to change without notice | ||
42 | * and should not be construed as a commitment by Digital Equipment | ||
43 | * Corporation. | ||
44 | * | ||
45 | * Digital assumes no responsibility for the use or reliability of its | ||
46 | * software on equipment which is not supplied by Digital. | ||
47 | * | ||
48 | * | ||
49 | * Abstract: | ||
50 | * | ||
51 | * This file contains header definitions for the SMC37c669 | ||
52 | * Super I/O controller. | ||
53 | * | ||
54 | * Author: | ||
55 | * | ||
56 | * Eric Rasmussen | ||
57 | * | ||
58 | * Modification History: | ||
59 | * | ||
60 | * er 28-Jan-1997 Initial Entry | ||
61 | */ | ||
62 | |||
#ifndef __SMC37c669_H
#define __SMC37c669_H

/*
** Macros for handling device IRQs
**
** The mask acts as a flag used in mapping actual ISA IRQs (0 - 15)
** to device IRQs (A - H).  The high bit tags a value as a chip-local
** device IRQ rather than a raw ISA IRQ number.
*/
#define SMC37c669_DEVICE_IRQ_MASK	0x80000000
#define SMC37c669_DEVICE_IRQ( __i )	\
	((SMC37c669_DEVICE_IRQ_MASK) | (__i))
#define SMC37c669_IS_DEVICE_IRQ(__i)	\
	(((__i) & (SMC37c669_DEVICE_IRQ_MASK)) == (SMC37c669_DEVICE_IRQ_MASK))
#define SMC37c669_RAW_DEVICE_IRQ(__i)	\
	((__i) & ~(SMC37c669_DEVICE_IRQ_MASK))

/*
** Macros for handling device DRQs
**
** The mask acts as a flag used in mapping actual ISA DMA
** channels to device DMA channels (A - C).  Same tagging scheme
** as the IRQ macros above.
*/
#define SMC37c669_DEVICE_DRQ_MASK	0x80000000
#define SMC37c669_DEVICE_DRQ(__d)	\
	((SMC37c669_DEVICE_DRQ_MASK) | (__d))
#define SMC37c669_IS_DEVICE_DRQ(__d)	\
	(((__d) & (SMC37c669_DEVICE_DRQ_MASK)) == (SMC37c669_DEVICE_DRQ_MASK))
#define SMC37c669_RAW_DEVICE_DRQ(__d)	\
	((__d) & ~(SMC37c669_DEVICE_DRQ_MASK))
93 | |||
/* Value read back from the Device ID register (CR0D) on a 37C669.  */
#define SMC37c669_DEVICE_ID	0x3

/*
** SMC37c669 Device Function Definitions
** (indexes into the per-function configuration table)
*/
#define SERIAL_0	0
#define SERIAL_1	1
#define PARALLEL_0	2
#define FLOPPY_0	3
#define IDE_0		4
#define NUM_FUNCS	5

/*
** Default Device Function Mappings
** (the standard PC I/O port bases and IRQ/DMA assignments)
*/
#define COM1_BASE	0x3F8
#define COM1_IRQ	4
#define COM2_BASE	0x2F8
#define COM2_IRQ	3
#define PARP_BASE	0x3BC
#define PARP_IRQ	7
#define PARP_DRQ	3
#define FDC_BASE	0x3F0
#define FDC_IRQ		6
#define FDC_DRQ		2

/*
** Configuration On/Off Key Definitions
** (written to the index port to enter/leave the Configuration State)
*/
#define SMC37c669_CONFIG_ON_KEY		0x55
#define SMC37c669_CONFIG_OFF_KEY	0xAA
125 | |||
/*
** SMC 37c669 Device IRQs
** (letter-named chip IRQs, tagged with the device-IRQ flag bit)
*/
#define SMC37c669_DEVICE_IRQ_A	    ( SMC37c669_DEVICE_IRQ( 0x01 ) )
#define SMC37c669_DEVICE_IRQ_B	    ( SMC37c669_DEVICE_IRQ( 0x02 ) )
#define SMC37c669_DEVICE_IRQ_C	    ( SMC37c669_DEVICE_IRQ( 0x03 ) )
#define SMC37c669_DEVICE_IRQ_D	    ( SMC37c669_DEVICE_IRQ( 0x04 ) )
#define SMC37c669_DEVICE_IRQ_E	    ( SMC37c669_DEVICE_IRQ( 0x05 ) )
#define SMC37c669_DEVICE_IRQ_F	    ( SMC37c669_DEVICE_IRQ( 0x06 ) )
/*      SMC37c669_DEVICE_IRQ_G	 *** RESERVED ***/
#define SMC37c669_DEVICE_IRQ_H	    ( SMC37c669_DEVICE_IRQ( 0x08 ) )

/*
** SMC 37c669 Device DMA Channel Definitions
** (letter-named chip DMA channels, tagged with the device-DRQ flag bit)
*/
#define SMC37c669_DEVICE_DRQ_A	    ( SMC37c669_DEVICE_DRQ( 0x01 ) )
#define SMC37c669_DEVICE_DRQ_B	    ( SMC37c669_DEVICE_DRQ( 0x02 ) )
#define SMC37c669_DEVICE_DRQ_C	    ( SMC37c669_DEVICE_DRQ( 0x03 ) )
144 | |||
/*
** Configuration Register Index Definitions
** (values written to the index port to select configuration
** register CR00 - CR29 for access through the data port)
*/
#define SMC37c669_CR00_INDEX	    0x00
#define SMC37c669_CR01_INDEX	    0x01
#define SMC37c669_CR02_INDEX	    0x02
#define SMC37c669_CR03_INDEX	    0x03
#define SMC37c669_CR04_INDEX	    0x04
#define SMC37c669_CR05_INDEX	    0x05
#define SMC37c669_CR06_INDEX	    0x06
#define SMC37c669_CR07_INDEX	    0x07
#define SMC37c669_CR08_INDEX	    0x08
#define SMC37c669_CR09_INDEX	    0x09
#define SMC37c669_CR0A_INDEX	    0x0A
#define SMC37c669_CR0B_INDEX	    0x0B
#define SMC37c669_CR0C_INDEX	    0x0C
#define SMC37c669_CR0D_INDEX	    0x0D
#define SMC37c669_CR0E_INDEX	    0x0E
#define SMC37c669_CR0F_INDEX	    0x0F
#define SMC37c669_CR10_INDEX	    0x10
#define SMC37c669_CR11_INDEX	    0x11
#define SMC37c669_CR12_INDEX	    0x12
#define SMC37c669_CR13_INDEX	    0x13
#define SMC37c669_CR14_INDEX	    0x14
#define SMC37c669_CR15_INDEX	    0x15
#define SMC37c669_CR16_INDEX	    0x16
#define SMC37c669_CR17_INDEX	    0x17
#define SMC37c669_CR18_INDEX	    0x18
#define SMC37c669_CR19_INDEX	    0x19
#define SMC37c669_CR1A_INDEX	    0x1A
#define SMC37c669_CR1B_INDEX	    0x1B
#define SMC37c669_CR1C_INDEX	    0x1C
#define SMC37c669_CR1D_INDEX	    0x1D
#define SMC37c669_CR1E_INDEX	    0x1E
#define SMC37c669_CR1F_INDEX	    0x1F
#define SMC37c669_CR20_INDEX	    0x20
#define SMC37c669_CR21_INDEX	    0x21
#define SMC37c669_CR22_INDEX	    0x22
#define SMC37c669_CR23_INDEX	    0x23
#define SMC37c669_CR24_INDEX	    0x24
#define SMC37c669_CR25_INDEX	    0x25
#define SMC37c669_CR26_INDEX	    0x26
#define SMC37c669_CR27_INDEX	    0x27
#define SMC37c669_CR28_INDEX	    0x28
#define SMC37c669_CR29_INDEX	    0x29
190 | |||
/*
** Configuration Register Alias Definitions
** (meaningful names for the registers the driver actually touches)
*/
#define SMC37c669_DEVICE_ID_INDEX		    SMC37c669_CR0D_INDEX
#define SMC37c669_DEVICE_REVISION_INDEX		    SMC37c669_CR0E_INDEX
#define SMC37c669_FDC_BASE_ADDRESS_INDEX	    SMC37c669_CR20_INDEX
#define SMC37c669_IDE_BASE_ADDRESS_INDEX	    SMC37c669_CR21_INDEX
#define SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX	    SMC37c669_CR22_INDEX
#define SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX	    SMC37c669_CR23_INDEX
#define SMC37c669_SERIAL0_BASE_ADDRESS_INDEX	    SMC37c669_CR24_INDEX
#define SMC37c669_SERIAL1_BASE_ADDRESS_INDEX	    SMC37c669_CR25_INDEX
#define SMC37c669_PARALLEL_FDC_DRQ_INDEX	    SMC37c669_CR26_INDEX
#define SMC37c669_PARALLEL_FDC_IRQ_INDEX	    SMC37c669_CR27_INDEX
#define SMC37c669_SERIAL_IRQ_INDEX		    SMC37c669_CR28_INDEX
205 | |||
206 | /* | ||
207 | ** Configuration Register Definitions | ||
208 | ** | ||
209 | ** The INDEX (write only) and DATA (read/write) ports are effective | ||
210 | ** only when the chip is in the Configuration State. | ||
211 | */ | ||
/* The two-byte register window used in the Configuration State.
   NOTE(review): the CRxx unions below rely on LSB-first bitfield
   allocation (true for GCC on little-endian Alpha).  */
typedef struct _SMC37c669_CONFIG_REGS {
    unsigned char index_port;	/* write CRxx index (or on/off key) here */
    unsigned char data_port;	/* read/write the selected CRxx here */
} SMC37c669_CONFIG_REGS;
216 | |||
217 | /* | ||
218 | ** CR00 - default value 0x28 | ||
219 | ** | ||
220 | ** IDE_EN (CR00<1:0>): | ||
221 | ** 0x - 30ua pull-ups on nIDEEN, nHDCS0, NHDCS1 | ||
222 | ** 11 - IRQ_H available as IRQ output, | ||
223 | ** IRRX2, IRTX2 available as alternate IR pins | ||
224 | ** 10 - nIDEEN, nHDCS0, nHDCS1 used to control IDE | ||
225 | ** | ||
226 | ** VALID (CR00<7>): | ||
227 | ** A high level on this software controlled bit can | ||
228 | ** be used to indicate that a valid configuration | ||
229 | ** cycle has occurred. The control software must | ||
230 | ** take care to set this bit at the appropriate times. | ||
231 | ** Set to zero after power up. This bit has no | ||
232 | ** effect on any other hardware in the chip. | ||
233 | ** | ||
234 | */ | ||
typedef union _SMC37c669_CR00 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned ide_en : 2;		/* See note above */
	unsigned reserved1 : 1;		/* RAZ */
	unsigned fdc_pwr : 1;		/* 1 = supply power to FDC */
	unsigned reserved2 : 3;		/* Read as 010b */
	unsigned valid : 1;		/* See note above */
    }	by_field;
} SMC37c669_CR00;
245 | |||
246 | /* | ||
247 | ** CR01 - default value 0x9C | ||
248 | */ | ||
typedef union _SMC37c669_CR01 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned reserved1 : 2;		/* RAZ */
	unsigned ppt_pwr : 1;		/* 1 = supply power to PPT */
	unsigned ppt_mode : 1;		/* 1 = Printer mode, 0 = EPP */
	unsigned reserved2 : 1;		/* Read as 1 */
	unsigned reserved3 : 2;		/* RAZ */
	unsigned lock_crx: 1;		/* Lock CR00 - CR18 */
    }	by_field;
} SMC37c669_CR01;
260 | |||
261 | /* | ||
262 | ** CR02 - default value 0x88 | ||
263 | */ | ||
typedef union _SMC37c669_CR02 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned reserved1 : 3;		/* RAZ */
	unsigned uart1_pwr : 1;		/* 1 = supply power to UART1 */
	unsigned reserved2 : 3;		/* RAZ */
	unsigned uart2_pwr : 1;		/* 1 = supply power to UART2 */
    }	by_field;
} SMC37c669_CR02;
273 | |||
274 | /* | ||
275 | ** CR03 - default value 0x78 | ||
276 | ** | ||
277 | ** CR03<7> CR03<2> Pin 94 | ||
278 | ** ------- ------- ------ | ||
279 | ** 0 X DRV2 (input) | ||
280 | ** 1 0 ADRX | ||
281 | ** 1 1 IRQ_B | ||
282 | ** | ||
283 | ** CR03<6> CR03<5> Op Mode | ||
284 | ** ------- ------- ------- | ||
285 | ** 0 0 Model 30 | ||
286 | ** 0 1 PS/2 | ||
287 | ** 1 0 Reserved | ||
288 | ** 1 1 AT Mode | ||
289 | */ | ||
typedef union _SMC37c669_CR03 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned pwrgd_gamecs : 1;	/* 1 = PWRGD, 0 = GAMECS */
	unsigned fdc_mode2 : 1;		/* 1 = Enhanced Mode 2 */
	unsigned pin94_0 : 1;		/* See note above */
	unsigned reserved1 : 1;		/* RAZ */
	unsigned drvden : 1;		/* 1 = high, 0 - output */
	unsigned op_mode : 2;		/* See note above */
	unsigned pin94_1 : 1;		/* See note above */
    }	by_field;
} SMC37c669_CR03;
302 | |||
303 | /* | ||
304 | ** CR04 - default value 0x00 | ||
305 | ** | ||
306 | ** PP_EXT_MODE: | ||
307 | ** If CR01<PP_MODE> = 0 and PP_EXT_MODE = | ||
308 | ** 00 - Standard and Bidirectional | ||
309 | ** 01 - EPP mode and SPP | ||
310 | ** 10 - ECP mode | ||
311 | ** In this mode, 2 drives can be supported | ||
312 | ** directly, 3 or 4 drives must use external | ||
313 | ** 4 drive support. SPP can be selected | ||
314 | ** through the ECR register of ECP as mode 000. | ||
315 | ** 11 - ECP mode and EPP mode | ||
316 | ** In this mode, 2 drives can be supported | ||
317 | ** directly, 3 or 4 drives must use external | ||
318 | ** 4 drive support. SPP can be selected | ||
319 | ** through the ECR register of ECP as mode 000. | ||
320 | ** In this mode, EPP can be selected through | ||
321 | ** the ECR register of ECP as mode 100. | ||
322 | ** | ||
323 | ** PP_FDC: | ||
324 | ** 00 - Normal | ||
325 | ** 01 - PPFD1 | ||
326 | ** 10 - PPFD2 | ||
327 | ** 11 - Reserved | ||
328 | ** | ||
329 | ** MIDI1: | ||
330 | ** Serial Clock Select: | ||
331 | ** A low level on this bit disables MIDI support, | ||
332 | ** clock = divide by 13. A high level on this | ||
333 | ** bit enables MIDI support, clock = divide by 12. | ||
334 | ** | ||
335 | ** MIDI operates at 31.25 Kbps which can be derived | ||
336 | ** from 125 KHz (24 MHz / 12 = 2 MHz, 2 MHz / 16 = 125 KHz) | ||
337 | ** | ||
338 | ** ALT_IO: | ||
339 | ** 0 - Use pins IRRX, IRTX | ||
340 | ** 1 - Use pins IRRX2, IRTX2 | ||
341 | ** | ||
342 | ** If this bit is set, the IR receive and transmit | ||
343 | ** functions will not be available on pins 25 and 26 | ||
344 | ** unless CR00<IDE_EN> = 11. | ||
345 | */ | ||
typedef union _SMC37c669_CR04 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned ppt_ext_mode : 2;	/* See note above */
	unsigned ppt_fdc : 2;		/* See note above */
	unsigned midi1 : 1;		/* See note above */
	unsigned midi2 : 1;		/* See note above */
	unsigned epp_type : 1;		/* 0 = EPP 1.9, 1 = EPP 1.7 */
	unsigned alt_io : 1;		/* See note above */
    }	by_field;
} SMC37c669_CR04;
357 | |||
358 | /* | ||
359 | ** CR05 - default value 0x00 | ||
360 | ** | ||
361 | ** DEN_SEL: | ||
362 | ** 00 - Densel output normal | ||
363 | ** 01 - Reserved | ||
364 | ** 10 - Densel output 1 | ||
365 | ** 11 - Densel output 0 | ||
366 | ** | ||
367 | */ | ||
typedef union _SMC37c669_CR05 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned reserved1 : 2;		/* RAZ */
	unsigned fdc_dma_mode : 1;	/* 0 = burst, 1 = non-burst */
	unsigned den_sel : 2;		/* See note above */
	unsigned swap_drv : 1;		/* Swap the FDC motor selects */
	unsigned extx4 : 1;		/* 0 = 2 drive, 1 = external 4 drive decode */
	unsigned reserved2 : 1;		/* RAZ */
    }	by_field;
} SMC37c669_CR05;
379 | |||
380 | /* | ||
381 | ** CR06 - default value 0xFF | ||
382 | */ | ||
typedef union _SMC37c669_CR06 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned floppy_a : 2;		/* Type of floppy drive A */
	unsigned floppy_b : 2;		/* Type of floppy drive B */
	unsigned floppy_c : 2;		/* Type of floppy drive C */
	unsigned floppy_d : 2;		/* Type of floppy drive D */
    }	by_field;
} SMC37c669_CR06;
392 | |||
393 | /* | ||
394 | ** CR07 - default value 0x00 | ||
395 | ** | ||
396 | ** Auto Power Management CR07<7:4>: | ||
397 | ** 0 - Auto Powerdown disabled (default) | ||
398 | ** 1 - Auto Powerdown enabled | ||
399 | ** | ||
400 | ** This bit is reset to the default state by POR or | ||
401 | ** a hardware reset. | ||
402 | ** | ||
403 | */ | ||
typedef union _SMC37c669_CR07 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned floppy_boot : 2;	/* 0 = A:, 1 = B: */
	unsigned reserved1 : 2;		/* RAZ */
	unsigned ppt_en : 1;		/* See note above */
	unsigned uart1_en : 1;		/* See note above */
	unsigned uart2_en : 1;		/* See note above */
	unsigned fdc_en : 1;		/* See note above */
    }	by_field;
} SMC37c669_CR07;
415 | |||
416 | /* | ||
417 | ** CR08 - default value 0x00 | ||
418 | */ | ||
typedef union _SMC37c669_CR08 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned zero : 4;		/* 0 */
	unsigned addrx7_4 : 4;		/* ADR<7:3> for ADRx decode */
    }	by_field;
} SMC37c669_CR08;
426 | |||
427 | /* | ||
428 | ** CR09 - default value 0x00 | ||
429 | ** | ||
430 | ** ADRx_CONFIG: | ||
431 | ** 00 - ADRx disabled | ||
432 | ** 01 - 1 byte decode A<3:0> = 0000b | ||
433 | ** 10 - 8 byte block decode A<3:0> = 0XXXb | ||
434 | ** 11 - 16 byte block decode A<3:0> = XXXXb | ||
435 | ** | ||
436 | */ | ||
typedef union _SMC37c669_CR09 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned adra8 : 3;		/* ADR<10:8> for ADRx decode */
	unsigned reserved1 : 3;		/* RAZ */
	unsigned adrx_config : 2;	/* See note above */
    }	by_field;
} SMC37c669_CR09;
445 | |||
446 | /* | ||
447 | ** CR0A - default value 0x00 | ||
448 | */ | ||
typedef union _SMC37c669_CR0A {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned ecp_fifo_threshold : 4;	/* ECP FIFO interrupt threshold */
	unsigned reserved1 : 4;		/* RAZ */
    }	by_field;
} SMC37c669_CR0A;
456 | |||
457 | /* | ||
458 | ** CR0B - default value 0x00 | ||
459 | */ | ||
typedef union _SMC37c669_CR0B {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned fdd0_drtx : 2;		/* FDD0 Data Rate Table */
	unsigned fdd1_drtx : 2;		/* FDD1 Data Rate Table */
	unsigned fdd2_drtx : 2;		/* FDD2 Data Rate Table */
	unsigned fdd3_drtx : 2;		/* FDD3 Data Rate Table */
    }	by_field;
} SMC37c669_CR0B;
469 | |||
470 | /* | ||
471 | ** CR0C - default value 0x00 | ||
472 | ** | ||
473 | ** UART2_MODE: | ||
474 | ** 000 - Standard (default) | ||
475 | ** 001 - IrDA (HPSIR) | ||
476 | ** 010 - Amplitude Shift Keyed IR @500 KHz | ||
477 | ** 011 - Reserved | ||
478 | ** 1xx - Reserved | ||
479 | ** | ||
480 | */ | ||
typedef union _SMC37c669_CR0C {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned uart2_rcv_polarity : 1;    /* 1 = invert RX */
	unsigned uart2_xmit_polarity : 1;   /* 1 = invert TX */
	unsigned uart2_duplex : 1;	    /* 1 = full, 0 = half */
	unsigned uart2_mode : 3;	    /* See note above */
	unsigned uart1_speed : 1;	    /* 1 = high speed enabled */
	unsigned uart2_speed : 1;	    /* 1 = high speed enabled */
    }	by_field;
} SMC37c669_CR0C;
492 | |||
493 | /* | ||
494 | ** CR0D - default value 0x03 | ||
495 | ** | ||
496 | ** Device ID Register - read only | ||
497 | */ | ||
typedef union _SMC37c669_CR0D {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned device_id : 8;	    /* Returns 0x3 in this field */
    } by_field;
} SMC37c669_CR0D;
504 | |||
505 | /* | ||
506 | ** CR0E - default value 0x02 | ||
507 | ** | ||
508 | ** Device Revision Register - read only | ||
509 | */ | ||
typedef union _SMC37c669_CR0E {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned device_rev : 8;    /* Returns 0x2 in this field */
    } by_field;
} SMC37c669_CR0E;
516 | |||
517 | /* | ||
518 | ** CR0F - default value 0x00 | ||
519 | */ | ||
typedef union _SMC37c669_CR0F {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned test0 : 1;	    /* Reserved - set to 0 */
	unsigned test1 : 1;	    /* Reserved - set to 0 */
	unsigned test2 : 1;	    /* Reserved - set to 0 */
	unsigned test3 : 1;	    /* Reserved - set to 0 */
	unsigned test4 : 1;	    /* Reserved - set to 0 */
	unsigned test5 : 1;	    /* Reserved - set to 0 */
	unsigned test6 : 1;	    /* Reserved - set to 0 */
	unsigned test7 : 1;	    /* Reserved - set to 0 */
    } by_field;
} SMC37c669_CR0F;
533 | |||
534 | /* | ||
535 | ** CR10 - default value 0x00 | ||
536 | */ | ||
typedef union _SMC37c669_CR10 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned reserved1 : 3;	    /* RAZ */
	unsigned pll_gain : 1;	    /* 1 = 3V, 0 = 5V operation (1-bit field;
				    ** original comment said "2" — typo) */
	unsigned pll_stop : 1;	    /* 1 = stop PLLs */
	unsigned ace_stop : 1;	    /* 1 = stop UART clocks */
	unsigned pll_clock_ctrl : 1;/* 0 = 14.318 MHz, 1 = 24 MHz */
	unsigned ir_test : 1;	    /* Enable IR test mode */
    } by_field;
} SMC37c669_CR10;
548 | |||
549 | /* | ||
550 | ** CR11 - default value 0x00 | ||
551 | */ | ||
typedef union _SMC37c669_CR11 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned ir_loopback : 1;   /* Internal IR loop back */
	unsigned test_10ms : 1;	    /* Test 10ms autopowerdown FDC timeout */
	unsigned reserved1 : 6;	    /* RAZ */
    } by_field;
} SMC37c669_CR11;
560 | |||
561 | /* | ||
562 | ** CR12 - CR1D are reserved registers | ||
563 | */ | ||
564 | |||
565 | /* | ||
566 | ** CR1E - default value 0x80 | ||
567 | ** | ||
568 | ** GAMECS: | ||
569 | ** 00 - GAMECS disabled | ||
570 | ** 01 - 1 byte decode ADR<3:0> = 0001b | ||
571 | ** 10 - 8 byte block decode ADR<3:0> = 0XXXb | ||
572 | ** 11 - 16 byte block decode ADR<3:0> = XXXXb | ||
573 | ** | ||
574 | */ | ||
575 | typedef union _SMC37c66_CR1E { | ||
576 | unsigned char as_uchar; | ||
577 | struct { | ||
578 | unsigned gamecs_config: 2; /* See note above */ | ||
579 | unsigned gamecs_addr9_4 : 6; /* GAMECS Addr<9:4> */ | ||
580 | } by_field; | ||
581 | } SMC37c669_CR1E; | ||
582 | |||
583 | /* | ||
584 | ** CR1F - default value 0x00 | ||
585 | ** | ||
586 | ** DT0 DT1 DRVDEN0 DRVDEN1 Drive Type | ||
587 | ** --- --- ------- ------- ---------- | ||
588 | ** 0 0 DENSEL DRATE0 4/2/1 MB 3.5" | ||
589 | ** 2/1 MB 5.25" | ||
590 | ** 2/1.6/1 MB 3.5" (3-mode) | ||
591 | ** 0 1 DRATE1 DRATE0 | ||
592 | ** 1 0 nDENSEL DRATE0 PS/2 | ||
593 | ** 1 1 DRATE0 DRATE1 | ||
594 | ** | ||
595 | ** Note: DENSEL, DRATE1, and DRATE0 map onto two output | ||
596 | ** pins - DRVDEN0 and DRVDEN1. | ||
597 | ** | ||
598 | */ | ||
typedef union _SMC37c669_CR1F {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned fdd0_drive_type : 2;	/* FDD0 drive type */
	unsigned fdd1_drive_type : 2;	/* FDD1 drive type */
	unsigned fdd2_drive_type : 2;	/* FDD2 drive type */
	unsigned fdd3_drive_type : 2;	/* FDD3 drive type */
    } by_field;
} SMC37c669_CR1F;
608 | |||
609 | /* | ||
610 | ** CR20 - default value 0x3C | ||
611 | ** | ||
612 | ** FDC Base Address Register | ||
613 | ** - To disable this decode set Addr<9:8> = 0 | ||
614 | ** - A<10> = 0, A<3:0> = 0XXXb to access. | ||
615 | ** | ||
616 | */ | ||
typedef union _SMC37c669_CR20 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned zero : 2;	    /* 0 */
	unsigned addr9_4 : 6;	    /* FDC Addr<9:4> */
    } by_field;
} SMC37c669_CR20;
624 | |||
625 | /* | ||
626 | ** CR21 - default value 0x3C | ||
627 | ** | ||
628 | ** IDE Base Address Register | ||
629 | ** - To disable this decode set Addr<9:8> = 0 | ||
630 | ** - A<10> = 0, A<3:0> = 0XXXb to access. | ||
631 | ** | ||
632 | */ | ||
typedef union _SMC37c669_CR21 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned zero : 2;	    /* 0 */
	unsigned addr9_4 : 6;	    /* IDE Addr<9:4> */
    } by_field;
} SMC37c669_CR21;
640 | |||
641 | /* | ||
642 | ** CR22 - default value 0x3D | ||
643 | ** | ||
644 | ** IDE Alternate Status Base Address Register | ||
645 | ** - To disable this decode set Addr<9:8> = 0 | ||
646 | ** - A<10> = 0, A<3:0> = 0110b to access. | ||
647 | ** | ||
648 | */ | ||
typedef union _SMC37c669_CR22 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned zero : 2;	    /* 0 */
	unsigned addr9_4 : 6;	    /* IDE Alt Status Addr<9:4> */
    } by_field;
} SMC37c669_CR22;
656 | |||
657 | /* | ||
658 | ** CR23 - default value 0x00 | ||
659 | ** | ||
660 | ** Parallel Port Base Address Register | ||
661 | ** - To disable this decode set Addr<9:8> = 0 | ||
662 | ** - A<10> = 0 to access. | ||
663 | ** - If EPP is enabled, A<2:0> = XXXb to access. | ||
664 | ** If EPP is NOT enabled, A<1:0> = XXb to access | ||
665 | ** | ||
666 | */ | ||
typedef union _SMC37c669_CR23 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned addr9_2 : 8;	    /* Parallel Port Addr<9:2> */
    } by_field;
} SMC37c669_CR23;
673 | |||
674 | /* | ||
675 | ** CR24 - default value 0x00 | ||
676 | ** | ||
677 | ** UART1 Base Address Register | ||
678 | ** - To disable this decode set Addr<9:8> = 0 | ||
679 | ** - A<10> = 0, A<2:0> = XXXb to access. | ||
680 | ** | ||
681 | */ | ||
typedef union _SMC37c669_CR24 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned zero : 1;	    /* 0 */
	unsigned addr9_3 : 7;	    /* UART1 Addr<9:3> */
    } by_field;
} SMC37c669_CR24;
689 | |||
690 | /* | ||
691 | ** CR25 - default value 0x00 | ||
692 | ** | ||
693 | ** UART2 Base Address Register | ||
694 | ** - To disable this decode set Addr<9:8> = 0 | ||
695 | ** - A<10> = 0, A<2:0> = XXXb to access. | ||
696 | ** | ||
697 | */ | ||
typedef union _SMC37c669_CR25 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned zero : 1;	    /* 0 */
	unsigned addr9_3 : 7;	    /* UART2 Addr<9:3> */
    } by_field;
} SMC37c669_CR25;
705 | |||
706 | /* | ||
707 | ** CR26 - default value 0x00 | ||
708 | ** | ||
709 | ** Parallel Port / FDC DMA Select Register | ||
710 | ** | ||
711 | ** D3 - D0 DMA | ||
712 | ** D7 - D4 Selected | ||
713 | ** ------- -------- | ||
714 | ** 0000 None | ||
715 | ** 0001 DMA_A | ||
716 | ** 0010 DMA_B | ||
717 | ** 0011 DMA_C | ||
718 | ** | ||
719 | */ | ||
typedef union _SMC37c669_CR26 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned ppt_drq : 4;	    /* Parallel port DMA select - see note above */
	unsigned fdc_drq : 4;	    /* FDC DMA select - see note above */
    } by_field;
} SMC37c669_CR26;
727 | |||
728 | /* | ||
729 | ** CR27 - default value 0x00 | ||
730 | ** | ||
731 | ** Parallel Port / FDC IRQ Select Register | ||
732 | ** | ||
733 | ** D3 - D0 IRQ | ||
734 | ** D7 - D4 Selected | ||
735 | ** ------- -------- | ||
736 | ** 0000 None | ||
737 | ** 0001 IRQ_A | ||
738 | ** 0010 IRQ_B | ||
739 | ** 0011 IRQ_C | ||
740 | ** 0100 IRQ_D | ||
741 | ** 0101 IRQ_E | ||
742 | ** 0110 IRQ_F | ||
743 | ** 0111 Reserved | ||
744 | ** 1000 IRQ_H | ||
745 | ** | ||
746 | ** Any unselected IRQ REQ is in tristate | ||
747 | ** | ||
748 | */ | ||
typedef union _SMC37c669_CR27 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned ppt_irq : 4;	    /* Parallel port IRQ select - see note above */
	unsigned fdc_irq : 4;	    /* FDC IRQ select - see note above */
    } by_field;
} SMC37c669_CR27;
756 | |||
757 | /* | ||
758 | ** CR28 - default value 0x00 | ||
759 | ** | ||
760 | ** UART IRQ Select Register | ||
761 | ** | ||
762 | ** D3 - D0 IRQ | ||
763 | ** D7 - D4 Selected | ||
764 | ** ------- -------- | ||
765 | ** 0000 None | ||
766 | ** 0001 IRQ_A | ||
767 | ** 0010 IRQ_B | ||
768 | ** 0011 IRQ_C | ||
769 | ** 0100 IRQ_D | ||
770 | ** 0101 IRQ_E | ||
771 | ** 0110 IRQ_F | ||
772 | ** 0111 Reserved | ||
773 | ** 1000 IRQ_H | ||
774 | ** 1111 share with UART1 (only for UART2) | ||
775 | ** | ||
776 | ** Any unselected IRQ REQ is in tristate | ||
777 | ** | ||
778 | ** To share an IRQ between UART1 and UART2, set | ||
779 | ** UART1 to use the desired IRQ and set UART2 to | ||
780 | ** 0xF to enable sharing mechanism. | ||
781 | ** | ||
782 | */ | ||
typedef union _SMC37c669_CR28 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned uart2_irq : 4;	    /* UART2 IRQ select (0xF = share with UART1) */
	unsigned uart1_irq : 4;	    /* UART1 IRQ select - see note above */
    } by_field;
} SMC37c669_CR28;
790 | |||
791 | /* | ||
792 | ** CR29 - default value 0x00 | ||
793 | ** | ||
794 | ** IRQIN IRQ Select Register | ||
795 | ** | ||
796 | ** D3 - D0 IRQ | ||
797 | ** D7 - D4 Selected | ||
798 | ** ------- -------- | ||
799 | ** 0000 None | ||
800 | ** 0001 IRQ_A | ||
801 | ** 0010 IRQ_B | ||
802 | ** 0011 IRQ_C | ||
803 | ** 0100 IRQ_D | ||
804 | ** 0101 IRQ_E | ||
805 | ** 0110 IRQ_F | ||
806 | ** 0111 Reserved | ||
807 | ** 1000 IRQ_H | ||
808 | ** | ||
809 | ** Any unselected IRQ REQ is in tristate | ||
810 | ** | ||
811 | */ | ||
typedef union _SMC37c669_CR29 {
    unsigned char as_uchar;		/* raw register value */
    struct {
	unsigned irqin_irq : 4;	    /* IRQIN IRQ select - see note above */
	unsigned reserved1 : 4;	    /* RAZ */
    } by_field;
} SMC37c669_CR29;
819 | |||
820 | /* | ||
821 | ** Aliases of Configuration Register formats (should match | ||
822 | ** the set of index aliases). | ||
823 | ** | ||
824 | ** Note that CR24 and CR25 have the same format and are the | ||
825 | ** base address registers for UART1 and UART2. Because of | ||
826 | ** this we only define 1 alias here - for CR24 - as the serial | ||
827 | ** base address register. | ||
828 | ** | ||
829 | ** Note that CR21 and CR22 have the same format and are the | ||
830 | ** base address and alternate status address registers for | ||
831 | ** the IDE controller. Because of this we only define 1 alias | ||
832 | ** here - for CR21 - as the IDE address register. | ||
833 | ** | ||
834 | */ | ||
typedef SMC37c669_CR0D SMC37c669_DEVICE_ID_REGISTER;	    /* read only */
typedef SMC37c669_CR0E SMC37c669_DEVICE_REVISION_REGISTER;  /* read only */
typedef SMC37c669_CR20 SMC37c669_FDC_BASE_ADDRESS_REGISTER;
typedef SMC37c669_CR21 SMC37c669_IDE_ADDRESS_REGISTER;	    /* also used for CR22 (same format) */
typedef SMC37c669_CR23 SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER;
typedef SMC37c669_CR24 SMC37c669_SERIAL_BASE_ADDRESS_REGISTER;	/* also used for CR25 (same format) */
typedef SMC37c669_CR26 SMC37c669_PARALLEL_FDC_DRQ_REGISTER;
typedef SMC37c669_CR27 SMC37c669_PARALLEL_FDC_IRQ_REGISTER;
typedef SMC37c669_CR28 SMC37c669_SERIAL_IRQ_REGISTER;
844 | |||
845 | /* | ||
846 | ** ISA/Device IRQ Translation Table Entry Definition | ||
847 | */ | ||
typedef struct _SMC37c669_IRQ_TRANSLATION_ENTRY {
	int device_irq;		/* SMC37c669 device IRQ */
	int isa_irq;		/* corresponding ISA IRQ */
} SMC37c669_IRQ_TRANSLATION_ENTRY;
852 | |||
853 | /* | ||
854 | ** ISA/Device DMA Translation Table Entry Definition | ||
855 | */ | ||
typedef struct _SMC37c669_DRQ_TRANSLATION_ENTRY {
	int device_drq;		/* SMC37c669 device DMA request */
	int isa_drq;		/* corresponding ISA DMA channel */
} SMC37c669_DRQ_TRANSLATION_ENTRY;
860 | |||
861 | /* | ||
862 | ** External Interface Function Prototype Declarations | ||
863 | */ | ||
864 | |||
/* Probe for the chip; returns its config registers, or NULL if absent */
SMC37c669_CONFIG_REGS *SMC37c669_detect( 
    int 			/* IRQ translation table index */
);

/* Restore a function's mappings from the local shadow config */
unsigned int SMC37c669_enable_device( 
    unsigned int func 
);

/* Tear down a function's port/IRQ/DRQ mappings */
unsigned int SMC37c669_disable_device( 
    unsigned int func 
);

/* Record a new port/irq/drq configuration and apply it */
unsigned int SMC37c669_configure_device( 
    unsigned int func, 
    int port, 
    int irq, 
    int drq 
);

void SMC37c669_display_device_info( 
    void 
);
887 | |||
888 | #endif /* __SMC37c669_H */ | ||
889 | |||
890 | /* file: smcc669.c | ||
891 | * | ||
892 | * Copyright (C) 1997 by | ||
893 | * Digital Equipment Corporation, Maynard, Massachusetts. | ||
894 | * All rights reserved. | ||
895 | * | ||
896 | * This software is furnished under a license and may be used and copied | ||
897 | * only in accordance of the terms of such license and with the | ||
898 | * inclusion of the above copyright notice. This software or any other | ||
899 | * copies thereof may not be provided or otherwise made available to any | ||
900 | * other person. No title to and ownership of the software is hereby | ||
901 | * transferred. | ||
902 | * | ||
903 | * The information in this software is subject to change without notice | ||
904 | * and should not be construed as a commitment by digital equipment | ||
905 | * corporation. | ||
906 | * | ||
907 | * Digital assumes no responsibility for the use or reliability of its | ||
908 | * software on equipment which is not supplied by digital. | ||
909 | */ | ||
910 | |||
911 | /* | ||
912 | *++ | ||
913 | * FACILITY: | ||
914 | * | ||
915 | * Alpha SRM Console Firmware | ||
916 | * | ||
917 | * MODULE DESCRIPTION: | ||
918 | * | ||
919 | * SMC37c669 Super I/O controller configuration routines. | ||
920 | * | ||
921 | * AUTHORS: | ||
922 | * | ||
923 | * Eric Rasmussen | ||
924 | * | ||
925 | * CREATION DATE: | ||
926 | * | ||
927 | * 28-Jan-1997 | ||
928 | * | ||
929 | * MODIFICATION HISTORY: | ||
930 | * | ||
931 | * er 01-May-1997 Fixed pointer conversion errors in | ||
932 | * SMC37c669_get_device_config(). | ||
933 | * er 28-Jan-1997 Initial version. | ||
934 | * | ||
935 | *-- | ||
936 | */ | ||
937 | #if 0 | ||
938 | /* $INCLUDE_OPTIONS$ */ | ||
939 | #include "cp$inc:platform_io.h" | ||
940 | /* $INCLUDE_OPTIONS_END$ */ | ||
941 | #include "cp$src:common.h" | ||
942 | #include "cp$inc:prototypes.h" | ||
943 | #include "cp$src:kernel_def.h" | ||
944 | #include "cp$src:msg_def.h" | ||
945 | #include "cp$src:smcc669_def.h" | ||
946 | /* Platform-specific includes */ | ||
947 | #include "cp$src:platform.h" | ||
948 | #endif | ||
949 | |||
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

/* Byte-wide port I/O: wb() writes value _y_ to port _x_, rb() reads port _x_ */
#define wb( _x_, _y_ )	outb( _y_, (unsigned int)((unsigned long)_x_) )
#define rb( _x_ )	inb( (unsigned int)((unsigned long)_x_) )
959 | |||
960 | /* | ||
961 | ** Local storage for device configuration information. | ||
962 | ** | ||
963 | ** Since the SMC37c669 does not provide an explicit | ||
964 | ** mechanism for enabling/disabling individual device | ||
965 | ** functions, other than unmapping the device, local | ||
966 | ** storage for device configuration information is | ||
967 | ** allocated here for use in implementing our own | ||
968 | ** function enable/disable scheme. | ||
969 | */ | ||
/* Shadow copy of each function's configuration (indexed by function id) */
static struct DEVICE_CONFIG {
    unsigned int port1;		/* primary I/O base address */
    unsigned int port2;		/* secondary address (used for IDE alt status) */
    int irq;			/* ISA IRQ assigned to the function */
    int drq;			/* ISA DMA channel assigned to the function */
} local_config [NUM_FUNCS];
976 | |||
977 | /* | ||
978 | ** List of all possible addresses for the Super I/O chip | ||
979 | */ | ||
static unsigned long SMC37c669_Addresses[] __initdata =
{
    0x3F0UL,	    /* Primary address */
    0x370UL,	    /* Secondary address */
    0UL		    /* End of list */
};
986 | |||
987 | /* | ||
988 | ** Global Pointer to the Super I/O device | ||
989 | */ | ||
990 | static SMC37c669_CONFIG_REGS *SMC37c669 __initdata = NULL; | ||
991 | |||
992 | /* | ||
993 | ** IRQ Translation Table | ||
994 | ** | ||
995 | ** The IRQ translation table is a list of SMC37c669 device | ||
996 | ** and standard ISA IRQs. | ||
997 | ** | ||
998 | */ | ||
999 | static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata; | ||
1000 | |||
1001 | /* | ||
1002 | ** The following definition is for the default IRQ | ||
1003 | ** translation table. | ||
1004 | */ | ||
static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_default_irq_table[]
__initdata =
{
    /* { device IRQ, ISA IRQ }; -1 entries have no ISA mapping */
    { SMC37c669_DEVICE_IRQ_A, -1 },
    { SMC37c669_DEVICE_IRQ_B, -1 },
    { SMC37c669_DEVICE_IRQ_C, 7 },
    { SMC37c669_DEVICE_IRQ_D, 6 },
    { SMC37c669_DEVICE_IRQ_E, 4 },
    { SMC37c669_DEVICE_IRQ_F, 3 },
    { SMC37c669_DEVICE_IRQ_H, -1 },
    { -1, -1 } /* End of table */
};
1017 | |||
1018 | /* | ||
1019 | ** The following definition is for the MONET (XP1000) IRQ | ||
1020 | ** translation table. | ||
1021 | */ | ||
static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_monet_irq_table[]
__initdata =
{
    /* { device IRQ, ISA IRQ }; MONET swaps IRQ_C/IRQ_D vs. the default table */
    { SMC37c669_DEVICE_IRQ_A, -1 },
    { SMC37c669_DEVICE_IRQ_B, -1 },
    { SMC37c669_DEVICE_IRQ_C, 6 },
    { SMC37c669_DEVICE_IRQ_D, 7 },
    { SMC37c669_DEVICE_IRQ_E, 4 },
    { SMC37c669_DEVICE_IRQ_F, 3 },
    { SMC37c669_DEVICE_IRQ_H, -1 },
    { -1, -1 } /* End of table */
};
1034 | |||
/* Per-platform IRQ tables, indexed by the argument to SMC37c669_detect() */
static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_tables[] __initdata =
{
    SMC37c669_default_irq_table,
    SMC37c669_monet_irq_table
};
1040 | |||
1041 | /* | ||
1042 | ** DRQ Translation Table | ||
1043 | ** | ||
1044 | ** The DRQ translation table is a list of SMC37c669 device and | ||
1045 | ** ISA DMA channels. | ||
1046 | ** | ||
1047 | */ | ||
1048 | static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata; | ||
1049 | |||
1050 | /* | ||
1051 | ** The following definition is the default DRQ | ||
1052 | ** translation table. | ||
1053 | */ | ||
static SMC37c669_DRQ_TRANSLATION_ENTRY SMC37c669_default_drq_table[]
__initdata =
{
    /* { device DRQ, ISA DMA channel }; -1 = no ISA mapping */
    { SMC37c669_DEVICE_DRQ_A, 2 },
    { SMC37c669_DEVICE_DRQ_B, 3 },
    { SMC37c669_DEVICE_DRQ_C, -1 },
    { -1, -1 } /* End of table */
};
1062 | |||
1063 | /* | ||
1064 | ** Local Function Prototype Declarations | ||
1065 | */ | ||
1066 | |||
static unsigned int SMC37c669_is_device_enabled( 
    unsigned int func 
);

#if 0
static unsigned int SMC37c669_get_device_config( 
    unsigned int func, 
    int *port, 
    int *irq, 
    int *drq 
);
#endif

/* Enter (TRUE) or leave (FALSE) the chip's configuration mode */
static void SMC37c669_config_mode( 
    unsigned int enable 
);

/* Read one configuration register by index */
static unsigned char SMC37c669_read_config( 
    unsigned char index 
);

/* Write one configuration register by index */
static void SMC37c669_write_config( 
    unsigned char index, 
    unsigned char data 
);

/* Populate local_config[] from the chip's current settings */
static void SMC37c669_init_local_config( void );

static struct DEVICE_CONFIG *SMC37c669_get_config(
    unsigned int func
);

/* Translate an ISA IRQ/DRQ to the chip's device IRQ/DRQ via the tables */
static int SMC37c669_xlate_irq(
    int irq
);

static int SMC37c669_xlate_drq(
    int drq
);

/* NOTE(review): presumably serializes access to the chip's config
** registers — confirm against the users of smc_lock further down. */
static __cacheline_aligned DEFINE_SPINLOCK(smc_lock);
1108 | |||
1109 | /* | ||
1110 | **++ | ||
1111 | ** FUNCTIONAL DESCRIPTION: | ||
1112 | ** | ||
1113 | ** This function detects the presence of an SMC37c669 Super I/O | ||
1114 | ** controller. | ||
1115 | ** | ||
1116 | ** FORMAL PARAMETERS: | ||
1117 | ** | ||
1118 | ** None | ||
1119 | ** | ||
1120 | ** RETURN VALUE: | ||
1121 | ** | ||
1122 | ** Returns a pointer to the device if found, otherwise, | ||
1123 | ** the NULL pointer is returned. | ||
1124 | ** | ||
1125 | ** SIDE EFFECTS: | ||
1126 | ** | ||
1127 | ** None | ||
1128 | ** | ||
1129 | **-- | ||
1130 | */ | ||
1131 | SMC37c669_CONFIG_REGS * __init SMC37c669_detect( int index ) | ||
1132 | { | ||
1133 | int i; | ||
1134 | SMC37c669_DEVICE_ID_REGISTER id; | ||
1135 | |||
1136 | for ( i = 0; SMC37c669_Addresses[i] != 0; i++ ) { | ||
1137 | /* | ||
1138 | ** Initialize the device pointer even though we don't yet know if | ||
1139 | ** the controller is at this address. The support functions access | ||
1140 | ** the controller through this device pointer so we need to set it | ||
1141 | ** even when we are looking ... | ||
1142 | */ | ||
1143 | SMC37c669 = ( SMC37c669_CONFIG_REGS * )SMC37c669_Addresses[i]; | ||
1144 | /* | ||
1145 | ** Enter configuration mode | ||
1146 | */ | ||
1147 | SMC37c669_config_mode( TRUE ); | ||
1148 | /* | ||
1149 | ** Read the device id | ||
1150 | */ | ||
1151 | id.as_uchar = SMC37c669_read_config( SMC37c669_DEVICE_ID_INDEX ); | ||
1152 | /* | ||
1153 | ** Exit configuration mode | ||
1154 | */ | ||
1155 | SMC37c669_config_mode( FALSE ); | ||
1156 | /* | ||
1157 | ** Does the device id match? If so, assume we have found an | ||
1158 | ** SMC37c669 controller at this address. | ||
1159 | */ | ||
1160 | if ( id.by_field.device_id == SMC37c669_DEVICE_ID ) { | ||
1161 | /* | ||
1162 | ** Initialize the IRQ and DRQ translation tables. | ||
1163 | */ | ||
1164 | SMC37c669_irq_table = SMC37c669_irq_tables[ index ]; | ||
1165 | SMC37c669_drq_table = SMC37c669_default_drq_table; | ||
1166 | /* | ||
1167 | ** erfix | ||
1168 | ** | ||
1169 | ** If the platform can't use the IRQ and DRQ defaults set up in this | ||
1170 | ** file, it should call a platform-specific external routine at this | ||
1171 | ** point to reset the IRQ and DRQ translation table pointers to point | ||
1172 | ** at the appropriate tables for the platform. If the defaults are | ||
1173 | ** acceptable, then the external routine should do nothing. | ||
1174 | */ | ||
1175 | |||
1176 | /* | ||
1177 | ** Put the chip back into configuration mode | ||
1178 | */ | ||
1179 | SMC37c669_config_mode( TRUE ); | ||
1180 | /* | ||
1181 | ** Initialize local storage for configuration information | ||
1182 | */ | ||
1183 | SMC37c669_init_local_config( ); | ||
1184 | /* | ||
1185 | ** Exit configuration mode | ||
1186 | */ | ||
1187 | SMC37c669_config_mode( FALSE ); | ||
1188 | /* | ||
1189 | ** SMC37c669 controller found, break out of search loop | ||
1190 | */ | ||
1191 | break; | ||
1192 | } | ||
1193 | else { | ||
1194 | /* | ||
1195 | ** Otherwise, we did not find an SMC37c669 controller at this | ||
1196 | ** address so set the device pointer to NULL. | ||
1197 | */ | ||
1198 | SMC37c669 = NULL; | ||
1199 | } | ||
1200 | } | ||
1201 | return SMC37c669; | ||
1202 | } | ||
1203 | |||
1204 | |||
1205 | /* | ||
1206 | **++ | ||
1207 | ** FUNCTIONAL DESCRIPTION: | ||
1208 | ** | ||
1209 | ** This function enables an SMC37c669 device function. | ||
1210 | ** | ||
1211 | ** FORMAL PARAMETERS: | ||
1212 | ** | ||
1213 | ** func: | ||
1214 | ** Which device function to enable | ||
1215 | ** | ||
1216 | ** RETURN VALUE: | ||
1217 | ** | ||
** Returns TRUE if the device function was enabled, otherwise, FALSE
1219 | ** | ||
1220 | ** SIDE EFFECTS: | ||
1221 | ** | ||
1222 | ** {@description or none@} | ||
1223 | ** | ||
1224 | ** DESIGN: | ||
1225 | ** | ||
1226 | ** Enabling a device function in the SMC37c669 controller involves | ||
1227 | ** setting all of its mappings (port, irq, drq ...). A local | ||
1228 | ** "shadow" copy of the device configuration is kept so we can | ||
1229 | ** just set each mapping to what the local copy says. | ||
1230 | ** | ||
** This function ALWAYS updates the local shadow configuration of
** the device function being enabled, even if the device is already
** enabled. To avoid replication of code, functions such as
** configure_device set up the local copy and then call this
** function to update the real device.
1236 | ** | ||
1237 | **-- | ||
1238 | */ | ||
unsigned int __init SMC37c669_enable_device ( unsigned int func )
{
    unsigned int ret_val = FALSE;
/*
** Put the device into configuration mode
*/
    SMC37c669_config_mode( TRUE );
    switch ( func ) {
    	case SERIAL_0:
	    {
	    	SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
/*
** Enable the serial 1 IRQ mapping (read-modify-write: CR28 also
** holds UART2's IRQ nibble)
*/
	    	irq.as_uchar = 
		    SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );

		irq.by_field.uart1_irq =
		    SMC37c669_RAW_DEVICE_IRQ(
			SMC37c669_xlate_irq( local_config[ func ].irq )
		    );

		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
/*
** Enable the serial 1 port base address mapping (CR24 holds
** Addr<9:3>, hence the >> 3)
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3;

		SMC37c669_write_config(
		    SMC37c669_SERIAL0_BASE_ADDRESS_INDEX,
		    base_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
    	case SERIAL_1:
	    {
	    	SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
/*
** Enable the serial 2 IRQ mapping (read-modify-write: CR28 also
** holds UART1's IRQ nibble)
*/
	    	irq.as_uchar = 
		    SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );

		irq.by_field.uart2_irq =
		    SMC37c669_RAW_DEVICE_IRQ(
			SMC37c669_xlate_irq( local_config[ func ].irq )
		    );

		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
/*
** Enable the serial 2 port base address mapping (CR25 holds
** Addr<9:3>, hence the >> 3)
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3;

		SMC37c669_write_config(
		    SMC37c669_SERIAL1_BASE_ADDRESS_INDEX,
		    base_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
    	case PARALLEL_0:
	    {
	    	SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
/*
** Enable the parallel port DMA channel mapping (CR26 is shared
** with the FDC, so read-modify-write)
*/
	    	drq.as_uchar = 
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );

		drq.by_field.ppt_drq = 
		    SMC37c669_RAW_DEVICE_DRQ(
			SMC37c669_xlate_drq( local_config[ func ].drq )
		    );

		SMC37c669_write_config( 
		    SMC37c669_PARALLEL_FDC_DRQ_INDEX, 
		    drq.as_uchar 
		);
/*
** Enable the parallel port IRQ mapping (CR27 is shared with the
** FDC, so read-modify-write)
*/
		irq.as_uchar = 
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );

		irq.by_field.ppt_irq = 
		    SMC37c669_RAW_DEVICE_IRQ(
			SMC37c669_xlate_irq( local_config[ func ].irq )
		    );

		SMC37c669_write_config( 
		    SMC37c669_PARALLEL_FDC_IRQ_INDEX, 
		    irq.as_uchar 
		);
/*
** Enable the parallel port base address mapping (CR23 holds
** Addr<9:2>, hence the >> 2)
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_2 = local_config[ func ].port1 >> 2;

		SMC37c669_write_config( 
		    SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, 
		    base_addr.as_uchar 
		);
		ret_val = TRUE;
		break;
	    }
    	case FLOPPY_0:
	    {
	    	SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
/*
** Enable the floppy controller DMA channel mapping (CR26 is
** shared with the parallel port, so read-modify-write)
*/
	    	drq.as_uchar = 
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
		 
		drq.by_field.fdc_drq = 
		    SMC37c669_RAW_DEVICE_DRQ(
			SMC37c669_xlate_drq( local_config[ func ].drq )
		    );
		 
		SMC37c669_write_config( 
		    SMC37c669_PARALLEL_FDC_DRQ_INDEX, 
		    drq.as_uchar 
		);
/*
** Enable the floppy controller IRQ mapping (CR27 is shared with
** the parallel port, so read-modify-write)
*/
		irq.as_uchar = 
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
		 
		irq.by_field.fdc_irq = 
		    SMC37c669_RAW_DEVICE_IRQ(
			SMC37c669_xlate_irq( local_config[ func ].irq )
		    );
		 
		SMC37c669_write_config( 
		    SMC37c669_PARALLEL_FDC_IRQ_INDEX, 
		    irq.as_uchar 
		);
/*
** Enable the floppy controller base address mapping (CR20 holds
** Addr<9:4>, hence the >> 4)
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
		 
		SMC37c669_write_config( 
		    SMC37c669_FDC_BASE_ADDRESS_INDEX, 
		    base_addr.as_uchar 
		);
		ret_val = TRUE;
		break;
	    }
    	case IDE_0:
	    {
	    	SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
/*
** Enable the IDE alternate status base address mapping
** (taken from port2 of the shadow config)
*/
	    	ide_addr.as_uchar = 0;
		ide_addr.by_field.addr9_4 = local_config[ func ].port2 >> 4;
		 
		SMC37c669_write_config( 
		    SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX, 
		    ide_addr.as_uchar 
		);
/*
** Enable the IDE controller base address mapping
** (taken from port1 of the shadow config)
*/
		ide_addr.as_uchar = 0;
		ide_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
		 
		SMC37c669_write_config( 
		    SMC37c669_IDE_BASE_ADDRESS_INDEX, 
		    ide_addr.as_uchar 
		);
		ret_val = TRUE;
		break;
	    }
    }
/*
** Exit configuration mode and return
*/
    SMC37c669_config_mode( FALSE );

    return ret_val;
}
1435 | |||
1436 | |||
1437 | /* | ||
1438 | **++ | ||
1439 | ** FUNCTIONAL DESCRIPTION: | ||
1440 | ** | ||
1441 | ** This function disables a device function within the | ||
1442 | ** SMC37c669 Super I/O controller. | ||
1443 | ** | ||
1444 | ** FORMAL PARAMETERS: | ||
1445 | ** | ||
1446 | ** func: | ||
1447 | ** Which function to disable | ||
1448 | ** | ||
1449 | ** RETURN VALUE: | ||
1450 | ** | ||
1451 | ** Return TRUE if the device function was disabled, otherwise, FALSE | ||
1452 | ** | ||
1453 | ** SIDE EFFECTS: | ||
1454 | ** | ||
1455 | ** {@description or none@} | ||
1456 | ** | ||
1457 | ** DESIGN: | ||
1458 | ** | ||
1459 | ** Disabling a function in the SMC37c669 device involves | ||
1460 | ** disabling all the function's mappings (port, irq, drq ...). | ||
1461 | ** A shadow copy of the device configuration is maintained | ||
** in local storage so we won't worry about saving the
1463 | ** current configuration information. | ||
1464 | ** | ||
1465 | **-- | ||
1466 | */ | ||
unsigned int __init SMC37c669_disable_device ( unsigned int func )
{
    unsigned int ret_val = FALSE;   /* stays FALSE for an unrecognized function */

    /*
    ** Put the device into configuration mode
    */
    SMC37c669_config_mode( TRUE );
    switch ( func ) {
	case SERIAL_0:
	    {
		SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
		/*
		** Disable the serial 1 IRQ mapping
		** (read-modify-write so serial 2's IRQ field is preserved)
		*/
		irq.as_uchar =
		    SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );

		irq.by_field.uart1_irq = 0;

		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
		/*
		** Disable the serial 1 port base address mapping.
		** Writing 0 clears addr<9:8>, which marks the mapping
		** disabled (see SMC37c669_is_device_enabled).
		*/
		base_addr.as_uchar = 0;
		SMC37c669_write_config(
		    SMC37c669_SERIAL0_BASE_ADDRESS_INDEX,
		    base_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
	case SERIAL_1:
	    {
		SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
		/*
		** Disable the serial 2 IRQ mapping
		*/
		irq.as_uchar =
		    SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );

		irq.by_field.uart2_irq = 0;

		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
		/*
		** Disable the serial 2 port base address mapping
		*/
		base_addr.as_uchar = 0;

		SMC37c669_write_config(
		    SMC37c669_SERIAL1_BASE_ADDRESS_INDEX,
		    base_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
	case PARALLEL_0:
	    {
		SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
		/*
		** Disable the parallel port DMA channel mapping.
		** The DRQ/IRQ registers are shared with the floppy
		** controller, so only the parallel-port fields are cleared.
		*/
		drq.as_uchar =
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );

		drq.by_field.ppt_drq = 0;

		SMC37c669_write_config(
		    SMC37c669_PARALLEL_FDC_DRQ_INDEX,
		    drq.as_uchar
		);
		/*
		** Disable the parallel port IRQ mapping
		*/
		irq.as_uchar =
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );

		irq.by_field.ppt_irq = 0;

		SMC37c669_write_config(
		    SMC37c669_PARALLEL_FDC_IRQ_INDEX,
		    irq.as_uchar
		);
		/*
		** Disable the parallel port base address mapping
		*/
		base_addr.as_uchar = 0;

		SMC37c669_write_config(
		    SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX,
		    base_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
	case FLOPPY_0:
	    {
		SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
		/*
		** Disable the floppy controller DMA channel mapping
		** (shared register with the parallel port; only the FDC
		** field is cleared)
		*/
		drq.as_uchar =
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );

		drq.by_field.fdc_drq = 0;

		SMC37c669_write_config(
		    SMC37c669_PARALLEL_FDC_DRQ_INDEX,
		    drq.as_uchar
		);
		/*
		** Disable the floppy controller IRQ mapping
		*/
		irq.as_uchar =
		    SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );

		irq.by_field.fdc_irq = 0;

		SMC37c669_write_config(
		    SMC37c669_PARALLEL_FDC_IRQ_INDEX,
		    irq.as_uchar
		);
		/*
		** Disable the floppy controller base address mapping
		*/
		base_addr.as_uchar = 0;

		SMC37c669_write_config(
		    SMC37c669_FDC_BASE_ADDRESS_INDEX,
		    base_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
	case IDE_0:
	    {
		SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
		/*
		** Disable the IDE alternate status base address mapping
		*/
		ide_addr.as_uchar = 0;

		SMC37c669_write_config(
		    SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX,
		    ide_addr.as_uchar
		);
		/*
		** Disable the IDE controller base address mapping
		*/
		ide_addr.as_uchar = 0;

		SMC37c669_write_config(
		    SMC37c669_IDE_BASE_ADDRESS_INDEX,
		    ide_addr.as_uchar
		);
		ret_val = TRUE;
		break;
	    }
    }
    /*
    ** Exit configuration mode and return
    */
    SMC37c669_config_mode( FALSE );

    return ret_val;
}
1639 | |||
1640 | |||
1641 | /* | ||
1642 | **++ | ||
1643 | ** FUNCTIONAL DESCRIPTION: | ||
1644 | ** | ||
1645 | ** This function configures a device function within the | ||
1646 | ** SMC37c669 Super I/O controller. | ||
1647 | ** | ||
1648 | ** FORMAL PARAMETERS: | ||
1649 | ** | ||
1650 | ** func: | ||
1651 | ** Which device function | ||
1652 | ** | ||
1653 | ** port: | ||
1654 | ** I/O port for the function to use | ||
1655 | ** | ||
1656 | ** irq: | ||
1657 | ** IRQ for the device function to use | ||
1658 | ** | ||
1659 | ** drq: | ||
1660 | ** DMA channel for the device function to use | ||
1661 | ** | ||
1662 | ** RETURN VALUE: | ||
1663 | ** | ||
1664 | ** Returns TRUE if the device function was configured, | ||
1665 | ** otherwise, FALSE. | ||
1666 | ** | ||
1667 | ** SIDE EFFECTS: | ||
1668 | ** | ||
1669 | ** {@description or none@} | ||
1670 | ** | ||
1671 | ** DESIGN: | ||
1672 | ** | ||
1673 | ** If this function returns TRUE, the local shadow copy of | ||
1674 | ** the configuration is also updated. If the device function | ||
1675 | ** is currently disabled, only the local shadow copy is | ||
1676 | ** updated and the actual device function will be updated | ||
1677 | ** if/when it is enabled. | ||
1678 | ** | ||
1679 | **-- | ||
1680 | */ | ||
1681 | unsigned int __init SMC37c669_configure_device ( | ||
1682 | unsigned int func, | ||
1683 | int port, | ||
1684 | int irq, | ||
1685 | int drq ) | ||
1686 | { | ||
1687 | struct DEVICE_CONFIG *cp; | ||
1688 | |||
1689 | /* | ||
1690 | ** Check for a valid configuration | ||
1691 | */ | ||
1692 | if ( ( cp = SMC37c669_get_config ( func ) ) != NULL ) { | ||
1693 | /* | ||
1694 | ** Configuration is valid, update the local shadow copy | ||
1695 | */ | ||
1696 | if ( ( drq & ~0xFF ) == 0 ) { | ||
1697 | cp->drq = drq; | ||
1698 | } | ||
1699 | if ( ( irq & ~0xFF ) == 0 ) { | ||
1700 | cp->irq = irq; | ||
1701 | } | ||
1702 | if ( ( port & ~0xFFFF ) == 0 ) { | ||
1703 | cp->port1 = port; | ||
1704 | } | ||
1705 | /* | ||
1706 | ** If the device function is enabled, update the actual | ||
1707 | ** device configuration. | ||
1708 | */ | ||
1709 | if ( SMC37c669_is_device_enabled( func ) ) { | ||
1710 | SMC37c669_enable_device( func ); | ||
1711 | } | ||
1712 | return TRUE; | ||
1713 | } | ||
1714 | return FALSE; | ||
1715 | } | ||
1716 | |||
1717 | |||
1718 | /* | ||
1719 | **++ | ||
1720 | ** FUNCTIONAL DESCRIPTION: | ||
1721 | ** | ||
1722 | ** This function determines whether a device function | ||
1723 | ** within the SMC37c669 controller is enabled. | ||
1724 | ** | ||
1725 | ** FORMAL PARAMETERS: | ||
1726 | ** | ||
1727 | ** func: | ||
1728 | ** Which device function | ||
1729 | ** | ||
1730 | ** RETURN VALUE: | ||
1731 | ** | ||
1732 | ** Returns TRUE if the device function is enabled, otherwise, FALSE | ||
1733 | ** | ||
1734 | ** SIDE EFFECTS: | ||
1735 | ** | ||
1736 | ** {@description or none@} | ||
1737 | ** | ||
1738 | ** DESIGN: | ||
1739 | ** | ||
1740 | ** To check whether a device is enabled we will only look at | ||
1741 | ** the port base address mapping. According to the SMC37c669 | ||
1742 | ** specification, all of the port base address mappings are | ||
1743 | ** disabled if the addr<9:8> (bits <7:6> of the register) are | ||
1744 | ** zero. | ||
1745 | ** | ||
1746 | **-- | ||
1747 | */ | ||
1748 | static unsigned int __init SMC37c669_is_device_enabled ( unsigned int func ) | ||
1749 | { | ||
1750 | unsigned char base_addr = 0; | ||
1751 | unsigned int dev_ok = FALSE; | ||
1752 | unsigned int ret_val = FALSE; | ||
1753 | /* | ||
1754 | ** Enter configuration mode | ||
1755 | */ | ||
1756 | SMC37c669_config_mode( TRUE ); | ||
1757 | |||
1758 | switch ( func ) { | ||
1759 | case SERIAL_0: | ||
1760 | base_addr = | ||
1761 | SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX ); | ||
1762 | dev_ok = TRUE; | ||
1763 | break; | ||
1764 | case SERIAL_1: | ||
1765 | base_addr = | ||
1766 | SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX ); | ||
1767 | dev_ok = TRUE; | ||
1768 | break; | ||
1769 | case PARALLEL_0: | ||
1770 | base_addr = | ||
1771 | SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX ); | ||
1772 | dev_ok = TRUE; | ||
1773 | break; | ||
1774 | case FLOPPY_0: | ||
1775 | base_addr = | ||
1776 | SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX ); | ||
1777 | dev_ok = TRUE; | ||
1778 | break; | ||
1779 | case IDE_0: | ||
1780 | base_addr = | ||
1781 | SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX ); | ||
1782 | dev_ok = TRUE; | ||
1783 | break; | ||
1784 | } | ||
1785 | /* | ||
1786 | ** If we have a valid device, check base_addr<7:6> to see if the | ||
1787 | ** device is enabled (mapped). | ||
1788 | */ | ||
1789 | if ( ( dev_ok ) && ( ( base_addr & 0xC0 ) != 0 ) ) { | ||
1790 | /* | ||
1791 | ** The mapping is not disabled, so assume that the function is | ||
1792 | ** enabled. | ||
1793 | */ | ||
1794 | ret_val = TRUE; | ||
1795 | } | ||
1796 | /* | ||
1797 | ** Exit configuration mode | ||
1798 | */ | ||
1799 | SMC37c669_config_mode( FALSE ); | ||
1800 | |||
1801 | return ret_val; | ||
1802 | } | ||
1803 | |||
1804 | |||
1805 | #if 0 | ||
1806 | /* | ||
1807 | **++ | ||
1808 | ** FUNCTIONAL DESCRIPTION: | ||
1809 | ** | ||
1810 | ** This function retrieves the configuration information of a | ||
** device function within the SMC37c669 Super I/O controller.
1812 | ** | ||
1813 | ** FORMAL PARAMETERS: | ||
1814 | ** | ||
1815 | ** func: | ||
1816 | ** Which device function | ||
1817 | ** | ||
1818 | ** port: | ||
1819 | ** I/O port returned | ||
1820 | ** | ||
1821 | ** irq: | ||
1822 | ** IRQ returned | ||
1823 | ** | ||
1824 | ** drq: | ||
1825 | ** DMA channel returned | ||
1826 | ** | ||
1827 | ** RETURN VALUE: | ||
1828 | ** | ||
1829 | ** Returns TRUE if the device configuration was successfully | ||
1830 | ** retrieved, otherwise, FALSE. | ||
1831 | ** | ||
1832 | ** SIDE EFFECTS: | ||
1833 | ** | ||
1834 | ** The data pointed to by the port, irq, and drq parameters | ||
** may be modified even if the configuration is not successfully
1836 | ** retrieved. | ||
1837 | ** | ||
1838 | ** DESIGN: | ||
1839 | ** | ||
1840 | ** The device configuration is fetched from the local shadow | ||
1841 | ** copy. Any unused parameters will be set to -1. Any | ||
1842 | ** parameter which is not desired can specify the NULL | ||
1843 | ** pointer. | ||
1844 | ** | ||
1845 | **-- | ||
1846 | */ | ||
1847 | static unsigned int __init SMC37c669_get_device_config ( | ||
1848 | unsigned int func, | ||
1849 | int *port, | ||
1850 | int *irq, | ||
1851 | int *drq ) | ||
1852 | { | ||
1853 | struct DEVICE_CONFIG *cp; | ||
1854 | unsigned int ret_val = FALSE; | ||
1855 | /* | ||
1856 | ** Check for a valid device configuration | ||
1857 | */ | ||
1858 | if ( ( cp = SMC37c669_get_config( func ) ) != NULL ) { | ||
1859 | if ( drq != NULL ) { | ||
1860 | *drq = cp->drq; | ||
1861 | ret_val = TRUE; | ||
1862 | } | ||
1863 | if ( irq != NULL ) { | ||
1864 | *irq = cp->irq; | ||
1865 | ret_val = TRUE; | ||
1866 | } | ||
1867 | if ( port != NULL ) { | ||
1868 | *port = cp->port1; | ||
1869 | ret_val = TRUE; | ||
1870 | } | ||
1871 | } | ||
1872 | return ret_val; | ||
1873 | } | ||
1874 | #endif | ||
1875 | |||
1876 | |||
1877 | /* | ||
1878 | **++ | ||
1879 | ** FUNCTIONAL DESCRIPTION: | ||
1880 | ** | ||
** This function displays the current state of the SMC37c669
1882 | ** Super I/O controller's device functions. | ||
1883 | ** | ||
1884 | ** FORMAL PARAMETERS: | ||
1885 | ** | ||
1886 | ** None | ||
1887 | ** | ||
1888 | ** RETURN VALUE: | ||
1889 | ** | ||
1890 | ** None | ||
1891 | ** | ||
1892 | ** SIDE EFFECTS: | ||
1893 | ** | ||
1894 | ** None | ||
1895 | ** | ||
1896 | **-- | ||
1897 | */ | ||
1898 | void __init SMC37c669_display_device_info ( void ) | ||
1899 | { | ||
1900 | if ( SMC37c669_is_device_enabled( SERIAL_0 ) ) { | ||
1901 | printk( " Serial 0: Enabled [ Port 0x%x, IRQ %d ]\n", | ||
1902 | local_config[ SERIAL_0 ].port1, | ||
1903 | local_config[ SERIAL_0 ].irq | ||
1904 | ); | ||
1905 | } | ||
1906 | else { | ||
1907 | printk( " Serial 0: Disabled\n" ); | ||
1908 | } | ||
1909 | |||
1910 | if ( SMC37c669_is_device_enabled( SERIAL_1 ) ) { | ||
1911 | printk( " Serial 1: Enabled [ Port 0x%x, IRQ %d ]\n", | ||
1912 | local_config[ SERIAL_1 ].port1, | ||
1913 | local_config[ SERIAL_1 ].irq | ||
1914 | ); | ||
1915 | } | ||
1916 | else { | ||
1917 | printk( " Serial 1: Disabled\n" ); | ||
1918 | } | ||
1919 | |||
1920 | if ( SMC37c669_is_device_enabled( PARALLEL_0 ) ) { | ||
1921 | printk( " Parallel: Enabled [ Port 0x%x, IRQ %d/%d ]\n", | ||
1922 | local_config[ PARALLEL_0 ].port1, | ||
1923 | local_config[ PARALLEL_0 ].irq, | ||
1924 | local_config[ PARALLEL_0 ].drq | ||
1925 | ); | ||
1926 | } | ||
1927 | else { | ||
1928 | printk( " Parallel: Disabled\n" ); | ||
1929 | } | ||
1930 | |||
1931 | if ( SMC37c669_is_device_enabled( FLOPPY_0 ) ) { | ||
1932 | printk( " Floppy Ctrl: Enabled [ Port 0x%x, IRQ %d/%d ]\n", | ||
1933 | local_config[ FLOPPY_0 ].port1, | ||
1934 | local_config[ FLOPPY_0 ].irq, | ||
1935 | local_config[ FLOPPY_0 ].drq | ||
1936 | ); | ||
1937 | } | ||
1938 | else { | ||
1939 | printk( " Floppy Ctrl: Disabled\n" ); | ||
1940 | } | ||
1941 | |||
1942 | if ( SMC37c669_is_device_enabled( IDE_0 ) ) { | ||
1943 | printk( " IDE 0: Enabled [ Port 0x%x, IRQ %d ]\n", | ||
1944 | local_config[ IDE_0 ].port1, | ||
1945 | local_config[ IDE_0 ].irq | ||
1946 | ); | ||
1947 | } | ||
1948 | else { | ||
1949 | printk( " IDE 0: Disabled\n" ); | ||
1950 | } | ||
1951 | } | ||
1952 | |||
1953 | |||
1954 | /* | ||
1955 | **++ | ||
1956 | ** FUNCTIONAL DESCRIPTION: | ||
1957 | ** | ||
1958 | ** This function puts the SMC37c669 Super I/O controller into, | ||
1959 | ** and takes it out of, configuration mode. | ||
1960 | ** | ||
1961 | ** FORMAL PARAMETERS: | ||
1962 | ** | ||
1963 | ** enable: | ||
1964 | ** TRUE to enter configuration mode, FALSE to exit. | ||
1965 | ** | ||
1966 | ** RETURN VALUE: | ||
1967 | ** | ||
1968 | ** None | ||
1969 | ** | ||
1970 | ** SIDE EFFECTS: | ||
1971 | ** | ||
1972 | ** The SMC37c669 controller may be left in configuration mode. | ||
1973 | ** | ||
1974 | **-- | ||
1975 | */ | ||
1976 | static void __init SMC37c669_config_mode( | ||
1977 | unsigned int enable ) | ||
1978 | { | ||
1979 | if ( enable ) { | ||
1980 | /* | ||
1981 | ** To enter configuration mode, two writes in succession to the index | ||
1982 | ** port are required. If a write to another address or port occurs | ||
1983 | ** between these two writes, the chip does not enter configuration | ||
1984 | ** mode. Therefore, a spinlock is placed around the two writes to | ||
1985 | ** guarantee that they complete uninterrupted. | ||
1986 | */ | ||
1987 | spin_lock(&smc_lock); | ||
1988 | wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY ); | ||
1989 | wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY ); | ||
1990 | spin_unlock(&smc_lock); | ||
1991 | } | ||
1992 | else { | ||
1993 | wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY ); | ||
1994 | } | ||
1995 | } | ||
1996 | |||
1997 | /* | ||
1998 | **++ | ||
1999 | ** FUNCTIONAL DESCRIPTION: | ||
2000 | ** | ||
2001 | ** This function reads an SMC37c669 Super I/O controller | ||
2002 | ** configuration register. This function assumes that the | ||
2003 | ** device is already in configuration mode. | ||
2004 | ** | ||
2005 | ** FORMAL PARAMETERS: | ||
2006 | ** | ||
2007 | ** index: | ||
2008 | ** Index value of configuration register to read | ||
2009 | ** | ||
2010 | ** RETURN VALUE: | ||
2011 | ** | ||
2012 | ** Data read from configuration register | ||
2013 | ** | ||
2014 | ** SIDE EFFECTS: | ||
2015 | ** | ||
2016 | ** None | ||
2017 | ** | ||
2018 | **-- | ||
2019 | */ | ||
2020 | static unsigned char __init SMC37c669_read_config( | ||
2021 | unsigned char index ) | ||
2022 | { | ||
2023 | unsigned char data; | ||
2024 | |||
2025 | wb( &SMC37c669->index_port, index ); | ||
2026 | data = rb( &SMC37c669->data_port ); | ||
2027 | return data; | ||
2028 | } | ||
2029 | |||
2030 | /* | ||
2031 | **++ | ||
2032 | ** FUNCTIONAL DESCRIPTION: | ||
2033 | ** | ||
2034 | ** This function writes an SMC37c669 Super I/O controller | ||
2035 | ** configuration register. This function assumes that the | ||
2036 | ** device is already in configuration mode. | ||
2037 | ** | ||
2038 | ** FORMAL PARAMETERS: | ||
2039 | ** | ||
2040 | ** index: | ||
2041 | ** Index of configuration register to write | ||
2042 | ** | ||
2043 | ** data: | ||
2044 | ** Data to be written | ||
2045 | ** | ||
2046 | ** RETURN VALUE: | ||
2047 | ** | ||
2048 | ** None | ||
2049 | ** | ||
2050 | ** SIDE EFFECTS: | ||
2051 | ** | ||
2052 | ** None | ||
2053 | ** | ||
2054 | **-- | ||
2055 | */ | ||
static void __init SMC37c669_write_config(
    unsigned char index,
    unsigned char data )
{
    wb( &SMC37c669->index_port, index );    /* select the register ... */
    wb( &SMC37c669->data_port, data );      /* ... then write its new value */
}
2063 | |||
2064 | |||
2065 | /* | ||
2066 | **++ | ||
2067 | ** FUNCTIONAL DESCRIPTION: | ||
2068 | ** | ||
2069 | ** This function initializes the local device | ||
2070 | ** configuration storage. This function assumes | ||
2071 | ** that the device is already in configuration | ||
2072 | ** mode. | ||
2073 | ** | ||
2074 | ** FORMAL PARAMETERS: | ||
2075 | ** | ||
2076 | ** None | ||
2077 | ** | ||
2078 | ** RETURN VALUE: | ||
2079 | ** | ||
2080 | ** None | ||
2081 | ** | ||
2082 | ** SIDE EFFECTS: | ||
2083 | ** | ||
2084 | ** Local storage for device configuration information | ||
2085 | ** is initialized. | ||
2086 | ** | ||
2087 | **-- | ||
2088 | */ | ||
static void __init SMC37c669_init_local_config ( void )
{
    SMC37c669_SERIAL_BASE_ADDRESS_REGISTER uart_base;
    SMC37c669_SERIAL_IRQ_REGISTER uart_irqs;
    SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER ppt_base;
    SMC37c669_PARALLEL_FDC_IRQ_REGISTER ppt_fdc_irqs;
    SMC37c669_PARALLEL_FDC_DRQ_REGISTER ppt_fdc_drqs;
    SMC37c669_FDC_BASE_ADDRESS_REGISTER fdc_base;
    SMC37c669_IDE_ADDRESS_REGISTER ide_base;
    SMC37c669_IDE_ADDRESS_REGISTER ide_alt;

    /*
    ** Get serial port 1 base address
    */
    uart_base.as_uchar =
	SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX );
    /*
    ** Get IRQs for serial ports 1 & 2 (shared register)
    */
    uart_irqs.as_uchar =
	SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
    /*
    ** Store local configuration information for serial port 1.
    ** The register holds addr<9:3>, so shift left 3 to recover the
    ** I/O port; device IRQ numbers are translated to ISA IRQs.
    */
    local_config[SERIAL_0].port1 = uart_base.by_field.addr9_3 << 3;
    local_config[SERIAL_0].irq =
	SMC37c669_xlate_irq(
	    SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart1_irq )
	);
    /*
    ** Get serial port 2 base address
    */
    uart_base.as_uchar =
	SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX );
    /*
    ** Store local configuration information for serial port 2
    */
    local_config[SERIAL_1].port1 = uart_base.by_field.addr9_3 << 3;
    local_config[SERIAL_1].irq =
	SMC37c669_xlate_irq(
	    SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart2_irq )
	);
    /*
    ** Get parallel port base address (register holds addr<9:2>)
    */
    ppt_base.as_uchar =
	SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX );
    /*
    ** Get IRQs for parallel port and floppy controller (shared register)
    */
    ppt_fdc_irqs.as_uchar =
	SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
    /*
    ** Get DRQs for parallel port and floppy controller (shared register)
    */
    ppt_fdc_drqs.as_uchar =
	SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
    /*
    ** Store local configuration information for parallel port
    */
    local_config[PARALLEL_0].port1 = ppt_base.by_field.addr9_2 << 2;
    local_config[PARALLEL_0].irq =
	SMC37c669_xlate_irq(
	    SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.ppt_irq )
	);
    local_config[PARALLEL_0].drq =
	SMC37c669_xlate_drq(
	    SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.ppt_drq )
	);
    /*
    ** Get floppy controller base address (register holds addr<9:4>)
    */
    fdc_base.as_uchar =
	SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX );
    /*
    ** Store local configuration information for floppy controller
    */
    local_config[FLOPPY_0].port1 = fdc_base.by_field.addr9_4 << 4;
    local_config[FLOPPY_0].irq =
	SMC37c669_xlate_irq(
	    SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.fdc_irq )
	);
    local_config[FLOPPY_0].drq =
	SMC37c669_xlate_drq(
	    SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.fdc_drq )
	);
    /*
    ** Get IDE controller base address
    */
    ide_base.as_uchar =
	SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX );
    /*
    ** Get IDE alternate status base address
    */
    ide_alt.as_uchar =
	SMC37c669_read_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX );
    /*
    ** Store local configuration information for IDE controller.
    ** NOTE(review): the IRQ is hard-coded to 14 rather than read from
    ** the chip — presumably the standard ISA primary-IDE IRQ; confirm.
    */
    local_config[IDE_0].port1 = ide_base.by_field.addr9_4 << 4;
    local_config[IDE_0].port2 = ide_alt.by_field.addr9_4 << 4;
    local_config[IDE_0].irq = 14;
}
2192 | |||
2193 | |||
2194 | /* | ||
2195 | **++ | ||
2196 | ** FUNCTIONAL DESCRIPTION: | ||
2197 | ** | ||
2198 | ** This function returns a pointer to the local shadow | ||
2199 | ** configuration of the requested device function. | ||
2200 | ** | ||
2201 | ** FORMAL PARAMETERS: | ||
2202 | ** | ||
2203 | ** func: | ||
2204 | ** Which device function | ||
2205 | ** | ||
2206 | ** RETURN VALUE: | ||
2207 | ** | ||
2208 | ** Returns a pointer to the DEVICE_CONFIG structure for the | ||
2209 | ** requested function, otherwise, NULL. | ||
2210 | ** | ||
2211 | ** SIDE EFFECTS: | ||
2212 | ** | ||
2213 | ** {@description or none@} | ||
2214 | ** | ||
2215 | **-- | ||
2216 | */ | ||
2217 | static struct DEVICE_CONFIG * __init SMC37c669_get_config( unsigned int func ) | ||
2218 | { | ||
2219 | struct DEVICE_CONFIG *cp = NULL; | ||
2220 | |||
2221 | switch ( func ) { | ||
2222 | case SERIAL_0: | ||
2223 | cp = &local_config[ SERIAL_0 ]; | ||
2224 | break; | ||
2225 | case SERIAL_1: | ||
2226 | cp = &local_config[ SERIAL_1 ]; | ||
2227 | break; | ||
2228 | case PARALLEL_0: | ||
2229 | cp = &local_config[ PARALLEL_0 ]; | ||
2230 | break; | ||
2231 | case FLOPPY_0: | ||
2232 | cp = &local_config[ FLOPPY_0 ]; | ||
2233 | break; | ||
2234 | case IDE_0: | ||
2235 | cp = &local_config[ IDE_0 ]; | ||
2236 | break; | ||
2237 | } | ||
2238 | return cp; | ||
2239 | } | ||
2240 | |||
2241 | /* | ||
2242 | **++ | ||
2243 | ** FUNCTIONAL DESCRIPTION: | ||
2244 | ** | ||
2245 | ** This function translates IRQs back and forth between ISA | ||
2246 | ** IRQs and SMC37c669 device IRQs. | ||
2247 | ** | ||
2248 | ** FORMAL PARAMETERS: | ||
2249 | ** | ||
2250 | ** irq: | ||
2251 | ** The IRQ to translate | ||
2252 | ** | ||
2253 | ** RETURN VALUE: | ||
2254 | ** | ||
2255 | ** Returns the translated IRQ, otherwise, returns -1. | ||
2256 | ** | ||
2257 | ** SIDE EFFECTS: | ||
2258 | ** | ||
2259 | ** {@description or none@} | ||
2260 | ** | ||
2261 | **-- | ||
2262 | */ | ||
2263 | static int __init SMC37c669_xlate_irq ( int irq ) | ||
2264 | { | ||
2265 | int i, translated_irq = -1; | ||
2266 | |||
2267 | if ( SMC37c669_IS_DEVICE_IRQ( irq ) ) { | ||
2268 | /* | ||
2269 | ** We are translating a device IRQ to an ISA IRQ | ||
2270 | */ | ||
2271 | for ( i = 0; ( SMC37c669_irq_table[i].device_irq != -1 ) || ( SMC37c669_irq_table[i].isa_irq != -1 ); i++ ) { | ||
2272 | if ( irq == SMC37c669_irq_table[i].device_irq ) { | ||
2273 | translated_irq = SMC37c669_irq_table[i].isa_irq; | ||
2274 | break; | ||
2275 | } | ||
2276 | } | ||
2277 | } | ||
2278 | else { | ||
2279 | /* | ||
2280 | ** We are translating an ISA IRQ to a device IRQ | ||
2281 | */ | ||
2282 | for ( i = 0; ( SMC37c669_irq_table[i].isa_irq != -1 ) || ( SMC37c669_irq_table[i].device_irq != -1 ); i++ ) { | ||
2283 | if ( irq == SMC37c669_irq_table[i].isa_irq ) { | ||
2284 | translated_irq = SMC37c669_irq_table[i].device_irq; | ||
2285 | break; | ||
2286 | } | ||
2287 | } | ||
2288 | } | ||
2289 | return translated_irq; | ||
2290 | } | ||
2291 | |||
2292 | |||
2293 | /* | ||
2294 | **++ | ||
2295 | ** FUNCTIONAL DESCRIPTION: | ||
2296 | ** | ||
2297 | ** This function translates DMA channels back and forth between | ||
2298 | ** ISA DMA channels and SMC37c669 device DMA channels. | ||
2299 | ** | ||
2300 | ** FORMAL PARAMETERS: | ||
2301 | ** | ||
2302 | ** drq: | ||
2303 | ** The DMA channel to translate | ||
2304 | ** | ||
2305 | ** RETURN VALUE: | ||
2306 | ** | ||
2307 | ** Returns the translated DMA channel, otherwise, returns -1 | ||
2308 | ** | ||
2309 | ** SIDE EFFECTS: | ||
2310 | ** | ||
2311 | ** {@description or none@} | ||
2312 | ** | ||
2313 | **-- | ||
2314 | */ | ||
2315 | static int __init SMC37c669_xlate_drq ( int drq ) | ||
2316 | { | ||
2317 | int i, translated_drq = -1; | ||
2318 | |||
2319 | if ( SMC37c669_IS_DEVICE_DRQ( drq ) ) { | ||
2320 | /* | ||
2321 | ** We are translating a device DMA channel to an ISA DMA channel | ||
2322 | */ | ||
2323 | for ( i = 0; ( SMC37c669_drq_table[i].device_drq != -1 ) || ( SMC37c669_drq_table[i].isa_drq != -1 ); i++ ) { | ||
2324 | if ( drq == SMC37c669_drq_table[i].device_drq ) { | ||
2325 | translated_drq = SMC37c669_drq_table[i].isa_drq; | ||
2326 | break; | ||
2327 | } | ||
2328 | } | ||
2329 | } | ||
2330 | else { | ||
2331 | /* | ||
2332 | ** We are translating an ISA DMA channel to a device DMA channel | ||
2333 | */ | ||
2334 | for ( i = 0; ( SMC37c669_drq_table[i].isa_drq != -1 ) || ( SMC37c669_drq_table[i].device_drq != -1 ); i++ ) { | ||
2335 | if ( drq == SMC37c669_drq_table[i].isa_drq ) { | ||
2336 | translated_drq = SMC37c669_drq_table[i].device_drq; | ||
2337 | break; | ||
2338 | } | ||
2339 | } | ||
2340 | } | ||
2341 | return translated_drq; | ||
2342 | } | ||
2343 | |||
2344 | #if 0 | ||
/*
** Create and describe the inode used to expose the SMC configuration
** space (0x30 bytes) for reading and writing.  ip->misc counts active
** writers (see smcc669_open) and starts at zero.
*/
int __init smcc669_init ( void )
{
    struct INODE *ip;

    allocinode( smc_ddb.name, 1, &ip );
    ip->dva = &smc_ddb;
    ip->attr = ATTR$M_WRITE | ATTR$M_READ;
    ip->len[0] = 0x30;          /* size of the accessible config space */
    ip->misc = 0;               /* no writers yet */
    INODE_UNLOCK( ip );

    return msg_success;
}
2358 | |||
2359 | int __init smcc669_open( struct FILE *fp, char *info, char *next, char *mode ) | ||
2360 | { | ||
2361 | struct INODE *ip; | ||
2362 | /* | ||
2363 | ** Allow multiple readers but only one writer. ip->misc keeps track | ||
2364 | ** of the number of writers | ||
2365 | */ | ||
2366 | ip = fp->ip; | ||
2367 | INODE_LOCK( ip ); | ||
2368 | if ( fp->mode & ATTR$M_WRITE ) { | ||
2369 | if ( ip->misc ) { | ||
2370 | INODE_UNLOCK( ip ); | ||
2371 | return msg_failure; /* too many writers */ | ||
2372 | } | ||
2373 | ip->misc++; | ||
2374 | } | ||
2375 | /* | ||
2376 | ** Treat the information field as a byte offset | ||
2377 | */ | ||
2378 | *fp->offset = xtoi( info ); | ||
2379 | INODE_UNLOCK( ip ); | ||
2380 | |||
2381 | return msg_success; | ||
2382 | } | ||
2383 | |||
2384 | int __init smcc669_close( struct FILE *fp ) | ||
2385 | { | ||
2386 | struct INODE *ip; | ||
2387 | |||
2388 | ip = fp->ip; | ||
2389 | if ( fp->mode & ATTR$M_WRITE ) { | ||
2390 | INODE_LOCK( ip ); | ||
2391 | ip->misc--; | ||
2392 | INODE_UNLOCK( ip ); | ||
2393 | } | ||
2394 | return msg_success; | ||
2395 | } | ||
2396 | |||
2397 | int __init smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf ) | ||
2398 | { | ||
2399 | int i; | ||
2400 | int length; | ||
2401 | int nbytes; | ||
2402 | struct INODE *ip; | ||
2403 | |||
2404 | /* | ||
2405 | ** Always access a byte at a time | ||
2406 | */ | ||
2407 | ip = fp->ip; | ||
2408 | length = size * number; | ||
2409 | nbytes = 0; | ||
2410 | |||
2411 | SMC37c669_config_mode( TRUE ); | ||
2412 | for ( i = 0; i < length; i++ ) { | ||
2413 | if ( !inrange( *fp->offset, 0, ip->len[0] ) ) | ||
2414 | break; | ||
2415 | *buf++ = SMC37c669_read_config( *fp->offset ); | ||
2416 | *fp->offset += 1; | ||
2417 | nbytes++; | ||
2418 | } | ||
2419 | SMC37c669_config_mode( FALSE ); | ||
2420 | return nbytes; | ||
2421 | } | ||
2422 | |||
2423 | int __init smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf ) | ||
2424 | { | ||
2425 | int i; | ||
2426 | int length; | ||
2427 | int nbytes; | ||
2428 | struct INODE *ip; | ||
2429 | /* | ||
2430 | ** Always access a byte at a time | ||
2431 | */ | ||
2432 | ip = fp->ip; | ||
2433 | length = size * number; | ||
2434 | nbytes = 0; | ||
2435 | |||
2436 | SMC37c669_config_mode( TRUE ); | ||
2437 | for ( i = 0; i < length; i++ ) { | ||
2438 | if ( !inrange( *fp->offset, 0, ip->len[0] ) ) | ||
2439 | break; | ||
2440 | SMC37c669_write_config( *fp->offset, *buf ); | ||
2441 | *fp->offset += 1; | ||
2442 | buf++; | ||
2443 | nbytes++; | ||
2444 | } | ||
2445 | SMC37c669_config_mode( FALSE ); | ||
2446 | return nbytes; | ||
2447 | } | ||
2448 | #endif | ||
2449 | |||
2450 | void __init | ||
2451 | SMC37c669_dump_registers(void) | ||
2452 | { | ||
2453 | int i; | ||
2454 | for (i = 0; i <= 0x29; i++) | ||
2455 | printk("-- CR%02x : %02x\n", i, SMC37c669_read_config(i)); | ||
2456 | } | ||
2457 | /*+ | ||
2458 | * ============================================================================ | ||
* = SMC669_Init - SMC37c669 Super I/O controller initialization =
2460 | * ============================================================================ | ||
2461 | * | ||
2462 | * OVERVIEW: | ||
2463 | * | ||
2464 | * This routine configures and enables device functions on the | ||
2465 | * SMC37c669 Super I/O controller. | ||
2466 | * | ||
2467 | * FORM OF CALL: | ||
2468 | * | ||
* SMC669_Init( index );
2470 | * | ||
2471 | * RETURNS: | ||
2472 | * | ||
2473 | * Nothing | ||
2474 | * | ||
2475 | * ARGUMENTS: | ||
2476 | * | ||
2477 | * None | ||
2478 | * | ||
2479 | * SIDE EFFECTS: | ||
2480 | * | ||
2481 | * None | ||
2482 | * | ||
2483 | */ | ||
void __init SMC669_Init ( int index )
{
    SMC37c669_CONFIG_REGS *SMC_base;
    unsigned long flags;

    /*
    ** Interrupts stay disabled across the whole detect/program
    ** sequence so nothing can slip in between the paired writes
    ** that enter configuration mode.
    */
    local_irq_save(flags);
    if ( ( SMC_base = SMC37c669_detect( index ) ) != NULL ) {
#if SMC_DEBUG
	SMC37c669_config_mode( TRUE );
	SMC37c669_dump_registers( );
	SMC37c669_config_mode( FALSE );
	SMC37c669_display_device_info( );
#endif
	/*
	** Each function is disabled, reconfigured to the standard
	** ISA resources, then re-enabled.  A -1 argument leaves that
	** resource (e.g. DRQ for the serial ports) unchanged.
	*/
	SMC37c669_disable_device( SERIAL_0 );
	SMC37c669_configure_device(
	    SERIAL_0,
	    COM1_BASE,
	    COM1_IRQ,
	    -1
	);
	SMC37c669_enable_device( SERIAL_0 );

	SMC37c669_disable_device( SERIAL_1 );
	SMC37c669_configure_device(
	    SERIAL_1,
	    COM2_BASE,
	    COM2_IRQ,
	    -1
	);
	SMC37c669_enable_device( SERIAL_1 );

	SMC37c669_disable_device( PARALLEL_0 );
	SMC37c669_configure_device(
	    PARALLEL_0,
	    PARP_BASE,
	    PARP_IRQ,
	    PARP_DRQ
	);
	SMC37c669_enable_device( PARALLEL_0 );

	SMC37c669_disable_device( FLOPPY_0 );
	SMC37c669_configure_device(
	    FLOPPY_0,
	    FDC_BASE,
	    FDC_IRQ,
	    FDC_DRQ
	);
	SMC37c669_enable_device( FLOPPY_0 );

	/* Wake up sometimes forgotten floppy, especially on DP264. */
	outb(0xc, 0x3f2);

	/* IDE is left disabled. */
	SMC37c669_disable_device( IDE_0 );

#if SMC_DEBUG
	SMC37c669_config_mode( TRUE );
	SMC37c669_dump_registers( );
	SMC37c669_config_mode( FALSE );
	SMC37c669_display_device_info( );
#endif
	local_irq_restore(flags);
	printk( "SMC37c669 Super I/O Controller found @ 0x%lx\n",
		(unsigned long) SMC_base );
    }
    else {
	local_irq_restore(flags);
#if SMC_DEBUG
	printk( "No SMC37c669 Super I/O Controller found\n" );
#endif
    }
}
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c new file mode 100644 index 000000000000..421e51ea6bb7 --- /dev/null +++ b/arch/alpha/kernel/smc37c93x.c | |||
@@ -0,0 +1,277 @@ | |||
1 | /* | ||
2 | * SMC 37C93X initialization code | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/kernel.h> | ||
7 | |||
8 | #include <linux/slab.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/delay.h> | ||
12 | |||
13 | #include <asm/hwrpb.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/segment.h> | ||
16 | |||
17 | #define SMC_DEBUG 0 | ||
18 | |||
19 | #if SMC_DEBUG | ||
20 | # define DBG_DEVS(args) printk args | ||
21 | #else | ||
22 | # define DBG_DEVS(args) | ||
23 | #endif | ||
24 | |||
25 | #define KB 1024 | ||
26 | #define MB (1024*KB) | ||
27 | #define GB (1024*MB) | ||
28 | |||
29 | /* device "activate" register contents */ | ||
30 | #define DEVICE_ON 1 | ||
31 | #define DEVICE_OFF 0 | ||
32 | |||
33 | /* configuration on/off keys */ | ||
34 | #define CONFIG_ON_KEY 0x55 | ||
35 | #define CONFIG_OFF_KEY 0xaa | ||
36 | |||
37 | /* configuration space device definitions */ | ||
38 | #define FDC 0 | ||
39 | #define IDE1 1 | ||
40 | #define IDE2 2 | ||
41 | #define PARP 3 | ||
42 | #define SER1 4 | ||
43 | #define SER2 5 | ||
44 | #define RTCL 6 | ||
45 | #define KYBD 7 | ||
46 | #define AUXIO 8 | ||
47 | |||
48 | /* Chip register offsets from base */ | ||
49 | #define CONFIG_CONTROL 0x02 | ||
50 | #define INDEX_ADDRESS 0x03 | ||
51 | #define LOGICAL_DEVICE_NUMBER 0x07 | ||
52 | #define DEVICE_ID 0x20 | ||
53 | #define DEVICE_REV 0x21 | ||
54 | #define POWER_CONTROL 0x22 | ||
55 | #define POWER_MGMT 0x23 | ||
56 | #define OSC 0x24 | ||
57 | |||
58 | #define ACTIVATE 0x30 | ||
59 | #define ADDR_HI 0x60 | ||
60 | #define ADDR_LO 0x61 | ||
61 | #define INTERRUPT_SEL 0x70 | ||
62 | #define INTERRUPT_SEL_2 0x72 /* KYBD/MOUS only */ | ||
63 | #define DMA_CHANNEL_SEL 0x74 /* FDC/PARP only */ | ||
64 | |||
65 | #define FDD_MODE_REGISTER 0x90 | ||
66 | #define FDD_OPTION_REGISTER 0x91 | ||
67 | |||
68 | /* values that we read back that are expected ... */ | ||
69 | #define VALID_DEVICE_ID 2 | ||
70 | |||
71 | /* default device addresses */ | ||
72 | #define KYBD_INTERRUPT 1 | ||
73 | #define MOUS_INTERRUPT 12 | ||
74 | #define COM2_BASE 0x2f8 | ||
75 | #define COM2_INTERRUPT 3 | ||
76 | #define COM1_BASE 0x3f8 | ||
77 | #define COM1_INTERRUPT 4 | ||
78 | #define PARP_BASE 0x3bc | ||
79 | #define PARP_INTERRUPT 7 | ||
80 | |||
81 | static unsigned long __init SMCConfigState(unsigned long baseAddr) | ||
82 | { | ||
83 | unsigned char devId; | ||
84 | unsigned char devRev; | ||
85 | |||
86 | unsigned long configPort; | ||
87 | unsigned long indexPort; | ||
88 | unsigned long dataPort; | ||
89 | |||
90 | int i; | ||
91 | |||
92 | configPort = indexPort = baseAddr; | ||
93 | dataPort = configPort + 1; | ||
94 | |||
95 | #define NUM_RETRIES 5 | ||
96 | |||
97 | for (i = 0; i < NUM_RETRIES; i++) | ||
98 | { | ||
99 | outb(CONFIG_ON_KEY, configPort); | ||
100 | outb(CONFIG_ON_KEY, configPort); | ||
101 | outb(DEVICE_ID, indexPort); | ||
102 | devId = inb(dataPort); | ||
103 | if (devId == VALID_DEVICE_ID) { | ||
104 | outb(DEVICE_REV, indexPort); | ||
105 | devRev = inb(dataPort); | ||
106 | break; | ||
107 | } | ||
108 | else | ||
109 | udelay(100); | ||
110 | } | ||
111 | return (i != NUM_RETRIES) ? baseAddr : 0L; | ||
112 | } | ||
113 | |||
114 | static void __init SMCRunState(unsigned long baseAddr) | ||
115 | { | ||
116 | outb(CONFIG_OFF_KEY, baseAddr); | ||
117 | } | ||
118 | |||
119 | static unsigned long __init SMCDetectUltraIO(void) | ||
120 | { | ||
121 | unsigned long baseAddr; | ||
122 | |||
123 | baseAddr = 0x3F0; | ||
124 | if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x3F0 ) { | ||
125 | return( baseAddr ); | ||
126 | } | ||
127 | baseAddr = 0x370; | ||
128 | if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x370 ) { | ||
129 | return( baseAddr ); | ||
130 | } | ||
131 | return( ( unsigned long )0 ); | ||
132 | } | ||
133 | |||
/*
 * Program and activate one logical device of the FDC37C93X.
 *
 * baseAddr  - configuration index port (data port is baseAddr + 1)
 * device    - logical device number (FDC, SER1, SER2, PARP, ...)
 * portaddr  - I/O base address to assign to the device
 * interrupt - IRQ line to assign to the device
 *
 * The chip must already be in configuration mode (SMCConfigState).
 * Register writes are index/data pairs, so their order matters.
 */
static void __init SMCEnableDevice(unsigned long baseAddr,
				   unsigned long device,
				   unsigned long portaddr,
				   unsigned long interrupt)
{
	unsigned long indexPort;
	unsigned long dataPort;

	indexPort = baseAddr;
	dataPort = baseAddr + 1;

	/* Select the logical device all following writes apply to. */
	outb(LOGICAL_DEVICE_NUMBER, indexPort);
	outb(device, dataPort);

	/* I/O base address, low then high byte. */
	outb(ADDR_LO, indexPort);
	outb(( portaddr & 0xFF ), dataPort);

	outb(ADDR_HI, indexPort);
	outb((portaddr >> 8) & 0xFF, dataPort);

	/* Primary interrupt select. */
	outb(INTERRUPT_SEL, indexPort);
	outb(interrupt, dataPort);

	/* Finally switch the device on. */
	outb(ACTIVATE, indexPort);
	outb(DEVICE_ON, dataPort);
}
160 | |||
/*
 * Activate the keyboard/mouse logical device.  Unlike the generic
 * SMCEnableDevice() path, this device keeps its fixed legacy I/O
 * addresses and needs two interrupt lines: the primary for the
 * keyboard and the secondary for the mouse.
 */
static void __init SMCEnableKYBD(unsigned long baseAddr)
{
	unsigned long indexPort;
	unsigned long dataPort;

	indexPort = baseAddr;
	dataPort = baseAddr + 1;

	/* Select the keyboard logical device. */
	outb(LOGICAL_DEVICE_NUMBER, indexPort);
	outb(KYBD, dataPort);

	outb(INTERRUPT_SEL, indexPort);	/* Primary interrupt select */
	outb(KYBD_INTERRUPT, dataPort);

	outb(INTERRUPT_SEL_2, indexPort); /* Secondary interrupt select */
	outb(MOUS_INTERRUPT, dataPort);

	outb(ACTIVATE, indexPort);
	outb(DEVICE_ON, dataPort);
}
181 | |||
/*
 * Activate the floppy controller: enable burst mode in the FDD mode
 * register (read-modify-write so other mode bits are preserved), wire
 * it to the legacy IRQ 6 / DMA channel 2, and switch it on.
 */
static void __init SMCEnableFDC(unsigned long baseAddr)
{
	unsigned long indexPort;
	unsigned long dataPort;

	unsigned char oldValue;

	indexPort = baseAddr;
	dataPort = baseAddr + 1;

	/* Select the FDC logical device. */
	outb(LOGICAL_DEVICE_NUMBER, indexPort);
	outb(FDC, dataPort);

	outb(FDD_MODE_REGISTER, indexPort);
	oldValue = inb(dataPort);

	oldValue |= 0x0E;	/* Enable burst mode */
	outb(oldValue, dataPort);

	outb(INTERRUPT_SEL, indexPort);	/* Primary interrupt select */
	outb(0x06, dataPort );		/* IRQ 6: the legacy FDC interrupt */

	outb(DMA_CHANNEL_SEL, indexPort); /* DMA channel select */
	outb(0x02, dataPort);		/* DMA channel 2: legacy FDC DMA */

	outb(ACTIVATE, indexPort);
	outb(DEVICE_ON, dataPort);
}
210 | |||
#if SMC_DEBUG
/*
 * Debug helper: read the power-control register (one enable bit per
 * logical device) and report which devices are currently powered.
 */
static void __init SMCReportDeviceStatus(unsigned long baseAddr)
{
	unsigned long idx = baseAddr;		/* index port */
	unsigned long data = baseAddr + 1;	/* data port */
	unsigned char power;

	outb(POWER_CONTROL, idx);
	power = inb(data);

	printk(power & (1 << FDC) ? "\t+FDC Enabled\n" : "\t-FDC Disabled\n");
	printk(power & (1 << IDE1) ? "\t+IDE1 Enabled\n" : "\t-IDE1 Disabled\n");
	printk(power & (1 << IDE2) ? "\t+IDE2 Enabled\n" : "\t-IDE2 Disabled\n");
	printk(power & (1 << PARP) ? "\t+PARP Enabled\n" : "\t-PARP Disabled\n");
	printk(power & (1 << SER1) ? "\t+SER1 Enabled\n" : "\t-SER1 Disabled\n");
	printk(power & (1 << SER2) ? "\t+SER2 Enabled\n" : "\t-SER2 Disabled\n");

	printk( "\n" );
}
#endif
240 | |||
/*
 * Probe for an SMC FDC37C93X Ultra I/O controller and, when found,
 * configure its serial ports, parallel port, keyboard/mouse and FDC
 * with the standard PC legacy addresses and IRQs.  IDE is deliberately
 * left untouched (on PC164 the PCI CMD646 is used instead).
 *
 * Interrupts stay disabled for the whole time the chip is held in
 * configuration mode.  Returns 1 if a controller was set up, else 0.
 */
int __init SMC93x_Init(void)
{
	unsigned long SMCUltraBase;
	unsigned long flags;

	local_irq_save(flags);
	if ((SMCUltraBase = SMCDetectUltraIO()) != 0UL) {
#if SMC_DEBUG
		SMCReportDeviceStatus(SMCUltraBase);
#endif
		SMCEnableDevice(SMCUltraBase, SER1, COM1_BASE, COM1_INTERRUPT);
		DBG_DEVS(("SMC FDC37C93X: SER1 done\n"));
		SMCEnableDevice(SMCUltraBase, SER2, COM2_BASE, COM2_INTERRUPT);
		DBG_DEVS(("SMC FDC37C93X: SER2 done\n"));
		SMCEnableDevice(SMCUltraBase, PARP, PARP_BASE, PARP_INTERRUPT);
		DBG_DEVS(("SMC FDC37C93X: PARP done\n"));
		/* On PC164, IDE on the SMC is not enabled;
		   CMD646 (PCI) on MB */
		SMCEnableKYBD(SMCUltraBase);
		DBG_DEVS(("SMC FDC37C93X: KYB done\n"));
		SMCEnableFDC(SMCUltraBase);
		DBG_DEVS(("SMC FDC37C93X: FDC done\n"));
#if SMC_DEBUG
		SMCReportDeviceStatus(SMCUltraBase);
#endif
		/* Leave config mode before re-enabling interrupts. */
		SMCRunState(SMCUltraBase);
		local_irq_restore(flags);
		printk("SMC FDC37C93X Ultra I/O Controller found @ 0x%lx\n",
		       SMCUltraBase);
		return 1;
	}
	else {
		local_irq_restore(flags);
		DBG_DEVS(("No SMC FDC37C93X Ultra I/O Controller found\n"));
		return 0;
	}
}
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c new file mode 100644 index 000000000000..8f1e78551b1e --- /dev/null +++ b/arch/alpha/kernel/smp.c | |||
@@ -0,0 +1,1163 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/smp.c | ||
3 | * | ||
4 | * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com) | ||
5 | * Renamed modified smp_call_function to smp_call_function_on_cpu() | ||
6 | * Created an function that conforms to the old calling convention | ||
7 | * of smp_call_function(). | ||
8 | * | ||
9 | * This is helpful for DCPI. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/kernel_stat.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/threads.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/cache.h> | ||
28 | #include <linux/profile.h> | ||
29 | #include <linux/bitops.h> | ||
30 | |||
31 | #include <asm/hwrpb.h> | ||
32 | #include <asm/ptrace.h> | ||
33 | #include <asm/atomic.h> | ||
34 | |||
35 | #include <asm/io.h> | ||
36 | #include <asm/irq.h> | ||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/pgalloc.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/tlbflush.h> | ||
41 | |||
42 | #include "proto.h" | ||
43 | #include "irq_impl.h" | ||
44 | |||
45 | |||
46 | #define DEBUG_SMP 0 | ||
47 | #if DEBUG_SMP | ||
48 | #define DBGS(args) printk args | ||
49 | #else | ||
50 | #define DBGS(args) | ||
51 | #endif | ||
52 | |||
53 | /* A collection of per-processor data. */ | ||
54 | struct cpuinfo_alpha cpu_data[NR_CPUS]; | ||
55 | |||
56 | /* A collection of single bit ipi messages. */ | ||
57 | static struct { | ||
58 | unsigned long bits ____cacheline_aligned; | ||
59 | } ipi_data[NR_CPUS] __cacheline_aligned; | ||
60 | |||
61 | enum ipi_message_type { | ||
62 | IPI_RESCHEDULE, | ||
63 | IPI_CALL_FUNC, | ||
64 | IPI_CPU_STOP, | ||
65 | }; | ||
66 | |||
67 | /* Set to a secondary's cpuid when it comes online. */ | ||
68 | static int smp_secondary_alive __initdata = 0; | ||
69 | |||
70 | /* Which cpus ids came online. */ | ||
71 | cpumask_t cpu_present_mask; | ||
72 | cpumask_t cpu_online_map; | ||
73 | |||
74 | EXPORT_SYMBOL(cpu_online_map); | ||
75 | |||
76 | /* cpus reported in the hwrpb */ | ||
77 | static unsigned long hwrpb_cpu_present_mask __initdata = 0; | ||
78 | |||
79 | int smp_num_probed; /* Internal processor count */ | ||
80 | int smp_num_cpus = 1; /* Number that came online. */ | ||
81 | |||
82 | extern void calibrate_delay(void); | ||
83 | |||
84 | |||
85 | |||
86 | /* | ||
87 | * Called by both boot and secondaries to move global data into | ||
88 | * per-processor storage. | ||
89 | */ | ||
90 | static inline void __init | ||
91 | smp_store_cpu_info(int cpuid) | ||
92 | { | ||
93 | cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy; | ||
94 | cpu_data[cpuid].last_asn = ASN_FIRST_VERSION; | ||
95 | cpu_data[cpuid].need_new_asn = 0; | ||
96 | cpu_data[cpuid].asn_lock = 0; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Ideally sets up per-cpu profiling hooks. Doesn't do much now... | ||
101 | */ | ||
102 | static inline void __init | ||
103 | smp_setup_percpu_timer(int cpuid) | ||
104 | { | ||
105 | cpu_data[cpuid].prof_counter = 1; | ||
106 | cpu_data[cpuid].prof_multiplier = 1; | ||
107 | } | ||
108 | |||
109 | static void __init | ||
110 | wait_boot_cpu_to_stop(int cpuid) | ||
111 | { | ||
112 | unsigned long stop = jiffies + 10*HZ; | ||
113 | |||
114 | while (time_before(jiffies, stop)) { | ||
115 | if (!smp_secondary_alive) | ||
116 | return; | ||
117 | barrier(); | ||
118 | } | ||
119 | |||
120 | printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid); | ||
121 | for (;;) | ||
122 | barrier(); | ||
123 | } | ||
124 | |||
/*
 * Where secondaries begin a life of C.  This is the first C code a
 * secondary CPU runs after the SRM console hands it over via
 * __smp_callin.  The sequence below is order-critical: traps and
 * interrupt vectors must be installed before interrupts are enabled,
 * and loops_per_jiffy must be written before smp_secondary_alive is
 * raised so the boot CPU never reads a stale value.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	/* Sanity: this CPU must not already be marked online. */
	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks. */
	wrmces(7);

	/* Set trap vectors. */
	trap_init();

	/* Set interrupt vector. */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos. */
	local_irq_enable();

	/* Wait boot CPU to stop with irq enabled before running
	   calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow master to continue only after we written loops_per_jiffy. */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing: the idle loop takes over from here. */
	cpu_idle();
}
177 | |||
178 | /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */ | ||
179 | static int __init | ||
180 | wait_for_txrdy (unsigned long cpumask) | ||
181 | { | ||
182 | unsigned long timeout; | ||
183 | |||
184 | if (!(hwrpb->txrdy & cpumask)) | ||
185 | return 0; | ||
186 | |||
187 | timeout = jiffies + 10*HZ; | ||
188 | while (time_before(jiffies, timeout)) { | ||
189 | if (!(hwrpb->txrdy & cpumask)) | ||
190 | return 0; | ||
191 | udelay(10); | ||
192 | barrier(); | ||
193 | } | ||
194 | |||
195 | return -1; | ||
196 | } | ||
197 | |||
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 *
 * Protocol (SRM console IPC via the HWRPB): wait for the console to
 * drain any pending message (txrdy bit clear), copy the length and
 * text into the target CPU's ipc_buffer, then raise the CPU's rxrdy
 * bit and wait for the console to acknowledge by clearing txrdy.
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	/* Locate the target CPU's per-cpu slot inside the HWRPB. */
	cpu = (struct percpu_struct *)
	       ((char*)hwrpb
		+ hwrpb->processor_offset
		+ cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	/* Message layout: first word is the length, text follows. */
	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
236 | |||
/*
 * A secondary console wants to send a message.  Receive it.
 *
 * For every CPU whose txrdy bit is set, pull the message out of that
 * CPU's HWRPB ipc_buffer, sanitize CR/LF, and (in debug builds) log
 * it.  All txrdy bits are cleared at the end to acknowledge.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		       ((char*)hwrpb
			+ hwrpb->processor_offset
			+ i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		/* NOTE(review): length is taken from the high 32 bits of
		   ipc_buffer[0] and the text from ipc_buffer[11] -- this is
		   the SRM console's transmit layout, distinct from the
		   format send_secondary_console_msg() writes; confirm
		   against the console spec before changing. */
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			/* Flatten CR (and a following LF) into spaces. */
			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
289 | |||
/*
 * Convince the console to have a secondary cpu begin execution.
 *
 * Builds a minimal HWPCB for the target CPU, points the HWRPB restart
 * address at __smp_callin, flips the SRM per-cpu flags to request a
 * restart, and sends the console a "START" message.  Returns 0 once
 * the console acknowledges (Bootstrap-In-Progress set again within
 * 10 seconds), -1 on timeout.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &idle->thread_info->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB. */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();			/* flags must be visible before the message */

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console. */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
360 | |||
/*
 * Bring one cpu online.
 *
 * Forks an idle task for the target CPU, asks the console to start
 * it (secondary_cpu_start), then hand-shakes through the global
 * smp_secondary_alive flag: -1 tells the secondary to wait, 0 lets
 * it calibrate, and the secondary sets it to 1 when fully up.
 * Returns 0 on success, -1 if the CPU never started or got stuck.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child. */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment. */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay. */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real. */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU. */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
413 | |||
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 *
 * Walks the HWRPB per-cpu slots, counts usable processors into
 * smp_num_probed, records them in hwrpb_cpu_present_mask, and
 * propagates the boot CPU's PAL revision to every usable secondary.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			/* 0x1cc selects SRM per-cpu status bits that must
			   all be set for a usable CPU -- presumably
			   available/present/working; confirm against the
			   SRM console architecture spec. */
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				hwrpb_cpu_present_mask |= (1UL << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		hwrpb_cpu_present_mask = (1UL << boot_cpuid);
	}
	cpu_present_mask = cpumask_of_cpu(boot_cpuid);

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, hwrpb_cpu_present_mask);
}
463 | |||
/*
 * Called by smp_init to prepare the secondaries.
 *
 * Initializes the boot CPU's bookkeeping, then marks every probed
 * secondary (up to max_cpus) as possible so __cpu_up can later boot
 * it.  On a UP box, or when max_cpus is 0, SMP is deactivated.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu_count, i;

	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_mask = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) {
		if (i == boot_cpuid)
			continue;

		/* Skip CPUs the HWRPB probe did not report. */
		if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
			continue;

		cpu_set(i, cpu_possible_map);
		cpu_count++;
	}

	smp_num_cpus = cpu_count;
}
503 | |||
504 | void __devinit | ||
505 | smp_prepare_boot_cpu(void) | ||
506 | { | ||
507 | /* | ||
508 | * Mark the boot cpu (current cpu) as both present and online | ||
509 | */ | ||
510 | cpu_set(smp_processor_id(), cpu_present_mask); | ||
511 | cpu_set(smp_processor_id(), cpu_online_map); | ||
512 | } | ||
513 | |||
514 | int __devinit | ||
515 | __cpu_up(unsigned int cpu) | ||
516 | { | ||
517 | smp_boot_one_cpu(cpu); | ||
518 | |||
519 | return cpu_online(cpu) ? 0 : -ENOSYS; | ||
520 | } | ||
521 | |||
522 | void __init | ||
523 | smp_cpus_done(unsigned int max_cpus) | ||
524 | { | ||
525 | int cpu; | ||
526 | unsigned long bogosum = 0; | ||
527 | |||
528 | for(cpu = 0; cpu < NR_CPUS; cpu++) | ||
529 | if (cpu_online(cpu)) | ||
530 | bogosum += cpu_data[cpu].loops_per_jiffy; | ||
531 | |||
532 | printk(KERN_INFO "SMP: Total of %d processors activated " | ||
533 | "(%lu.%02lu BogoMIPS).\n", | ||
534 | num_online_cpus(), | ||
535 | (bogosum + 2500) / (500000/HZ), | ||
536 | ((bogosum + 2500) / (5000/HZ)) % 100); | ||
537 | } | ||
538 | |||
539 | |||
/*
 * Per-CPU timer tick handler.  Records a profiling sample every tick,
 * and every prof_counter ticks runs the normal timekeeping work
 * (update_process_times) inside an irq_enter/irq_exit pair.
 */
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING, regs);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		/* Reload for the next profiling period. */
		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}
563 | |||
564 | int __init | ||
565 | setup_profiling_timer(unsigned int multiplier) | ||
566 | { | ||
567 | return -EINVAL; | ||
568 | } | ||
569 | |||
570 | |||
/*
 * Deliver an IPI to a set of CPUs: set the operation bit in each
 * target's ipi_data word, then write the interprocessor-interrupt
 * register for each target.  The two mb()s keep the bit stores
 * globally visible before the interrupts are raised.
 */
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();	/* order prior stores before the IPI bits */
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();	/* bits must be visible before the interrupt fires */
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
584 | |||
585 | /* Structure and data for smp_call_function. This is designed to | ||
586 | minimize static memory requirements. Plus it looks cleaner. */ | ||
587 | |||
588 | struct smp_call_struct { | ||
589 | void (*func) (void *info); | ||
590 | void *info; | ||
591 | long wait; | ||
592 | atomic_t unstarted_count; | ||
593 | atomic_t unfinished_count; | ||
594 | }; | ||
595 | |||
596 | static struct smp_call_struct *smp_call_function_data; | ||
597 | |||
/* Atomically drop data into a shared pointer.  The pointer is free
   when it reads as zero: on success it is swapped from 0 to data and
   0 is returned.  If it is already held and retry is false, return
   -EBUSY; otherwise spin until it reads zero and try again.  Uses
   Alpha ldq_l/stq_c (load-locked/store-conditional): stq_c writes its
   register back as 0 on a lost reservation, which re-runs the LL/SC
   sequence.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l %0,%1\n"
	"	mov %3,%2\n"
	"	bne %0,2f\n"
	"	stq_c %2,%1\n"
	"	beq %2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	/* old == 0 means the swap happened and we own the pointer. */
	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	/* Spin (read-only) until the holder releases, then retry. */
	while (*(void **)lock)
		barrier();
	goto again;
}
629 | |||
/*
 * IPI entry point: atomically drain this CPU's pending-operation bits
 * and dispatch each one.  Re-reads the word in a loop so operations
 * posted while we are handling earlier ones are not lost.
 */
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			/* Extract the lowest set bit and clear it. */
			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path. */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has been
				   received, and execution is about to begin. */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone unless
				   wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is done. */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				/* NOTE(review): halt() is expected not to
				   return; if it ever did, control would fall
				   through to the default case below. */
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	/* A secondary console may have posted a message for us. */
	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
703 | |||
704 | void | ||
705 | smp_send_reschedule(int cpu) | ||
706 | { | ||
707 | #ifdef DEBUG_IPI_MSG | ||
708 | if (cpu == hard_smp_processor_id()) | ||
709 | printk(KERN_WARNING | ||
710 | "smp_send_reschedule: Sending IPI to self.\n"); | ||
711 | #endif | ||
712 | send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); | ||
713 | } | ||
714 | |||
715 | void | ||
716 | smp_send_stop(void) | ||
717 | { | ||
718 | cpumask_t to_whom = cpu_possible_map; | ||
719 | cpu_clear(smp_processor_id(), to_whom); | ||
720 | #ifdef DEBUG_IPI_MSG | ||
721 | if (hard_smp_processor_id() != boot_cpu_id) | ||
722 | printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); | ||
723 | #endif | ||
724 | send_ipi_message(to_whom, IPI_CPU_STOP); | ||
725 | } | ||
726 | |||
727 | /* | ||
728 | * Run a function on all other CPUs. | ||
729 | * <func> The function to run. This must be fast and non-blocking. | ||
730 | * <info> An arbitrary pointer to pass to the function. | ||
731 | * <retry> If true, keep retrying until ready. | ||
732 | * <wait> If true, wait until function has completed on other CPUs. | ||
733 | * [RETURNS] 0 on success, else a negative status code. | ||
734 | * | ||
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
737 | * You must not call this function with disabled interrupts or from a | ||
738 | * hardware interrupt handler or from a bottom half handler. | ||
739 | */ | ||
740 | |||
/*
 * Run <func> with <info> on every CPU set in <to_whom>, excluding
 * ourselves.  <retry> is handed to pointer_lock() when claiming the
 * global smp_call_function_data slot.  If <wait> is set we also block
 * until every target CPU has *finished* <func>; otherwise we only wait
 * until they have started it, which matters because <data> lives on
 * our stack (see the "structure may be gone" note in the IPI handler).
 * Returns 0 on success, -EBUSY if the call-function slot was taken.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;
	
	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	/* <to_whom> is a by-value copy, so clearing ourselves is local.  */
	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	/* Targets decrement unstarted_count when they pick up the call,
	   and unfinished_count (if waiting) when <func> returns.  */
	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it..
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock. */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
814 | |||
815 | int | ||
816 | smp_call_function (void (*func) (void *info), void *info, int retry, int wait) | ||
817 | { | ||
818 | return smp_call_function_on_cpu (func, info, retry, wait, | ||
819 | cpu_online_map); | ||
820 | } | ||
821 | |||
/* IPI payload: flush this CPU's instruction cache.  */
static void
ipi_imb(void *unused)
{
	imb();
}
827 | |||
828 | void | ||
829 | smp_imb(void) | ||
830 | { | ||
831 | /* Must wait other processors to flush their icache before continue. */ | ||
832 | if (on_each_cpu(ipi_imb, NULL, 1, 1)) | ||
833 | printk(KERN_CRIT "smp_imb: timed out\n"); | ||
834 | } | ||
835 | |||
/* IPI payload: invalidate this CPU's entire TLB.  */
static void
ipi_flush_tlb_all(void *unused)
{
	tbia();
}
841 | |||
842 | void | ||
843 | flush_tlb_all(void) | ||
844 | { | ||
845 | /* Although we don't have any data to pass, we do want to | ||
846 | synchronize with the other processors. */ | ||
847 | if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) { | ||
848 | printk(KERN_CRIT "flush_tlb_all: timed out\n"); | ||
849 | } | ||
850 | } | ||
851 | |||
/* Per-CPU flag: when set, the ipi_flush_* handlers below avoid
   flush_tlb_current*()/__load_new_mm_context() and fall back to
   flush_tlb_other() instead.  */
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
853 | |||
854 | static void | ||
855 | ipi_flush_tlb_mm(void *x) | ||
856 | { | ||
857 | struct mm_struct *mm = (struct mm_struct *) x; | ||
858 | if (mm == current->active_mm && !asn_locked()) | ||
859 | flush_tlb_current(mm); | ||
860 | else | ||
861 | flush_tlb_other(mm); | ||
862 | } | ||
863 | |||
/*
 * Flush all TLB entries belonging to <mm> on every CPU.
 * Preemption is disabled so smp_processor_id() and active_mm remain
 * stable for the duration.
 */
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			/* mm_users <= 1: presumably no other CPU has this
			   mm live, so instead of IPIs just invalidate its
			   per-CPU context ids on the other processors.  */
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	/* Another CPU may be running this mm -- flush via IPI and wait.  */
	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
890 | |||
/* Argument block for ipi_flush_tlb_page(): identifies the single user
   page (mm, vma, addr) to be flushed on remote CPUs.  */
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
896 | |||
897 | static void | ||
898 | ipi_flush_tlb_page(void *x) | ||
899 | { | ||
900 | struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x; | ||
901 | struct mm_struct * mm = data->mm; | ||
902 | |||
903 | if (mm == current->active_mm && !asn_locked()) | ||
904 | flush_tlb_current_page(mm, data->vma, data->addr); | ||
905 | else | ||
906 | flush_tlb_other(mm); | ||
907 | } | ||
908 | |||
/*
 * Flush the TLB entry for one user page on every CPU.
 */
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	/* Keep smp_processor_id()/active_mm stable below.  */
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			/* mm_users <= 1: presumably no other CPU runs this
			   mm; just invalidate its remote context ids rather
			   than sending IPIs.  */
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	/* <data> is on our stack; safe because we wait for completion.  */
	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
942 | |||
943 | void | ||
944 | flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
945 | { | ||
946 | /* On the Alpha we always flush the whole user tlb. */ | ||
947 | flush_tlb_mm(vma->vm_mm); | ||
948 | } | ||
949 | |||
950 | static void | ||
951 | ipi_flush_icache_page(void *x) | ||
952 | { | ||
953 | struct mm_struct *mm = (struct mm_struct *) x; | ||
954 | if (mm == current->active_mm && !asn_locked()) | ||
955 | __load_new_mm_context(mm); | ||
956 | else | ||
957 | flush_tlb_other(mm); | ||
958 | } | ||
959 | |||
/*
 * Propagate icache coherence after user memory in <vma> has been
 * written.  Only executable mappings need this; others return at once.
 */
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Non-executable mappings never feed the icache.  */
	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			/* mm_users <= 1: presumably no other CPU has this
			   mm live; zap its remote context ids instead of
			   broadcasting IPIs.  */
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
992 | |||
#ifdef CONFIG_DEBUG_SPINLOCK
/*
 * Debug spin unlock: release the lock word, then scrub the ownership
 * bookkeeping so a stale owner is never reported by debug_spin_lock().
 */
void
_raw_spin_unlock(spinlock_t * lock)
{
	/* Order all prior accesses before the release store.  */
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}
1006 | |||
/*
 * Debug spin-lock acquire that records the owner for diagnostics.
 * Inline-asm register roles: %0 = scratch/lock value, %1 = lock->lock
 * (memory), %2 = remaining spin budget ("stuck").  The low bit of the
 * lock word is the held flag (blbs tests it); acquisition is the usual
 * ldl_l/stl_c sequence.  If 2^30 iterations pass without success we
 * print the current owner (cpu, pc, task, file:line) and retry with a
 * 2^36 budget; on success we record ourselves as the new owner.
 */
void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 30;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
	: "1" (lock->lock), "2" (stuck) : "memory");

	/* stuck < 0 means the spin budget ran out while contended.  */
	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}
1068 | |||
/*
 * Debug trylock: a single test_and_set_bit attempt on bit 0 of the
 * lock word.  Returns nonzero on success and records the new owner.
 * NOTE(review): on failure, base_file/line_no of the *failing* caller
 * are written into a lock currently owned by someone else -- racy,
 * debug-only bookkeeping.
 */
int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* CONFIG_DEBUG_SPINLOCK */
1084 | |||
#ifdef CONFIG_DEBUG_RWLOCK
/*
 * Debug writer lock.  Lock-word convention (visible in the asm):
 * bit 0 set = writer holds it (blbs), negative value = readers hold it
 * (blt); a free lock is 0 and the writer stores 1 via ldl_l/stl_c.
 * Asm roles: %0 = lock word (memory), %1 = regx scratch, %3/%4 =
 * bounded spin budgets (2^30) for "writer held" and "readers present";
 * when one underflows we drop out at label 4, warn, and start over.
 * %2 (regy) is declared as an extra scratch output but unused by the
 * template.
 */
void _raw_write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;
	stuck_reader = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}
1130 | |||
/*
 * Debug reader lock.  Each reader subtracts 2 from the lock word,
 * driving it negative while readers hold it (which is what the writer
 * path tests with blt); bit 0 -- the writer flag -- is left alone.
 * Spins while a writer holds bit 0 (blbs).  %2 is a 2^30 spin budget;
 * on underflow we drop out at label 4, warn, and retry.
 */
void _raw_read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* CONFIG_DEBUG_RWLOCK */
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c new file mode 100644 index 000000000000..5c98fc83e238 --- /dev/null +++ b/arch/alpha/kernel/srm_env.c | |||
@@ -0,0 +1,335 @@ | |||
1 | /* | ||
2 | * srm_env.c - Access to SRM environment | ||
3 | * variables through linux' procfs | ||
4 | * | ||
5 | * Copyright (C) 2001-2002 Jan-Benedict Glaw <jbglaw@lug-owl.de> | ||
6 | * | ||
 * This driver is essentially a modified version of Erik Mouw's
 * Documentation/DocBook/procfs_example.c, so: thank
 * you, Erik! He can be reached via email at
 * <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
 * provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
 * included a patch like this as well. Thanks for the idea!
13 | * | ||
14 | * This program is free software; you can redistribute | ||
15 | * it and/or modify it under the terms of the GNU General | ||
16 | * Public License version 2 as published by the Free Software | ||
17 | * Foundation. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be | ||
20 | * useful, but WITHOUT ANY WARRANTY; without even the implied | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR | ||
22 | * PURPOSE. See the GNU General Public License for more | ||
23 | * details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public | ||
26 | * License along with this program; if not, write to the | ||
27 | * Free Software Foundation, Inc., 59 Temple Place, | ||
28 | * Suite 330, Boston, MA 02111-1307 USA | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * Changelog | ||
34 | * ~~~~~~~~~ | ||
35 | * | ||
36 | * Thu, 22 Aug 2002 15:10:43 +0200 | ||
37 | * - Update Config.help entry. I got a number of emails asking | ||
38 | * me to tell their senders if they could make use of this | ||
39 | * piece of code... So: "SRM is something like BIOS for your | ||
40 | * Alpha" | ||
41 | * - Update code formatting a bit to better conform CodingStyle | ||
42 | * rules. | ||
43 | * - So this is v0.0.5, with no changes (except formatting) | ||
44 | * | ||
45 | * Wed, 22 May 2002 00:11:21 +0200 | ||
46 | * - Fix typo on comment (SRC -> SRM) | ||
47 | * - Call this "Version 0.0.4" | ||
48 | * | ||
49 | * Tue, 9 Apr 2002 18:44:40 +0200 | ||
50 | * - Implement access by variable name and additionally | ||
51 | * by number. This is done by creating two subdirectories | ||
52 | * where one holds all names (like the old directory | ||
53 | * did) and the other holding 256 files named like "0", | ||
54 | * "1" and so on. | ||
55 | * - Call this "Version 0.0.3" | ||
56 | * | ||
57 | */ | ||
58 | |||
59 | #include <linux/kernel.h> | ||
60 | #include <linux/config.h> | ||
61 | #include <linux/module.h> | ||
62 | #include <linux/init.h> | ||
63 | #include <linux/proc_fs.h> | ||
64 | #include <asm/console.h> | ||
65 | #include <asm/uaccess.h> | ||
66 | #include <asm/machvec.h> | ||
67 | |||
/* procfs layout and module identification.  */
#define BASE_DIR	"srm_environment"	/* Subdir in /proc/ */
#define NAMED_DIR	"named_variables"	/* Subdir for known variables */
#define NUMBERED_DIR	"numbered_variables"	/* Subdir for all variables */
#define VERSION		"0.0.5"			/* Module version */
#define NAME		"srm_env"		/* Module name */

MODULE_AUTHOR("Jan-Benedict Glaw <jbglaw@lug-owl.de>");
MODULE_DESCRIPTION("Accessing Alpha SRM environment through procfs interface");
MODULE_LICENSE("GPL");

/* One SRM environment variable: its /proc file name, its SRM id, and
   the proc node once created (NULL until then).  */
typedef struct _srm_env {
	char			*name;
	unsigned long		id;
	struct proc_dir_entry	*proc_entry;
} srm_env_t;

static struct proc_dir_entry	*base_dir;
static struct proc_dir_entry	*named_dir;
static struct proc_dir_entry	*numbered_dir;
/* Backing storage for the "0".."255" file names of the numbered dir.  */
static char			number[256][4];

/* Table of well-known variables; terminated by a { NULL, 0 } entry.  */
static srm_env_t	srm_named_entries[] = {
	{ "auto_action",	ENV_AUTO_ACTION },
	{ "boot_dev",		ENV_BOOT_DEV },
	{ "bootdef_dev",	ENV_BOOTDEF_DEV },
	{ "booted_dev",		ENV_BOOTED_DEV },
	{ "boot_file",		ENV_BOOT_FILE },
	{ "booted_file",	ENV_BOOTED_FILE },
	{ "boot_osflags",	ENV_BOOT_OSFLAGS },
	{ "booted_osflags",	ENV_BOOTED_OSFLAGS },
	{ "boot_reset",		ENV_BOOT_RESET },
	{ "dump_dev",		ENV_DUMP_DEV },
	{ "enable_audit",	ENV_ENABLE_AUDIT },
	{ "license",		ENV_LICENSE },
	{ "char_set",		ENV_CHAR_SET },
	{ "language",		ENV_LANGUAGE },
	{ "tty_dev",		ENV_TTY_DEV },
	{ NULL,			0 },
};
/* One entry per numeric variable id 0..255, filled in at init.  */
static srm_env_t	srm_numbered_entries[256];
108 | |||
109 | |||
110 | |||
/*
 * procfs read handler for one SRM variable; <data> is its srm_env_t.
 * The value is fetched directly into <page> via the SRM getenv
 * callback.  The top three bits of the callback result carry a status
 * code: 0 means success (low bits = byte count), anything else is
 * mapped to -EFAULT.
 * NOTE(review): partial reads aren't supported (any nonzero <off> is
 * rejected with -EFAULT) and *eof is never set -- verify against the
 * procfs read_proc contract.
 */
static int
srm_env_read(char *page, char **start, off_t off, int count, int *eof,
		void *data)
{
	int		nbytes;
	unsigned long	ret;
	srm_env_t	*entry;

	if(off != 0)
		return -EFAULT;

	entry	= (srm_env_t *) data;
	ret	= callback_getenv(entry->id, page, count);

	if((ret >> 61) == 0)
		nbytes = (int) ret;
	else
		nbytes = -EFAULT;

	return nbytes;
}
132 | |||
133 | |||
/*
 * procfs write handler for one SRM variable; <data> is its srm_env_t.
 * Copies at most PAGE_SIZE-1 bytes from userspace into a bounce page,
 * NUL-terminates, and stores the value via the SRM setenv callback.
 * On success the environment is committed with callback_save_env(),
 * retried while its status (top three bits) reads 1.
 * Returns the setenv result on success, -EINVAL for oversized writes,
 * -EFAULT on a failed copy or failed setenv, -ENOMEM if no bounce
 * page could be allocated.
 */
static int
srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
		void *data)
{
	int res;
	srm_env_t	*entry;
	char		*buf = (char *) __get_free_page(GFP_USER);
	unsigned long	ret1, ret2;

	entry = (srm_env_t *) data;

	if (!buf)
		return -ENOMEM;

	res = -EINVAL;
	/* count < PAGE_SIZE guarantees room for the trailing NUL below.  */
	if (count >= PAGE_SIZE)
		goto out;

	res = -EFAULT;
	if (copy_from_user(buf, buffer, count))
		goto out;
	buf[count] = '\0';

	ret1 = callback_setenv(entry->id, buf, count);
	if ((ret1 >> 61) == 0) {
		do
			ret2 = callback_save_env();
		while((ret2 >> 61) == 1);
		res = (int) ret1;
	}

 out:
	free_page((unsigned long)buf);
	return res;
}
169 | |||
170 | static void | ||
171 | srm_env_cleanup(void) | ||
172 | { | ||
173 | srm_env_t *entry; | ||
174 | unsigned long var_num; | ||
175 | |||
176 | if(base_dir) { | ||
177 | /* | ||
178 | * Remove named entries | ||
179 | */ | ||
180 | if(named_dir) { | ||
181 | entry = srm_named_entries; | ||
182 | while(entry->name != NULL && entry->id != 0) { | ||
183 | if(entry->proc_entry) { | ||
184 | remove_proc_entry(entry->name, | ||
185 | named_dir); | ||
186 | entry->proc_entry = NULL; | ||
187 | } | ||
188 | entry++; | ||
189 | } | ||
190 | remove_proc_entry(NAMED_DIR, base_dir); | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Remove numbered entries | ||
195 | */ | ||
196 | if(numbered_dir) { | ||
197 | for(var_num = 0; var_num <= 255; var_num++) { | ||
198 | entry = &srm_numbered_entries[var_num]; | ||
199 | |||
200 | if(entry->proc_entry) { | ||
201 | remove_proc_entry(entry->name, | ||
202 | numbered_dir); | ||
203 | entry->proc_entry = NULL; | ||
204 | entry->name = NULL; | ||
205 | } | ||
206 | } | ||
207 | remove_proc_entry(NUMBERED_DIR, base_dir); | ||
208 | } | ||
209 | |||
210 | remove_proc_entry(BASE_DIR, NULL); | ||
211 | } | ||
212 | |||
213 | return; | ||
214 | } | ||
215 | |||
216 | |||
/*
 * Build the /proc/srm_environment tree: a "named_variables" subdir
 * with one file per well-known variable and a "numbered_variables"
 * subdir with files "0".."255".  Any partially created state is torn
 * down through srm_env_cleanup() on failure.
 * NOTE(review): every failure path reports -ENOMEM, including proc
 * registration failures that may not be allocation related.
 */
static int __init
srm_env_init(void)
{
	srm_env_t	*entry;
	unsigned long	var_num;

	/*
	 * Check system
	 */
	if(!alpha_using_srm) {
		printk(KERN_INFO "%s: This Alpha system doesn't "
				"know about SRM (or you've booted "
				"SRM->MILO->Linux, which gets "
				"misdetected)...\n", __FUNCTION__);
		return -ENODEV;
	}

	/*
	 * Init numbers
	 */
	for(var_num = 0; var_num <= 255; var_num++)
		sprintf(number[var_num], "%ld", var_num);

	/*
	 * Create base directory
	 */
	base_dir = proc_mkdir(BASE_DIR, NULL);
	if(base_dir == NULL) {
		printk(KERN_ERR "Couldn't create base dir /proc/%s\n",
				BASE_DIR);
		goto cleanup;
	}
	base_dir->owner = THIS_MODULE;

	/*
	 * Create per-name subdirectory
	 */
	named_dir = proc_mkdir(NAMED_DIR, base_dir);
	if(named_dir == NULL) {
		printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n",
				BASE_DIR, NAMED_DIR);
		goto cleanup;
	}
	named_dir->owner = THIS_MODULE;

	/*
	 * Create per-number subdirectory
	 */
	numbered_dir = proc_mkdir(NUMBERED_DIR, base_dir);
	if(numbered_dir == NULL) {
		printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n",
				BASE_DIR, NUMBERED_DIR);
		goto cleanup;

	}
	numbered_dir->owner = THIS_MODULE;

	/*
	 * Create all named nodes
	 */
	entry = srm_named_entries;
	while(entry->name != NULL && entry->id != 0) {
		entry->proc_entry = create_proc_entry(entry->name,
				0644, named_dir);
		if(entry->proc_entry == NULL)
			goto cleanup;

		/* Hand the srm_env_t to the read/write handlers.  */
		entry->proc_entry->data		= (void *) entry;
		entry->proc_entry->owner	= THIS_MODULE;
		entry->proc_entry->read_proc	= srm_env_read;
		entry->proc_entry->write_proc	= srm_env_write;

		entry++;
	}

	/*
	 * Create all numbered nodes
	 */
	for(var_num = 0; var_num <= 255; var_num++) {
		entry = &srm_numbered_entries[var_num];
		entry->name = number[var_num];

		entry->proc_entry = create_proc_entry(entry->name,
				0644, numbered_dir);
		if(entry->proc_entry == NULL)
			goto cleanup;

		entry->id			= var_num;
		entry->proc_entry->data		= (void *) entry;
		entry->proc_entry->owner	= THIS_MODULE;
		entry->proc_entry->read_proc	= srm_env_read;
		entry->proc_entry->write_proc	= srm_env_write;
	}

	printk(KERN_INFO "%s: version %s loaded successfully\n", NAME,
			VERSION);

	return 0;

cleanup:
	srm_env_cleanup();

	return -ENOMEM;
}
321 | |||
322 | |||
323 | static void __exit | ||
324 | srm_env_exit(void) | ||
325 | { | ||
326 | srm_env_cleanup(); | ||
327 | printk(KERN_INFO "%s: unloaded successfully\n", NAME); | ||
328 | |||
329 | return; | ||
330 | } | ||
331 | |||
332 | |||
333 | module_init(srm_env_init); | ||
334 | module_exit(srm_env_exit); | ||
335 | |||
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c new file mode 100644 index 000000000000..3b30d4f1fc42 --- /dev/null +++ b/arch/alpha/kernel/srmcons.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/srmcons.c | ||
3 | * | ||
4 | * Callback based driver for SRM Console console device. | ||
5 | * (TTY driver and console driver) | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/console.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/timer.h> | ||
17 | #include <linux/tty.h> | ||
18 | #include <linux/tty_driver.h> | ||
19 | #include <linux/tty_flip.h> | ||
20 | |||
21 | #include <asm/console.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | |||
/* Serializes all use of the SRM console callbacks (getc/puts).  */
static DEFINE_SPINLOCK(srmcons_callback_lock);
static int srm_is_registered_console = 0;

/*
 * The TTY driver
 */
#define MAX_SRM_CONSOLE_DEVICES 1	/* only support 1 console device */

struct srmcons_private {
	struct tty_struct *tty;		/* open tty; NULL when closed */
	struct timer_list timer;	/* input polling timer */
	spinlock_t lock;		/* guards tty and timer above */
};

/* An SRM callback returns a 64-bit value: 61 bits of payload
   (character or byte count) plus a 3-bit status in the top bits.  */
typedef union _srmcons_result {
	struct {
		unsigned long c :61;
		unsigned long status :3;
	} bits;
	long as_long;
} srmcons_result;
47 | /* called with callback_lock held */ | ||
/* called with callback_lock held */
/*
 * Poll SRM for buffered input and push it into the tty flip buffer.
 * A status below 2 means a valid character came back; status bit 0
 * apparently flags more pending input (TODO confirm against the SRM
 * console callback spec), so we keep polling, capped at 10 rounds so
 * we never wedge inside the callback.  Returns characters queued.
 */
static int
srmcons_do_receive_chars(struct tty_struct *tty)
{
	srmcons_result result;
	int count = 0, loops = 0;

	do {
		result.as_long = callback_getc(0);
		if (result.bits.status < 2) {
			tty_insert_flip_char(tty, (char)result.bits.c, 0);
			count++;
		}
	} while((result.bits.status & 1) && (++loops < 10));

	if (count)
		tty_schedule_flip(tty);

	return count;
}
67 | |||
/*
 * Timer callback: poll for console input.  spin_trylock on the
 * callback lock keeps us from deadlocking against a writer that
 * already holds it; if we lose we simply poll again later.  Polling
 * backs off (10 -> 100 jiffies) when nothing arrived, and the timer
 * is only rearmed while a tty is still open.
 */
static void
srmcons_receive_chars(unsigned long data)
{
	struct srmcons_private *srmconsp = (struct srmcons_private *)data;
	unsigned long flags;
	int incr = 10;

	local_irq_save(flags);
	if (spin_trylock(&srmcons_callback_lock)) {
		if (!srmcons_do_receive_chars(srmconsp->tty))
			incr = 100;
		spin_unlock(&srmcons_callback_lock);
	} 

	spin_lock(&srmconsp->lock);
	if (srmconsp->tty) {
		srmconsp->timer.expires = jiffies + incr;
		add_timer(&srmconsp->timer);
	}
	spin_unlock(&srmconsp->lock);

	local_irq_restore(flags);
}
91 | |||
92 | /* called with callback_lock held */ | ||
93 | static int | ||
94 | srmcons_do_write(struct tty_struct *tty, const char *buf, int count) | ||
95 | { | ||
96 | static char str_cr[1] = "\r"; | ||
97 | long c, remaining = count; | ||
98 | srmcons_result result; | ||
99 | char *cur; | ||
100 | int need_cr; | ||
101 | |||
102 | for (cur = (char *)buf; remaining > 0; ) { | ||
103 | need_cr = 0; | ||
104 | /* | ||
105 | * Break it up into reasonable size chunks to allow a chance | ||
106 | * for input to get in | ||
107 | */ | ||
108 | for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++) | ||
109 | if (cur[c] == '\n') | ||
110 | need_cr = 1; | ||
111 | |||
112 | while (c > 0) { | ||
113 | result.as_long = callback_puts(0, cur, c); | ||
114 | c -= result.bits.c; | ||
115 | remaining -= result.bits.c; | ||
116 | cur += result.bits.c; | ||
117 | |||
118 | /* | ||
119 | * Check for pending input iff a tty was provided | ||
120 | */ | ||
121 | if (tty) | ||
122 | srmcons_do_receive_chars(tty); | ||
123 | } | ||
124 | |||
125 | while (need_cr) { | ||
126 | result.as_long = callback_puts(0, str_cr, 1); | ||
127 | if (result.bits.c > 0) | ||
128 | need_cr = 0; | ||
129 | } | ||
130 | } | ||
131 | return count; | ||
132 | } | ||
133 | |||
/*
 * tty write entry point: take the callback lock irq-safe (the polling
 * timer contends for it) and funnel everything through
 * srmcons_do_write().  Always reports <count> bytes written.
 */
static int
srmcons_write(struct tty_struct *tty,
	      const unsigned char *buf, int count)
{
	unsigned long flags;

	spin_lock_irqsave(&srmcons_callback_lock, flags);
	srmcons_do_write(tty, (const char *) buf, count);
	spin_unlock_irqrestore(&srmcons_callback_lock, flags);

	return count;
}
146 | |||
/* No internal buffering is tracked; always advertise a fixed
   write budget to the tty layer.  */
static int
srmcons_write_room(struct tty_struct *tty)
{
	return 512;
}
152 | |||
/* Writes go straight to the SRM callback, so nothing is ever
   left queued in this driver.  */
static int
srmcons_chars_in_buffer(struct tty_struct *tty)
{
	return 0;
}
158 | |||
159 | static int | ||
160 | srmcons_get_private_struct(struct srmcons_private **ps) | ||
161 | { | ||
162 | static struct srmcons_private *srmconsp = NULL; | ||
163 | static DEFINE_SPINLOCK(srmconsp_lock); | ||
164 | unsigned long flags; | ||
165 | int retval = 0; | ||
166 | |||
167 | if (srmconsp == NULL) { | ||
168 | spin_lock_irqsave(&srmconsp_lock, flags); | ||
169 | |||
170 | srmconsp = kmalloc(sizeof(*srmconsp), GFP_KERNEL); | ||
171 | if (srmconsp == NULL) | ||
172 | retval = -ENOMEM; | ||
173 | else { | ||
174 | srmconsp->tty = NULL; | ||
175 | spin_lock_init(&srmconsp->lock); | ||
176 | init_timer(&srmconsp->timer); | ||
177 | } | ||
178 | |||
179 | spin_unlock_irqrestore(&srmconsp_lock, flags); | ||
180 | } | ||
181 | |||
182 | *ps = srmconsp; | ||
183 | return retval; | ||
184 | } | ||
185 | |||
/*
 * tty open: bind the singleton private struct to this tty and start
 * the input-polling timer.  Only the first open attaches (guarded by
 * srmconsp->tty being NULL); later opens are no-ops here and rely on
 * the tty layer's reference counting.
 */
static int
srmcons_open(struct tty_struct *tty, struct file *filp)
{
	struct srmcons_private *srmconsp;
	unsigned long flags;
	int retval;

	retval = srmcons_get_private_struct(&srmconsp);
	if (retval)
		return retval;

	spin_lock_irqsave(&srmconsp->lock, flags);

	if (!srmconsp->tty) {
		tty->driver_data = srmconsp;

		srmconsp->tty = tty;
		srmconsp->timer.function = srmcons_receive_chars;
		srmconsp->timer.data = (unsigned long)srmconsp;
		srmconsp->timer.expires = jiffies + 10;
		add_timer(&srmconsp->timer);
	}

	spin_unlock_irqrestore(&srmconsp->lock, flags);

	return 0;
}
213 | |||
/*
 * tty close: on the final reference (tty->count == 1), detach the tty
 * from the private struct and stop the polling timer.
 */
static void
srmcons_close(struct tty_struct *tty, struct file *filp)
{
	struct srmcons_private *srmconsp = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&srmconsp->lock, flags);

	if (tty->count == 1) {
		srmconsp->tty = NULL;
		del_timer(&srmconsp->timer);
	}

	spin_unlock_irqrestore(&srmconsp->lock, flags);
}
229 | |||
230 | |||
/* Set by srmcons_init() once the tty driver is registered.  */
static struct tty_driver *srmcons_driver;

/* tty operations; no ioctl/termios handling -- the SRM console is a
   plain byte pipe.  */
static struct tty_operations srmcons_ops = {
	.open		= srmcons_open,
	.close		= srmcons_close,
	.write		= srmcons_write,
	.write_room	= srmcons_write_room,
	.chars_in_buffer= srmcons_chars_in_buffer,
};
240 | |||
241 | static int __init | ||
242 | srmcons_init(void) | ||
243 | { | ||
244 | if (srm_is_registered_console) { | ||
245 | struct tty_driver *driver; | ||
246 | int err; | ||
247 | |||
248 | driver = alloc_tty_driver(MAX_SRM_CONSOLE_DEVICES); | ||
249 | if (!driver) | ||
250 | return -ENOMEM; | ||
251 | driver->driver_name = "srm"; | ||
252 | driver->name = "srm"; | ||
253 | driver->major = 0; /* dynamic */ | ||
254 | driver->minor_start = 0; | ||
255 | driver->type = TTY_DRIVER_TYPE_SYSTEM; | ||
256 | driver->subtype = SYSTEM_TYPE_SYSCONS; | ||
257 | driver->init_termios = tty_std_termios; | ||
258 | tty_set_operations(driver, &srmcons_ops); | ||
259 | err = tty_register_driver(driver); | ||
260 | if (err) { | ||
261 | put_tty_driver(driver); | ||
262 | return err; | ||
263 | } | ||
264 | srmcons_driver = driver; | ||
265 | } | ||
266 | |||
267 | return -ENODEV; | ||
268 | } | ||
269 | |||
270 | module_init(srmcons_init); | ||
271 | |||
272 | |||
/*
 * The console driver
 */

/* printk output path: identical locking to the tty write path, but
   with a NULL tty so srmcons_do_write() skips input draining.  */
static void
srm_console_write(struct console *co, const char *s, unsigned count)
{
	unsigned long flags;

	spin_lock_irqsave(&srmcons_callback_lock, flags);
	srmcons_do_write(NULL, s, count);
	spin_unlock_irqrestore(&srmcons_callback_lock, flags);
}
285 | |||
286 | static struct tty_driver * | ||
287 | srm_console_device(struct console *co, int *index) | ||
288 | { | ||
289 | *index = co->index; | ||
290 | return srmcons_driver; | ||
291 | } | ||
292 | |||
293 | static int __init | ||
294 | srm_console_setup(struct console *co, char *options) | ||
295 | { | ||
296 | return 0; | ||
297 | } | ||
298 | |||
/* Console descriptor registered by register_srm_console();
   CON_PRINTBUFFER requests a replay of the existing printk buffer
   when the console is registered.  */
static struct console srmcons = {
	.name		= "srm",
	.write		= srm_console_write,
	.device		= srm_console_device,
	.setup		= srm_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};
307 | |||
308 | void __init | ||
309 | register_srm_console(void) | ||
310 | { | ||
311 | if (!srm_is_registered_console) { | ||
312 | callback_open_console(); | ||
313 | register_console(&srmcons); | ||
314 | srm_is_registered_console = 1; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | void __init | ||
319 | unregister_srm_console(void) | ||
320 | { | ||
321 | if (srm_is_registered_console) { | ||
322 | callback_close_console(); | ||
323 | unregister_console(&srmcons); | ||
324 | srm_is_registered_console = 0; | ||
325 | } | ||
326 | } | ||
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c new file mode 100644 index 000000000000..145dcde143ae --- /dev/null +++ b/arch/alpha/kernel/sys_alcor.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_alcor.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the ALCOR and XLT (XL-300/366/433). | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/reboot.h> | ||
19 | #include <linux/bitops.h> | ||
20 | |||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/dma.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | #include <asm/irq.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/core_cia.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | |||
31 | #include "proto.h" | ||
32 | #include "irq_impl.h" | ||
33 | #include "pci_impl.h" | ||
34 | #include "machvec_impl.h" | ||
35 | |||
36 | |||
/* Note mask bit is true for ENABLED irqs.  Software shadow of the
   GRU interrupt mask register; bit (irq - 16) set => irq enabled. */
static unsigned long cached_irq_mask;
39 | |||
/*
 * Push MASK to the GRU interrupt mask register; mb() orders the
 * MMIO write before anything that follows.
 */
static inline void
alcor_update_irq_hw(unsigned long mask)
{
	*(vuip)GRU_INT_MASK = mask;
	mb();
}
46 | |||
/* Enable IRQ: set its bit in the shadow mask and write it to the GRU
   in one expression (GRU bits start at irq 16). */
static inline void
alcor_enable_irq(unsigned int irq)
{
	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
}
52 | |||
/* Disable IRQ: clear its bit in the shadow mask and write it to the GRU. */
static void
alcor_disable_irq(unsigned int irq)
{
	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
}
58 | |||
/*
 * ->ack hook: mask the irq, then dismiss it in the GRU by pulsing
 * its bit in GRU_INT_CLEAR (write-1 then write-0, each ordered by mb()).
 */
static void
alcor_mask_and_ack_irq(unsigned int irq)
{
	alcor_disable_irq(irq);

	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
	*(vuip)GRU_INT_CLEAR = 0; mb();
}
68 | |||
/* ->startup hook: just enable; returns 0 (no interrupt was pending). */
static unsigned int
alcor_startup_irq(unsigned int irq)
{
	alcor_enable_irq(irq);
	return 0;
}
75 | |||
/*
 * ->ack for the cascaded i8259a ISA irqs: standard i8259a ack, then
 * dismiss the ISA summary bit (bit 31) in the GRU with a 1-then-0 pulse.
 */
static void
alcor_isa_mask_and_ack_irq(unsigned int irq)
{
	i8259a_mask_and_ack_irq(irq);

	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
	*(vuip)GRU_INT_CLEAR = 0; mb();
}
85 | |||
/* ->end hook: re-enable the irq unless it was disabled or is still
   being processed. */
static void
alcor_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		alcor_enable_irq(irq);
}
92 | |||
/* irq-controller methods for the GRU-routed irqs (16..47).
   shutdown/disable share one handler; ack also masks. */
static struct hw_interrupt_type alcor_irq_type = {
	.typename = "ALCOR",
	.startup = alcor_startup_irq,
	.shutdown = alcor_disable_irq,
	.enable = alcor_enable_irq,
	.disable = alcor_disable_irq,
	.ack = alcor_mask_and_ack_irq,
	.end = alcor_end_irq,
};
102 | |||
103 | static void | ||
104 | alcor_device_interrupt(unsigned long vector, struct pt_regs *regs) | ||
105 | { | ||
106 | unsigned long pld; | ||
107 | unsigned int i; | ||
108 | |||
109 | /* Read the interrupt summary register of the GRU */ | ||
110 | pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS; | ||
111 | |||
112 | /* | ||
113 | * Now for every possible bit set, work through them and call | ||
114 | * the appropriate interrupt handler. | ||
115 | */ | ||
116 | while (pld) { | ||
117 | i = ffz(~pld); | ||
118 | pld &= pld - 1; /* clear least bit set */ | ||
119 | if (i == 31) { | ||
120 | isa_device_interrupt(vector, regs); | ||
121 | } else { | ||
122 | handle_irq(16 + i, regs); | ||
123 | } | ||
124 | } | ||
125 | } | ||
126 | |||
/*
 * Boot-time irq setup: program the GRU to a known state (everything
 * masked, level-triggered, ISA summary active-high, nothing latched),
 * install alcor_irq_type on the connected GRU lines, hook the GRU-aware
 * ack into the i8259a type, and wire up the ISA cascade on irq 16+31.
 */
static void __init
alcor_init_irq(void)
{
	long i;

	if (alpha_using_srm)
		alpha_mv.device_interrupt = srm_device_interrupt;

	*(vuip)GRU_INT_MASK = 0; mb(); /* all disabled */
	*(vuip)GRU_INT_EDGE = 0; mb(); /* all are level */
	*(vuip)GRU_INT_HILO = 0x80000000U; mb(); /* ISA only HI */
	*(vuip)GRU_INT_CLEAR = 0; mb(); /* all clear */

	for (i = 16; i < 48; ++i) {
		/* On Alcor, at least, lines 20..30 are not connected
		   and can generate spurious interrupts if we turn them
		   on while IRQ probing. */
		if (i >= 16+20 && i <= 16+30)
			continue;
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &alcor_irq_type;
	}
	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;

	init_i8259a_irqs();
	common_init_isa_dma();

	setup_irq(16+31, &isa_cascade_irqaction);
}
156 | |||
157 | |||
158 | /* | ||
159 | * PCI Fixup configuration. | ||
160 | * | ||
161 | * Summary @ GRU_INT_REQ: | ||
162 | * Bit Meaning | ||
163 | * 0 Interrupt Line A from slot 2 | ||
164 | * 1 Interrupt Line B from slot 2 | ||
165 | * 2 Interrupt Line C from slot 2 | ||
166 | * 3 Interrupt Line D from slot 2 | ||
167 | * 4 Interrupt Line A from slot 1 | ||
168 | * 5 Interrupt line B from slot 1 | ||
169 | * 6 Interrupt Line C from slot 1 | ||
170 | * 7 Interrupt Line D from slot 1 | ||
171 | * 8 Interrupt Line A from slot 0 | ||
172 | * 9 Interrupt Line B from slot 0 | ||
173 | *10 Interrupt Line C from slot 0 | ||
174 | *11 Interrupt Line D from slot 0 | ||
175 | *12 Interrupt Line A from slot 4 | ||
176 | *13 Interrupt Line B from slot 4 | ||
177 | *14 Interrupt Line C from slot 4 | ||
178 | *15 Interrupt Line D from slot 4 | ||
 *16 Interrupt Line A from slot 3
 *17 Interrupt Line B from slot 3
 *18 Interrupt Line C from slot 3
 *19 Interrupt Line D from slot 3
183 | *20-30 Reserved | ||
184 | *31 EISA interrupt | ||
185 | * | ||
186 | * The device to slot mapping looks like: | ||
187 | * | ||
188 | * Slot Device | ||
189 | * 6 built-in TULIP (XLT only) | ||
190 | * 7 PCI on board slot 0 | ||
191 | * 8 PCI on board slot 3 | ||
192 | * 9 PCI on board slot 4 | ||
193 | * 10 PCEB (PCI-EISA bridge) | ||
194 | * 11 PCI on board slot 2 | ||
195 | * 12 PCI on board slot 1 | ||
196 | * | ||
197 | * | ||
198 | * This two layered interrupt approach means that we allocate IRQ 16 and | ||
199 | * above for PCI interrupts. The IRQ relates to which bit the interrupt | ||
200 | * comes in on. This makes interrupt processing much easier. | ||
201 | */ | ||
202 | |||
/*
 * Map a PCI device/pin to an Alpha irq number via the wiring table.
 * The local names irq_tab/min_idsel/max_idsel/irqs_per_slot are
 * required as-is by the COMMON_TABLE_LOOKUP macro (pci_impl.h).
 * Rows are indexed by IdSel 17..23; -1 means "no irq" (the PCEB bridge).
 */
static int __init
alcor_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[7][5] __initdata = {
		/*INT INTA INTB INTC INTD */
		/* note: IDSEL 17 is XLT only */
		{16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
		{ 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */
		{16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 19, slot 3 */
		{16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 20, slot 4 */
		{ -1, -1, -1, -1, -1}, /* IdSel 21, PCEB */
		{ 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */
		{ 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */
	};
	const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
220 | |||
/*
 * Machine-vector kill_arch hook: run the CIA shutdown, then — unless
 * SRM state must be preserved (ALPHA_RESTORE_SRM_SETUP) — trigger a
 * GRU reset for RESTART under SRM, and halt in every case.
 */
static void
alcor_kill_arch(int mode)
{
	cia_kill_arch(mode);

#ifndef ALPHA_RESTORE_SRM_SETUP
	switch(mode) {
	case LINUX_REBOOT_CMD_RESTART:
		/* Who said DEC engineer's have no sense of humor? ;-) */
		if (alpha_using_srm) {
			*(vuip) GRU_RESET = 0x0000dead; /* magic reset value */
			mb();
		}
		break;
	case LINUX_REBOOT_CMD_HALT:
		break;
	case LINUX_REBOOT_CMD_POWER_OFF:
		break;
	}

	halt();
#endif
}
244 | |||
245 | static void __init | ||
246 | alcor_init_pci(void) | ||
247 | { | ||
248 | struct pci_dev *dev; | ||
249 | |||
250 | cia_init_pci(); | ||
251 | |||
252 | /* | ||
253 | * Now we can look to see if we are really running on an XLT-type | ||
254 | * motherboard, by looking for a 21040 TULIP in slot 6, which is | ||
255 | * built into XLT and BRET/MAVERICK, but not available on ALCOR. | ||
256 | */ | ||
257 | dev = pci_find_device(PCI_VENDOR_ID_DEC, | ||
258 | PCI_DEVICE_ID_DEC_TULIP, | ||
259 | NULL); | ||
260 | if (dev && dev->devfn == PCI_DEVFN(6,0)) { | ||
261 | alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS; | ||
262 | printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n", | ||
263 | __FUNCTION__); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | |||
268 | /* | ||
269 | * The System Vectors | ||
270 | */ | ||
271 | |||
/*
 * Machine vector for ALCOR boards.  DO_EV5_MMU / DO_DEFAULT_RTC /
 * DO_CIA_IO are machvec_impl.h macros filling in the MMU, RTC and
 * I/O method groups.  ALIAS_MV makes this the default alpha_mv for
 * an ALCOR-only build.
 */
struct alpha_machine_vector alcor_mv __initmv = {
	.vector_name = "Alcor",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check = cia_machine_check,
	.max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
	.min_io_address = EISA_DEFAULT_IO_BASE,
	.min_mem_address = CIA_DEFAULT_MEM_BASE,

	.nr_irqs = 48,
	.device_interrupt = alcor_device_interrupt,

	.init_arch = cia_init_arch,
	.init_irq = alcor_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = alcor_init_pci,
	.kill_arch = alcor_kill_arch,
	.pci_map_irq = alcor_map_irq,
	.pci_swizzle = common_swizzle,

	.sys = { .cia = {
		.gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS
	}}
};
ALIAS_MV(alcor)
298 | |||
/*
 * Machine vector for XLT boards.  Identical to alcor_mv except for
 * the name, the standard (non-Alcor) ISA DMA limit, and the XLT set
 * of GRU interrupt-summary bits.
 */
struct alpha_machine_vector xlt_mv __initmv = {
	.vector_name = "XLT",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check = cia_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = EISA_DEFAULT_IO_BASE,
	.min_mem_address = CIA_DEFAULT_MEM_BASE,

	.nr_irqs = 48,
	.device_interrupt = alcor_device_interrupt,

	.init_arch = cia_init_arch,
	.init_irq = alcor_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = alcor_init_pci,
	.kill_arch = alcor_kill_arch,
	.pci_map_irq = alcor_map_irq,
	.pci_swizzle = common_swizzle,

	.sys = { .cia = {
		.gru_int_req_bits = XLT_GRU_INT_REQ_BITS
	}}
};
324 | |||
325 | /* No alpha_mv alias for XLT, since we compile it in unconditionally | ||
326 | with ALCOR; setup_arch knows how to cope. */ | ||
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c new file mode 100644 index 000000000000..8e3374d34c95 --- /dev/null +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
@@ -0,0 +1,448 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_cabriolet.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999, 2000 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164, | ||
9 | * PC164 and LX164. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/bitops.h> | ||
20 | |||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <asm/dma.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/core_apecs.h> | ||
29 | #include <asm/core_cia.h> | ||
30 | #include <asm/core_lca.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | |||
33 | #include "proto.h" | ||
34 | #include "irq_impl.h" | ||
35 | #include "pci_impl.h" | ||
36 | #include "machvec_impl.h" | ||
37 | |||
38 | |||
/* Note mask bit is true for DISABLED irqs (opposite polarity from
   sys_alcor).  Shadow of the three mask registers at 0x804-0x806;
   starts with everything masked. */
static unsigned long cached_irq_mask = ~0UL;
41 | |||
/*
 * Write the byte of MASK that covers IRQ to the matching mask
 * register: irqs 16-23 -> port 0x804, 24-31 -> 0x805, 32-39 -> 0x806.
 */
static inline void
cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
{
	int ofs = (irq - 16) / 8;
	outb(mask >> (16 + ofs * 8), 0x804 + ofs);
}
48 | |||
/* Enable IRQ: clear its (disable-polarity) bit and push to hardware. */
static inline void
cabriolet_enable_irq(unsigned int irq)
{
	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
}
54 | |||
/* Disable IRQ: set its (disable-polarity) bit and push to hardware. */
static void
cabriolet_disable_irq(unsigned int irq)
{
	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
}
60 | |||
/* ->startup hook: enable and report nothing pending. */
static unsigned int
cabriolet_startup_irq(unsigned int irq)
{
	cabriolet_enable_irq(irq);
	return 0; /* never anything pending */
}
67 | |||
/* ->end hook: re-enable unless the irq is disabled or still in progress. */
static void
cabriolet_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		cabriolet_enable_irq(irq);
}
74 | |||
/* irq-controller methods for the 0x804-0x806 summary-register irqs.
   ack simply disables: there is no separate dismiss step here. */
static struct hw_interrupt_type cabriolet_irq_type = {
	.typename = "CABRIOLET",
	.startup = cabriolet_startup_irq,
	.shutdown = cabriolet_disable_irq,
	.enable = cabriolet_enable_irq,
	.disable = cabriolet_disable_irq,
	.ack = cabriolet_disable_irq,
	.end = cabriolet_end_irq,
};
84 | |||
/*
 * Machine-vector device_interrupt hook: gather the three 8-bit
 * summary registers into one pending mask and dispatch each source,
 * lowest bit first.  Bit 4 is the ISA (SIO) cascade; every other bit
 * maps to irq 16 + bit.
 */
static void
cabriolet_device_interrupt(unsigned long v, struct pt_regs *r)
{
	unsigned long summary;

	/* Combine summary registers 0x804/0x805/0x806 into 24 bits. */
	summary = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16);

	/* summary &= summary - 1 clears the lowest set bit each pass. */
	for (; summary; summary &= summary - 1) {
		unsigned int bit = ffz(~summary);	/* lowest set bit */

		if (bit == 4)
			isa_device_interrupt(v, r);	/* ISA cascade */
		else
			handle_irq(16 + bit, r);
	}
}
108 | |||
/*
 * Shared irq init for the Cabriolet family.  Under SRM, install the
 * given device_interrupt hook and let the SRM irq code own irqs
 * 16..34; otherwise mask everything in the 0x804-0x806 registers and
 * install cabriolet_irq_type.  Either way, set up ISA DMA and the
 * ISA cascade on irq 16+4.
 */
static void __init
common_init_irq(void (*srm_dev_int)(unsigned long v, struct pt_regs *r))
{
	init_i8259a_irqs();

	if (alpha_using_srm) {
		alpha_mv.device_interrupt = srm_dev_int;
		init_srm_irqs(35, 0);
	}
	else {
		long i;

		/* Mask all three summary-register banks (1 = disabled). */
		outb(0xff, 0x804);
		outb(0xff, 0x805);
		outb(0xff, 0x806);

		for (i = 16; i < 35; ++i) {
			irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
			irq_desc[i].handler = &cabriolet_irq_type;
		}
	}

	common_init_isa_dma();
	setup_irq(16+4, &isa_cascade_irqaction);
}
134 | |||
#ifndef CONFIG_ALPHA_PC164
/* Default family irq init; PC164 builds use pc164_init_irq instead. */
static void __init
cabriolet_init_irq(void)
{
	common_init_irq(srm_device_interrupt);
}
#endif
142 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
/* In theory, the PC164 has the same interrupt hardware as the other
   Cabriolet based systems.  However, something got screwed up late
   in the development cycle which broke the interrupt masking hardware.
   Repeat, it is not possible to mask and ack interrupts.  At all.

   In an attempt to work around this, while processing interrupts,
   we do not allow the IPL to drop below what it is currently.  This
   prevents the possibility of recursion.

   ??? Another option might be to force all PCI devices to use edge
   triggered rather than level triggered interrupts.  That might be
   too invasive though.  */

/* SRM-path dispatch: pin __min_ipl at the current IPL for the
   duration so nested interrupts cannot recurse (see above). */
static void
pc164_srm_device_interrupt(unsigned long v, struct pt_regs *r)
{
	__min_ipl = getipl();
	srm_device_interrupt(v, r);
	__min_ipl = 0;
}

/* Non-SRM dispatch: same IPL pinning around the common handler. */
static void
pc164_device_interrupt(unsigned long v, struct pt_regs *r)
{
	__min_ipl = getipl();
	cabriolet_device_interrupt(v, r);
	__min_ipl = 0;
}

/* PC164 irq init: common setup with the IPL-pinning SRM hook. */
static void __init
pc164_init_irq(void)
{
	common_init_irq(pc164_srm_device_interrupt);
}
#endif
179 | |||
180 | /* | ||
181 | * The EB66+ is very similar to the EB66 except that it does not have | ||
182 | * the on-board NCR and Tulip chips. In the code below, I have used | ||
183 | * slot number to refer to the id select line and *not* the slot | ||
184 | * number used in the EB66+ documentation. However, in the table, | ||
185 | * I've given the slot number, the id select line and the Jxx number | ||
186 | * that's printed on the board. The interrupt pins from the PCI slots | ||
187 | * are wired into 3 interrupt summary registers at 0x804, 0x805 and | ||
188 | * 0x806 ISA. | ||
189 | * | ||
190 | * In the table, -1 means don't assign an IRQ number. This is usually | ||
191 | * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. | ||
192 | */ | ||
193 | |||
/*
 * EB66+ PCI irq routing.  irq_tab/min_idsel/max_idsel/irqs_per_slot
 * are the exact names COMMON_TABLE_LOOKUP (pci_impl.h) expects.
 * -1 means no irq (the SIO PCI/ISA bridge).
 *
 * NOTE(review): the 16+6 INTD entry for slot 3 breaks the 16+13..16+15
 * column pattern of the other rows — presumably actual board wiring,
 * but confirm against the EB66+ documentation before touching.
 */
static inline int __init
eb66p_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[5][5] __initdata = {
		/*INT INTA INTB INTC INTD */
		{16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
		{16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
		{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
		{16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 9, slot 2, J27 */
		{16+3, 16+3, 16+8, 16+12, 16+6} /* IdSel 10, slot 3, J28 */
	};
	const long min_idsel = 6, max_idsel = 10, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
208 | |||
209 | |||
210 | /* | ||
211 | * The AlphaPC64 is very similar to the EB66+ except that its slots | ||
212 | * are numbered differently. In the code below, I have used slot | ||
213 | * number to refer to the id select line and *not* the slot number | ||
214 | * used in the AlphaPC64 documentation. However, in the table, I've | ||
215 | * given the slot number, the id select line and the Jxx number that's | ||
216 | * printed on the board. The interrupt pins from the PCI slots are | ||
217 | * wired into 3 interrupt summary registers at 0x804, 0x805 and 0x806 | ||
218 | * ISA. | ||
219 | * | ||
220 | * In the table, -1 means don't assign an IRQ number. This is usually | ||
221 | * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. | ||
222 | */ | ||
223 | |||
/*
 * AlphaPC64 (Cabriolet) PCI irq routing; same COMMON_TABLE_LOOKUP
 * contract as eb66p_map_irq, with rows indexed by IdSel 5..9.
 * -1 means no irq (the SIO PCI/ISA bridge).
 */
static inline int __init
cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[5][5] __initdata = {
		/*INT INTA INTB INTC INTD */
		{ 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
		{ 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
		{ 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */
		{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
		{ 16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 9, slot 3, J22 */
	};
	const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
238 | |||
/* PCI init for APECS/LCA-based family members: common setup plus
   enabling the IDE on the NS87312 SuperIO at config port 0x398. */
static inline void __init
cabriolet_init_pci(void)
{
	common_init_pci();
	ns87312_enable_ide(0x398);
}
245 | |||
/* PCI init for CIA-based family members (EB164): CIA setup plus the
   same NS87312 IDE enable. */
static inline void __init
cia_cab_init_pci(void)
{
	cia_init_pci();
	ns87312_enable_ide(0x398);
}
252 | |||
253 | /* | ||
254 | * The PC164 and LX164 have 19 PCI interrupts, four from each of the four | ||
255 | * PCI slots, the SIO, PCI/IDE, and USB. | ||
256 | * | ||
257 | * Each of the interrupts can be individually masked. This is | ||
258 | * accomplished by setting the appropriate bit in the mask register. | ||
259 | * A bit is set by writing a "1" to the desired position in the mask | ||
260 | * register and cleared by writing a "0". There are 3 mask registers | ||
261 | * located at ISA address 804h, 805h and 806h. | ||
262 | * | ||
263 | * An I/O read at ISA address 804h, 805h, 806h will return the | ||
264 | * state of the 11 PCI interrupts and not the state of the MASKED | ||
265 | * interrupts. | ||
266 | * | ||
267 | * Note: A write to I/O 804h, 805h, and 806h the mask register will be | ||
268 | * updated. | ||
269 | * | ||
270 | * | ||
271 | * ISA DATA<7:0> | ||
272 | * ISA +--------------------------------------------------------------+ | ||
273 | * ADDRESS | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | | ||
274 | * +==============================================================+ | ||
275 | * 0x804 | INTB0 | USB | IDE | SIO | INTA3 |INTA2 | INTA1 | INTA0 | | ||
276 | * +--------------------------------------------------------------+ | ||
277 | * 0x805 | INTD0 | INTC3 | INTC2 | INTC1 | INTC0 |INTB3 | INTB2 | INTB1 | | ||
278 | * +--------------------------------------------------------------+ | ||
279 | * 0x806 | Rsrv | Rsrv | Rsrv | Rsrv | Rsrv |INTD3 | INTD2 | INTD1 | | ||
280 | * +--------------------------------------------------------------+ | ||
281 | * * Rsrv = reserved bits | ||
282 | * Note: The mask register is write-only. | ||
283 | * | ||
284 | * IdSel | ||
285 | * 5 32 bit PCI option slot 2 | ||
286 | * 6 64 bit PCI option slot 0 | ||
287 | * 7 64 bit PCI option slot 1 | ||
288 | * 8 Saturn I/O | ||
289 | * 9 32 bit PCI option slot 3 | ||
290 | * 10 USB | ||
291 | * 11 IDE | ||
292 | * | ||
293 | */ | ||
294 | |||
/*
 * PC164/LX164 PCI irq routing (see the mask-register table above);
 * COMMON_TABLE_LOOKUP contract, rows indexed by IdSel 5..11.
 * USB and IDE are single-line devices, so all pins map alike.
 */
static inline int __init
alphapc164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[7][5] __initdata = {
		/*INT INTA INTB INTC INTD */
		{ 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
		{ 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
		{ 16+1, 16+1, 16+8, 16+12, 16+16}, /* IdSel 7, slot 1, J26 */
		{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
		{ 16+3, 16+3, 16+10, 16+14, 16+18}, /* IdSel 9, slot 3, J19 */
		{ 16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 10, USB */
		{ 16+5, 16+5, 16+5, 16+5, 16+5} /* IdSel 11, IDE */
	};
	const long min_idsel = 5, max_idsel = 11, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
311 | |||
/* PC164/LX164 PCI init: CIA setup plus SMC 93x SuperIO initialization. */
static inline void __init
alphapc164_init_pci(void)
{
	cia_init_pci();
	SMC93x_Init();
}
318 | |||
319 | |||
320 | /* | ||
321 | * The System Vector | ||
322 | */ | ||
323 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
/*
 * Machine vector for the Cabriolet (AlphaPC64): EV4 + APECS.
 * ALIAS_MV makes this the default alpha_mv unless the build is also
 * EB64P (which supplies its own alias).
 */
struct alpha_machine_vector cabriolet_mv __initmv = {
	.vector_name = "Cabriolet",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check = apecs_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs = 35,
	.device_interrupt = cabriolet_device_interrupt,

	.init_arch = apecs_init_arch,
	.init_irq = cabriolet_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = cabriolet_init_pci,
	.pci_map_irq = cabriolet_map_irq,
	.pci_swizzle = common_swizzle,
};
#ifndef CONFIG_ALPHA_EB64P
ALIAS_MV(cabriolet)
#endif
#endif
349 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164)
/* Machine vector for the EB164: EV5 + CIA, NS87312 IDE via
   cia_cab_init_pci. */
struct alpha_machine_vector eb164_mv __initmv = {
	.vector_name = "EB164",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check = cia_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = CIA_DEFAULT_MEM_BASE,

	.nr_irqs = 35,
	.device_interrupt = cabriolet_device_interrupt,

	.init_arch = cia_init_arch,
	.init_irq = cabriolet_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = cia_cab_init_pci,
	.kill_arch = cia_kill_arch,
	.pci_map_irq = cabriolet_map_irq,
	.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb164)
#endif
374 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P)
/* Machine vector for the EB66+: EV4 + LCA, with its own irq routing
   table (eb66p_map_irq). */
struct alpha_machine_vector eb66p_mv __initmv = {
	.vector_name = "EB66+",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check = lca_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs = 35,
	.device_interrupt = cabriolet_device_interrupt,

	.init_arch = lca_init_arch,
	.init_irq = cabriolet_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = cabriolet_init_pci,
	.pci_map_irq = eb66p_map_irq,
	.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb66p)
#endif
398 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164)
/* Machine vector for the LX164: EV5 + PYXIS (with DAC offset), PC164
   irq table, SMC SuperIO init. */
struct alpha_machine_vector lx164_mv __initmv = {
	.vector_name = "LX164",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_PYXIS_IO,
	.machine_check = cia_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = PYXIS_DAC_OFFSET,

	.nr_irqs = 35,
	.device_interrupt = cabriolet_device_interrupt,

	.init_arch = pyxis_init_arch,
	.init_irq = cabriolet_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = alphapc164_init_pci,
	.kill_arch = cia_kill_arch,
	.pci_map_irq = alphapc164_map_irq,
	.pci_swizzle = common_swizzle,
};
ALIAS_MV(lx164)
#endif
424 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
/* Machine vector for the PC164: EV5 + CIA, using the IPL-pinning
   interrupt handlers (see the PC164 workaround comment above the
   pc164_* functions). */
struct alpha_machine_vector pc164_mv __initmv = {
	.vector_name = "PC164",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check = cia_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = CIA_DEFAULT_MEM_BASE,

	.nr_irqs = 35,
	.device_interrupt = pc164_device_interrupt,

	.init_arch = cia_init_arch,
	.init_irq = pc164_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = alphapc164_init_pci,
	.kill_arch = cia_kill_arch,
	.pci_map_irq = alphapc164_map_irq,
	.pci_swizzle = common_swizzle,
};
ALIAS_MV(pc164)
#endif
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c new file mode 100644 index 000000000000..9e36b07fa940 --- /dev/null +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -0,0 +1,689 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_dp264.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996, 1999 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Modified by Christopher C. Chimelis, 2001 to | ||
9 | * add support for the addition of Shark to the | ||
10 | * Tsunami family. | ||
11 | * | ||
12 | * Code supporting the DP264 (EV6+TSUNAMI). | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/bitops.h> | ||
23 | |||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/system.h> | ||
26 | #include <asm/dma.h> | ||
27 | #include <asm/irq.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/core_tsunami.h> | ||
32 | #include <asm/hwrpb.h> | ||
33 | #include <asm/tlbflush.h> | ||
34 | |||
35 | #include "proto.h" | ||
36 | #include "irq_impl.h" | ||
37 | #include "pci_impl.h" | ||
38 | #include "machvec_impl.h" | ||
39 | |||
40 | |||
41 | /* Note mask bit is true for ENABLED irqs. */ | ||
42 | static unsigned long cached_irq_mask; | ||
43 | /* dp264 boards handle at max four CPUs */ | ||
44 | static unsigned long cpu_irq_affinity[4] = { 0UL, 0UL, 0UL, 0UL }; | ||
45 | |||
46 | DEFINE_SPINLOCK(dp264_irq_lock); | ||
47 | |||
/*
 * Push the cached IRQ enable mask out to the Tsunami cchip DIMx CSRs
 * (one per CPU).  A set bit means the IRQ is ENABLED.  Bit 55 is the
 * ISA cascade interrupt (see the DIM0 summary table below) and is
 * forced on for the boot CPU only.
 */
static void
tsunami_update_irq_hw(unsigned long mask)
{
	register tsunami_cchip *cchip = TSUNAMI_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	/* Restrict each CPU's mask to the IRQs bound to it by
	   cpu_irq_affinity[], then route the ISA cascade to the
	   boot CPU alone.  */
	mask &= ~isa_enable;
	mask0 = mask & cpu_irq_affinity[0];
	mask1 = mask & cpu_irq_affinity[1];
	mask2 = mask & cpu_irq_affinity[2];
	mask3 = mask & cpu_irq_affinity[3];

	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	/* Redirect writes for CPUs that cannot exist to a dummy
	   location instead of their (absent) DIM CSR.  */
	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpu_possible(0)) dim0 = &dummy;
	if (!cpu_possible(1)) dim1 = &dummy;
	if (!cpu_possible(2)) dim2 = &dummy;
	if (!cpu_possible(3)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	/* Read back each CSR to ensure the writes have reached the
	   chip before we return.  */
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	/* UP: only the boot CPU's DIM register is in use.  */
	volatile unsigned long *dimB;
	if (bcpu == 0) dimB = &cchip->dim0.csr;
	else if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;				/* write barrier + readback flush */
#endif
}
100 | |||
101 | static void | ||
102 | dp264_enable_irq(unsigned int irq) | ||
103 | { | ||
104 | spin_lock(&dp264_irq_lock); | ||
105 | cached_irq_mask |= 1UL << irq; | ||
106 | tsunami_update_irq_hw(cached_irq_mask); | ||
107 | spin_unlock(&dp264_irq_lock); | ||
108 | } | ||
109 | |||
110 | static void | ||
111 | dp264_disable_irq(unsigned int irq) | ||
112 | { | ||
113 | spin_lock(&dp264_irq_lock); | ||
114 | cached_irq_mask &= ~(1UL << irq); | ||
115 | tsunami_update_irq_hw(cached_irq_mask); | ||
116 | spin_unlock(&dp264_irq_lock); | ||
117 | } | ||
118 | |||
/* ->startup hook: unmask the line; the return value reports pending
   state, which this hardware path has none of.  */
static unsigned int
dp264_startup_irq(unsigned int irq)
{
	dp264_enable_irq(irq);
	return 0; /* never anything pending */
}
125 | |||
/* ->end hook: re-enable after handling (->ack disabled the line),
   unless the IRQ was disabled or is still being processed.  */
static void
dp264_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		dp264_enable_irq(irq);
}
132 | |||
133 | static void | ||
134 | clipper_enable_irq(unsigned int irq) | ||
135 | { | ||
136 | spin_lock(&dp264_irq_lock); | ||
137 | cached_irq_mask |= 1UL << (irq - 16); | ||
138 | tsunami_update_irq_hw(cached_irq_mask); | ||
139 | spin_unlock(&dp264_irq_lock); | ||
140 | } | ||
141 | |||
142 | static void | ||
143 | clipper_disable_irq(unsigned int irq) | ||
144 | { | ||
145 | spin_lock(&dp264_irq_lock); | ||
146 | cached_irq_mask &= ~(1UL << (irq - 16)); | ||
147 | tsunami_update_irq_hw(cached_irq_mask); | ||
148 | spin_unlock(&dp264_irq_lock); | ||
149 | } | ||
150 | |||
/* ->startup hook for Clipper: unmask; nothing pending to report.  */
static unsigned int
clipper_startup_irq(unsigned int irq)
{
	clipper_enable_irq(irq);
	return 0; /* never anything pending */
}
157 | |||
/* ->end hook for Clipper: re-enable after handling unless disabled
   or still in progress (mirrors dp264_end_irq).  */
static void
clipper_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		clipper_enable_irq(irq);
}
164 | |||
165 | static void | ||
166 | cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | ||
167 | { | ||
168 | int cpu; | ||
169 | |||
170 | for (cpu = 0; cpu < 4; cpu++) { | ||
171 | unsigned long aff = cpu_irq_affinity[cpu]; | ||
172 | if (cpu_isset(cpu, affinity)) | ||
173 | aff |= 1UL << irq; | ||
174 | else | ||
175 | aff &= ~(1UL << irq); | ||
176 | cpu_irq_affinity[cpu] = aff; | ||
177 | } | ||
178 | } | ||
179 | |||
/* ->set_affinity hook: update the per-CPU affinity table for this IRQ
   and rewrite the DIM registers from the (unchanged) enable mask.  */
static void
dp264_set_affinity(unsigned int irq, cpumask_t affinity)
{
	spin_lock(&dp264_irq_lock);
	cpu_set_irq_affinity(irq, affinity);
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);
}
188 | |||
/* ->set_affinity hook for Clipper: as dp264_set_affinity, but the
   affinity table is indexed by mask bit (irq - 16).  */
static void
clipper_set_affinity(unsigned int irq, cpumask_t affinity)
{
	spin_lock(&dp264_irq_lock);
	cpu_set_irq_affinity(irq - 16, affinity);
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);
}
197 | |||
/* IRQ controller ops for DP264-family boards: mask-based enable with
   disable-on-ack, re-enable in ->end.  */
static struct hw_interrupt_type dp264_irq_type = {
	.typename	= "DP264",
	.startup	= dp264_startup_irq,
	.shutdown	= dp264_disable_irq,
	.enable		= dp264_enable_irq,
	.disable	= dp264_disable_irq,
	.ack		= dp264_disable_irq,
	.end		= dp264_end_irq,
	.set_affinity	= dp264_set_affinity,
};

/* Same structure for Clipper, using the irq-16 bit mapping variants.  */
static struct hw_interrupt_type clipper_irq_type = {
	.typename	= "CLIPPER",
	.startup	= clipper_startup_irq,
	.shutdown	= clipper_disable_irq,
	.enable		= clipper_enable_irq,
	.disable	= clipper_disable_irq,
	.ack		= clipper_disable_irq,
	.end		= clipper_end_irq,
	.set_affinity	= clipper_set_affinity,
};
219 | |||
/*
 * Non-SRM device-interrupt dispatch for DP264-family boards.  The live
 * path is currently the "#if 1" stub that just logs; the disabled
 * branch reads the Tsunami DIR0 interrupt-request summary and calls a
 * handler for every set bit (bit 55 being the ISA cascade).  When SRM
 * is in use, dp264_init_irq replaces this hook with the SRM variant.
 */
static void
dp264_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
#if 1
	printk("dp264_device_interrupt: NOT IMPLEMENTED YET!! \n");
#else
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary register of TSUNAMI */
	pld = TSUNAMI_cchip->dir0.csr;

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);
		pld &= pld - 1; /* clear least bit set */
		if (i == 55)
			isa_device_interrupt(vector, regs);
		else
			/* Fixed: handle_irq takes (irq, regs), as at every
			   other call site in this file; the old code passed
			   the irq number twice.  */
			handle_irq(16 + i, regs);
#if 0
		TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
		tmp = TSUNAMI_cchip->dir0.csr;
#endif
	}
#endif
}
250 | |||
/*
 * SRM-console device-interrupt dispatch for DP264/BRICK/MONET.
 * Converts the SRM vector back into a Linux IRQ number and hands it
 * to the generic layer.
 */
static void
dp264_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq;

	irq = (vector - 0x800) >> 4;

	/*
	 * The SRM console reports PCI interrupts with a vector calculated by:
	 *
	 *	0x900 + (0x10 * DRIR-bit)
	 *
	 * So bit 16 shows up as IRQ 32, etc.
	 *
	 * On DP264/BRICK/MONET, we adjust it down by 16 because at least
	 * that many of the low order bits of the DRIR are not used, and
	 * so we don't count them.
	 */
	if (irq >= 32)
		irq -= 16;

	handle_irq(irq, regs);
}
274 | |||
/*
 * SRM-console device-interrupt dispatch for Clipper.  Unlike DP264,
 * the vector-derived IRQ number is used directly (no -16 adjustment).
 */
static void
clipper_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq;

	irq = (vector - 0x800) >> 4;

	/*
	 * The SRM console reports PCI interrupts with a vector calculated by:
	 *
	 *	0x900 + (0x10 * DRIR-bit)
	 *
	 * So bit 16 shows up as IRQ 32, etc.
	 *
	 * CLIPPER uses bits 8-47 for PCI interrupts, so we do not need
	 * to scale down the vector reported, we just use it.
	 *
	 * Eg IRQ 24 is DRIR bit 8, etc, etc
	 */
	handle_irq(irq, regs);
}
296 | |||
297 | static void __init | ||
298 | init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax) | ||
299 | { | ||
300 | long i; | ||
301 | for (i = imin; i <= imax; ++i) { | ||
302 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | ||
303 | irq_desc[i].handler = ops; | ||
304 | } | ||
305 | } | ||
306 | |||
/*
 * IRQ setup for DP264/Monet/Webbrick: reset the ISA DMA controllers,
 * switch to SRM-style dispatch when running under SRM, clear the
 * Tsunami interrupt masks, then register the i8259 (IRQ 0-15) and
 * Tsunami (IRQ 16-47) controllers.
 */
static void __init
dp264_init_irq(void)
{
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);		/* unmask the cascaded controller */

	if (alpha_using_srm)
		alpha_mv.device_interrupt = dp264_srm_device_interrupt;

	tsunami_update_irq_hw(0);	/* start with everything masked */

	init_i8259a_irqs();
	init_tsunami_irqs(&dp264_irq_type, 16, 47);
}
323 | |||
/*
 * IRQ setup for Clipper/Shark: as dp264_init_irq, but with the Clipper
 * ops on IRQs 24-63 (Clipper's DRIR bits 8-47 map to IRQ 24-63).
 */
static void __init
clipper_init_irq(void)
{
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);		/* unmask the cascaded controller */

	if (alpha_using_srm)
		alpha_mv.device_interrupt = clipper_srm_device_interrupt;

	tsunami_update_irq_hw(0);	/* start with everything masked */

	init_i8259a_irqs();
	init_tsunami_irqs(&clipper_irq_type, 24, 63);
}
340 | |||
341 | |||
342 | /* | ||
343 | * PCI Fixup configuration. | ||
344 | * | ||
345 | * Summary @ TSUNAMI_CSR_DIM0: | ||
346 | * Bit Meaning | ||
347 | * 0-17 Unused | ||
348 | *18 Interrupt SCSI B (Adaptec 7895 builtin) | ||
349 | *19 Interrupt SCSI A (Adaptec 7895 builtin) | ||
350 | *20 Interrupt Line D from slot 2 PCI0 | ||
351 | *21 Interrupt Line C from slot 2 PCI0 | ||
352 | *22 Interrupt Line B from slot 2 PCI0 | ||
353 | *23 Interrupt Line A from slot 2 PCI0 | ||
354 | *24 Interrupt Line D from slot 1 PCI0 | ||
355 | *25 Interrupt Line C from slot 1 PCI0 | ||
356 | *26 Interrupt Line B from slot 1 PCI0 | ||
357 | *27 Interrupt Line A from slot 1 PCI0 | ||
358 | *28 Interrupt Line D from slot 0 PCI0 | ||
359 | *29 Interrupt Line C from slot 0 PCI0 | ||
360 | *30 Interrupt Line B from slot 0 PCI0 | ||
361 | *31 Interrupt Line A from slot 0 PCI0 | ||
362 | * | ||
363 | *32 Interrupt Line D from slot 3 PCI1 | ||
364 | *33 Interrupt Line C from slot 3 PCI1 | ||
365 | *34 Interrupt Line B from slot 3 PCI1 | ||
366 | *35 Interrupt Line A from slot 3 PCI1 | ||
367 | *36 Interrupt Line D from slot 2 PCI1 | ||
368 | *37 Interrupt Line C from slot 2 PCI1 | ||
369 | *38 Interrupt Line B from slot 2 PCI1 | ||
370 | *39 Interrupt Line A from slot 2 PCI1 | ||
371 | *40 Interrupt Line D from slot 1 PCI1 | ||
372 | *41 Interrupt Line C from slot 1 PCI1 | ||
373 | *42 Interrupt Line B from slot 1 PCI1 | ||
374 | *43 Interrupt Line A from slot 1 PCI1 | ||
375 | *44 Interrupt Line D from slot 0 PCI1 | ||
376 | *45 Interrupt Line C from slot 0 PCI1 | ||
377 | *46 Interrupt Line B from slot 0 PCI1 | ||
378 | *47 Interrupt Line A from slot 0 PCI1 | ||
379 | *48-52 Unused | ||
380 | *53 PCI0 NMI (from Cypress) | ||
381 | *54 PCI0 SMI INT (from Cypress) | ||
382 | *55 PCI0 ISA Interrupt (from Cypress) | ||
383 | *56-60 Unused | ||
384 | *61 PCI1 Bus Error | ||
385 | *62 PCI0 Bus Error | ||
386 | *63 Reserved | ||
387 | * | ||
388 | * IdSel | ||
389 | * 5 Cypress Bridge I/O | ||
390 | * 6 SCSI Adaptec builtin | ||
391 | * 7 64 bit PCI option slot 0 (all busses) | ||
392 | * 8 64 bit PCI option slot 1 (all busses) | ||
393 | * 9 64 bit PCI option slot 2 (all busses) | ||
394 | * 10 64 bit PCI option slot 3 (not bus 0) | ||
395 | */ | ||
396 | |||
/*
 * PCI interrupt routing for DP264.  COMMON_TABLE_LOOKUP expects the
 * local names irq_tab/min_idsel/max_idsel/irqs_per_slot and indexes
 * the table by (IdSel - min_idsel, pin).  Hose 1 IRQs are shifted up
 * by 16 per hose index; otherwise fall back to whatever the console
 * programmed into PCI_INTERRUPT_LINE.
 */
static int __init
dp264_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[6][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 5 ISA Bridge */
		{ 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
		{ 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */
		{ 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */
		{ 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4}, /* IdSel 9 slot 2 */
		{ 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0}  /* IdSel 10 slot 3 */
	};
	const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;

	struct pci_controller *hose = dev->sysdata;
	int irq = COMMON_TABLE_LOOKUP;

	if (irq > 0) {
		irq += 16 * hose->index;	/* hose 1 IRQs live 16 higher */
	} else {
		/* ??? The Contaq IDE controller on the ISA bridge uses
		   "legacy" interrupts 14 and 15.  I don't know if anything
		   can wind up at the same slot+pin on hose1, so we'll
		   just have to trust whatever value the console might
		   have assigned.  */

		u8 irq8;
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8);
		irq = irq8;
	}

	return irq;
}
430 | |||
/*
 * PCI interrupt routing for Monet.  Table rows are indexed by
 * IdSel - 3.  NOTE(review): with the "#if 1" branch active, the rows
 * at IdSel 9/10 carry the PCI2 slot 4/5 IRQs (duplicating the real
 * IdSel 14/15 rows at the end) — presumably those devices also probe
 * at the lower IdSels; confirm against Monet hardware docs.
 */
static int __init
monet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[13][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{    45,    45,    45,    45,    45}, /* IdSel 3 21143 PCI1 */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 4 unused */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 5 unused */
		{    47,    47,    47,    47,    47}, /* IdSel 6 SCSI PCI1 */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 7 ISA Bridge */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 8 P2P PCI1 */
#if 1
		{    28,    28,    29,    30,    31}, /* IdSel 14 slot 4 PCI2*/
		{    24,    24,    25,    26,    27}, /* IdSel 15 slot 5 PCI2*/
#else
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 9 unused */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 10 unused */
#endif
		{    40,    40,    41,    42,    43}, /* IdSel 11 slot 1 PCI0*/
		{    36,    36,    37,    38,    39}, /* IdSel 12 slot 2 PCI0*/
		{    32,    32,    33,    34,    35}, /* IdSel 13 slot 3 PCI0*/
		{    28,    28,    29,    30,    31}, /* IdSel 14 slot 4 PCI2*/
		{    24,    24,    25,    26,    27}  /* IdSel 15 slot 5 PCI2*/
	};
	const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
458 | |||
/*
 * Interrupt-pin swizzle for Monet.  Walks any card-based bridge chain
 * back toward the root, swizzling the pin at each hop, but stops early
 * at the built-in bridge at slot 8 on hose 1 (devices behind it are
 * routed by their own slot, not the bridge's).  Returns the slot used
 * for routing and updates *pinp with the swizzled pin.
 */
static u8 __init
monet_swizzle(struct pci_dev *dev, u8 *pinp)
{
	struct pci_controller *hose = dev->sysdata;
	int slot, pin = *pinp;

	if (!dev->bus->parent) {
		/* Directly on the root bus: no swizzling needed.  */
		slot = PCI_SLOT(dev->devfn);
	}
	/* Check for the built-in bridge on hose 1. */
	else if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) {
		slot = PCI_SLOT(dev->devfn);
	} else {
		/* Must be a card-based bridge. */
		do {
			/* Check for built-in bridge on hose 1. */
			if (hose->index == 1 &&
			    PCI_SLOT(dev->bus->self->devfn) == 8) {
				slot = PCI_SLOT(dev->devfn);
				break;
			}
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;

			/* Move up the chain of bridges. */
			dev = dev->bus->self;
			/* Slot of the next bridge. */
			slot = PCI_SLOT(dev->devfn);
		} while (dev->bus->self);
	}
	*pinp = pin;
	return slot;
}
491 | |||
/*
 * PCI interrupt routing for Webbrick (DS10).  Rows are indexed by
 * IdSel - 7; only the two onboard 21143 NICs and four option slots
 * generate interrupts.
 */
static int __init
webbrick_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[13][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 7 ISA Bridge */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 8 unused */
		{    29,    29,    29,    29,    29}, /* IdSel 9 21143 #1 */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 10 unused */
		{    30,    30,    30,    30,    30}, /* IdSel 11 21143 #2 */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 12 unused */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 13 unused */
		{    35,    35,    34,    33,    32}, /* IdSel 14 slot 0 */
		{    39,    39,    38,    37,    36}, /* IdSel 15 slot 1 */
		{    43,    43,    42,    41,    40}, /* IdSel 16 slot 2 */
		{    47,    47,    46,    45,    44}, /* IdSel 17 slot 3 */
	};
	const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
512 | |||
/*
 * PCI interrupt routing for Clipper/Shark.  Rows are indexed by
 * IdSel - 1; like DP264, IRQs on later hoses are shifted up by
 * 16 per hose index.
 */
static int __init
clipper_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[7][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
		{ 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
		{ 16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 3 slot 3 */
		{ 16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 4 slot 4 */
		{ 16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 5 slot 5 */
		{ 16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 6 slot 6 */
		{    -1,    -1,    -1,    -1,    -1}  /* IdSel 7 ISA Bridge */
	};
	const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5;

	struct pci_controller *hose = dev->sysdata;
	int irq = COMMON_TABLE_LOOKUP;

	if (irq > 0)
		irq += 16 * hose->index;	/* per-hose IRQ offset */

	return irq;
}
536 | |||
/* PCI init for DP264: common setup plus the SMC superio probe.
   NOTE(review): SMC669_Init's argument presumably selects a probe
   variant/verbosity — confirm in its definition.  */
static void __init
dp264_init_pci(void)
{
	common_init_pci();
	SMC669_Init(0);
}
543 | |||
/* PCI init for Monet: common setup, SMC superio probe, plus the
   ES1888 audio device init.  */
static void __init
monet_init_pci(void)
{
	common_init_pci();
	SMC669_Init(1);
	es1888_init();
}
551 | |||
/* Arch init for Webbrick: standard Tsunami setup, then widen the
   scatter-gather alignment for the single-hose DS10.  */
static void __init
webbrick_init_arch(void)
{
	tsunami_init_arch();

	/* Tsunami caches 4 PTEs at a time; DS10 has only 1 hose. */
	hose_head->sg_isa->align_entry = 4;
	hose_head->sg_pci->align_entry = 4;
}
561 | |||
562 | |||
/*
 * The System Vectors
 *
 * All five Tsunami-based variants share the EV6 MMU setup, Tsunami
 * I/O macros, machine check, and the dp264 device-interrupt hook;
 * they differ only in PCI init/routing, swizzle, arch/IRQ init.
 */

struct alpha_machine_vector dp264_mv __initmv = {
	.vector_name		= "DP264",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check		= tsunami_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TSUNAMI_DAC_OFFSET,

	.nr_irqs		= 64,
	.device_interrupt	= dp264_device_interrupt,

	.init_arch		= tsunami_init_arch,
	.init_irq		= dp264_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= dp264_init_pci,
	.kill_arch		= tsunami_kill_arch,
	.pci_map_irq		= dp264_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(dp264)

/* Monet: custom PCI init (superio + audio), routing and swizzle.  */
struct alpha_machine_vector monet_mv __initmv = {
	.vector_name		= "Monet",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check		= tsunami_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TSUNAMI_DAC_OFFSET,

	.nr_irqs		= 64,
	.device_interrupt	= dp264_device_interrupt,

	.init_arch		= tsunami_init_arch,
	.init_irq		= dp264_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= monet_init_pci,
	.kill_arch		= tsunami_kill_arch,
	.pci_map_irq		= monet_map_irq,
	.pci_swizzle		= monet_swizzle,
};

/* Webbrick (DS10): custom arch init (SG alignment) and routing.  */
struct alpha_machine_vector webbrick_mv __initmv = {
	.vector_name		= "Webbrick",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check		= tsunami_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TSUNAMI_DAC_OFFSET,

	.nr_irqs		= 64,
	.device_interrupt	= dp264_device_interrupt,

	.init_arch		= webbrick_init_arch,
	.init_irq		= dp264_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= tsunami_kill_arch,
	.pci_map_irq		= webbrick_map_irq,
	.pci_swizzle		= common_swizzle,
};

/* Clipper: its own IRQ init/routing (IRQ 24-63, bit = irq - 16).  */
struct alpha_machine_vector clipper_mv __initmv = {
	.vector_name		= "Clipper",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check		= tsunami_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TSUNAMI_DAC_OFFSET,

	.nr_irqs		= 64,
	.device_interrupt	= dp264_device_interrupt,

	.init_arch		= tsunami_init_arch,
	.init_irq		= clipper_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= tsunami_kill_arch,
	.pci_map_irq		= clipper_map_irq,
	.pci_swizzle		= common_swizzle,
};

/* Sharks strongly resemble Clipper, at least as far
 * as interrupt routing, etc, so we're using the
 * same functions as Clipper does
 */

struct alpha_machine_vector shark_mv __initmv = {
	.vector_name		= "Shark",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check		= tsunami_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TSUNAMI_DAC_OFFSET,

	.nr_irqs		= 64,
	.device_interrupt	= dp264_device_interrupt,

	.init_arch		= tsunami_init_arch,
	.init_irq		= clipper_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= tsunami_kill_arch,
	.pci_map_irq		= clipper_map_irq,
	.pci_swizzle		= common_swizzle,
};

/* No alpha_mv alias for webbrick/monet/clipper, since we compile them
   in unconditionally with DP264; setup_arch knows how to cope. */
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c new file mode 100644 index 000000000000..61a79c354f0b --- /dev/null +++ b/arch/alpha/kernel/sys_eb64p.c | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_eb64p.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the EB64+ and EB66. | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/bitops.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/system.h> | ||
22 | #include <asm/dma.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <asm/mmu_context.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/core_apecs.h> | ||
28 | #include <asm/core_lca.h> | ||
29 | #include <asm/hwrpb.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | |||
32 | #include "proto.h" | ||
33 | #include "irq_impl.h" | ||
34 | #include "pci_impl.h" | ||
35 | #include "machvec_impl.h" | ||
36 | |||
37 | |||
38 | /* Note mask bit is true for DISABLED irqs. */ | ||
39 | static unsigned int cached_irq_mask = -1; | ||
40 | |||
/* Write the relevant byte of the 16-bit mask (bits 16-31 of the
   cached word) to the external summary/mask port: port 0x26 covers
   IRQ 16-23, port 0x27 covers IRQ 24-31.  Mask bit true == DISABLED.  */
static inline void
eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
{
	outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26));
}
46 | |||
47 | static inline void | ||
48 | eb64p_enable_irq(unsigned int irq) | ||
49 | { | ||
50 | eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); | ||
51 | } | ||
52 | |||
53 | static void | ||
54 | eb64p_disable_irq(unsigned int irq) | ||
55 | { | ||
56 | eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); | ||
57 | } | ||
58 | |||
/* ->startup hook: unmask; nothing pending to report.  */
static unsigned int
eb64p_startup_irq(unsigned int irq)
{
	eb64p_enable_irq(irq);
	return 0; /* never anything pending */
}
65 | |||
/* ->end hook: re-enable after handling (->ack disabled the line)
   unless the IRQ is disabled or still in progress.  */
static void
eb64p_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		eb64p_enable_irq(irq);
}
72 | |||
/* IRQ controller ops for EB64+/EB66: mask-based, disable-on-ack,
   re-enable in ->end.  No SMP affinity support.  */
static struct hw_interrupt_type eb64p_irq_type = {
	.typename	= "EB64P",
	.startup	= eb64p_startup_irq,
	.shutdown	= eb64p_disable_irq,
	.enable		= eb64p_enable_irq,
	.disable	= eb64p_disable_irq,
	.ack		= eb64p_disable_irq,
	.end		= eb64p_end_irq,
};
82 | |||
/*
 * Device-interrupt dispatch for EB64+/EB66: read the two external
 * summary ports (0x26/0x27) and dispatch every set bit as IRQ 16+bit;
 * bit 5 is the cascade from the two ISA PICs (see the table below).
 */
static void
eb64p_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary registers */
	pld = inb(0x26) | (inb(0x27) << 8);

	/*
	 * Now, for every possible bit set, work through
	 * them and call the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);
		pld &= pld - 1;	/* clear least bit set */

		if (i == 5) {
			isa_device_interrupt(vector, regs);
		} else {
			handle_irq(16 + i, regs);
		}
	}
}
107 | |||
/*
 * IRQ setup for EB64+/EB66.  On generic/Cabriolet kernels, first
 * detect a mis-identified Cabriolet (its summary port reads back
 * something other than all-ones RAZ bits), patch the HWRPB variation,
 * and delegate everything to the Cabriolet machine vector.  Otherwise
 * mask all external IRQs, register the i8259 and EB64P controllers,
 * and hook up the ISA cascade on IRQ 16+5.
 */
static void __init
eb64p_init_irq(void)
{
	long i;

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
	/*
	 * CABRIO SRM may not set variation correctly, so here we test
	 * the high word of the interrupt summary register for the RAZ
	 * bits, and hope that a true EB64+ would read all ones...
	 */
	if (inw(0x806) != 0xffff) {
		extern struct alpha_machine_vector cabriolet_mv;

		printk("Detected Cabriolet: correcting HWRPB.\n");

		hwrpb->sys_variation |= 2L << 10;
		hwrpb_update_checksum(hwrpb);

		alpha_mv = cabriolet_mv;
		alpha_mv.init_irq();
		return;
	}
#endif /* GENERIC */

	outb(0xff, 0x26);	/* mask all external IRQs... */
	outb(0xff, 0x27);	/* ...on both summary ports */

	init_i8259a_irqs();

	for (i = 16; i < 32; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &eb64p_irq_type;
	}

	common_init_isa_dma();
	setup_irq(16+5, &isa_cascade_irqaction);
}
146 | |||
147 | /* | ||
148 | * PCI Fixup configuration. | ||
149 | * | ||
150 | * There are two 8 bit external summary registers as follows: | ||
151 | * | ||
152 | * Summary @ 0x26: | ||
153 | * Bit Meaning | ||
154 | * 0 Interrupt Line A from slot 0 | ||
155 | * 1 Interrupt Line A from slot 1 | ||
156 | * 2 Interrupt Line B from slot 0 | ||
157 | * 3 Interrupt Line B from slot 1 | ||
158 | * 4 Interrupt Line C from slot 0 | ||
159 | * 5 Interrupt line from the two ISA PICs | ||
160 | * 6 Tulip | ||
161 | * 7 NCR SCSI | ||
162 | * | ||
163 | * Summary @ 0x27 | ||
164 | * Bit Meaning | ||
165 | * 0 Interrupt Line C from slot 1 | ||
166 | * 1 Interrupt Line D from slot 0 | ||
167 | * 2 Interrupt Line D from slot 1 | ||
168 | * 3 RAZ | ||
169 | * 4 RAZ | ||
170 | * 5 RAZ | ||
171 | * 6 RAZ | ||
172 | * 7 RAZ | ||
173 | * | ||
174 | * The device to slot mapping looks like: | ||
175 | * | ||
176 | * Slot Device | ||
177 | * 5 NCR SCSI controller | ||
178 | * 6 PCI on board slot 0 | ||
179 | * 7 PCI on board slot 1 | ||
180 | * 8 Intel SIO PCI-ISA bridge chip | ||
181 | * 9 Tulip - DECchip 21040 Ethernet controller | ||
182 | * | ||
183 | * | ||
184 | * This two layered interrupt approach means that we allocate IRQ 16 and | ||
185 | * above for PCI interrupts. The IRQ relates to which bit the interrupt | ||
186 | * comes in on. This makes interrupt processing much easier. | ||
187 | */ | ||
188 | |||
/*
 * PCI interrupt routing for EB64+/EB66.  Rows are indexed by
 * IdSel - 5; IRQ numbers follow the summary-register bit layout
 * documented above (IRQ 16 + bit).
 */
static int __init
eb64p_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[5][5] __initdata = {
		/*INT  INTA  INTB  INTC   INTD */
		{16+7, 16+7, 16+7, 16+7,  16+7},  /* IdSel 5,  slot ?, ?? */
		{16+0, 16+0, 16+2, 16+4,  16+9},  /* IdSel 6,  slot ?, ?? */
		{16+1, 16+1, 16+3, 16+8, 16+10},  /* IdSel 7,  slot ?, ?? */
		{  -1,   -1,   -1,   -1,    -1},  /* IdSel 8,  SIO */
		{16+6, 16+6, 16+6, 16+6,  16+6},  /* IdSel 9,  TULIP */
	};
	const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
203 | |||
204 | |||
/*
 * The System Vector
 *
 * EB64+ runs with the APECS core logic, EB66 with LCA; they share
 * the interrupt machinery above.
 */

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P)
struct alpha_machine_vector eb64p_mv __initmv = {
	.vector_name		= "EB64+",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= apecs_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 32,
	.device_interrupt	= eb64p_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= eb64p_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= NULL,
	.pci_map_irq		= eb64p_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(eb64p)
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66)
struct alpha_machine_vector eb66_mv __initmv = {
	.vector_name		= "EB66",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 32,
	.device_interrupt	= eb64p_device_interrupt,

	.init_arch		= lca_init_arch,
	.init_irq		= eb64p_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	/* .kill_arch defaults to NULL (zero-initialized), as on EB64+.  */
	.pci_map_irq		= eb64p_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(eb66)
#endif
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c new file mode 100644 index 000000000000..bd6e5f0e43c7 --- /dev/null +++ b/arch/alpha/kernel/sys_eiger.c | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_eiger.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996, 1999 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * Copyright (C) 1999 Iain Grant | ||
8 | * | ||
9 | * Code supporting the EIGER (EV6+TSUNAMI). | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/bitops.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/system.h> | ||
22 | #include <asm/dma.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <asm/mmu_context.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/pci.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/core_tsunami.h> | ||
29 | #include <asm/hwrpb.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | |||
32 | #include "proto.h" | ||
33 | #include "irq_impl.h" | ||
34 | #include "pci_impl.h" | ||
35 | #include "machvec_impl.h" | ||
36 | |||
37 | |||
38 | /* Note that this interrupt code is identical to TAKARA. */ | ||
39 | |||
/* Software copy of the interrupt mask registers.  Note mask bit is
   true for DISABLED irqs.  Word 0 covers irqs 0-63, word 1 covers
   64-127; everything starts masked. */
static unsigned long cached_irq_mask[2] = { -1, -1 };
42 | |||
/*
 * Push the 16-bit slice of the cached mask that contains 'irq' out to
 * the corresponding hardware mask register.  The mask registers live
 * at I/O ports 0x510/0x514/0x518/0x51c, each controlling 16 irqs
 * starting at irq 16, and each expects its mask in bits <31:16>.
 */
static inline void
eiger_update_irq_hw(unsigned long irq, unsigned long mask)
{
	int regaddr;

	/* Shift the relevant 16-bit group into bits <31:16>.  For
	   irq >= 64 the group is in the low word of mask word 1;
	   otherwise select one of the 16-bit groups of word 0. */
	mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
	/* Select the register: 0x510 + 4 * ((irq - 16) / 16). */
	regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
	outl(mask & 0xffff0000UL, regaddr);
}
52 | |||
53 | static inline void | ||
54 | eiger_enable_irq(unsigned int irq) | ||
55 | { | ||
56 | unsigned long mask; | ||
57 | mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); | ||
58 | eiger_update_irq_hw(irq, mask); | ||
59 | } | ||
60 | |||
61 | static void | ||
62 | eiger_disable_irq(unsigned int irq) | ||
63 | { | ||
64 | unsigned long mask; | ||
65 | mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); | ||
66 | eiger_update_irq_hw(irq, mask); | ||
67 | } | ||
68 | |||
/* Startup op: just unmask.  This controller never has an interrupt
   already pending at startup, so always report 0. */
static unsigned int
eiger_startup_irq(unsigned int irq)
{
	eiger_enable_irq(irq);
	return 0;
}
75 | |||
76 | static void | ||
77 | eiger_end_irq(unsigned int irq) | ||
78 | { | ||
79 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
80 | eiger_enable_irq(irq); | ||
81 | } | ||
82 | |||
/* Interrupt controller ops for the Eiger's on-board (non-ISA) irqs.
   shutdown/disable/ack all simply mask; end re-enables when safe. */
static struct hw_interrupt_type eiger_irq_type = {
	.typename = "EIGER",
	.startup = eiger_startup_irq,
	.shutdown = eiger_disable_irq,
	.enable = eiger_enable_irq,
	.disable = eiger_disable_irq,
	.ack = eiger_disable_irq,
	.end = eiger_end_irq,
};
92 | |||
/*
 * Top-level device interrupt dispatch.
 *
 * The PALcode passes us vector 0x800 or 0x810 (IRQ0 or IRQ1).  IRQ1
 * is a PCI interrupt; IRQ0 is probably ISA, but PCI interrupts can
 * arrive on IRQ0 too when the interrupt controller is not in
 * accelerated mode.  Since the accelerator is unreliable, ignore the
 * vector and consult the Master Interrupt Register at port 0x500
 * directly: any of its low four bits set means PCI, otherwise hand
 * off to the ISA dispatcher.
 */
static void
eiger_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	unsigned intstatus;
	int bit;

	intstatus = inw(0x500) & 15;
	if (!intstatus) {
		isa_device_interrupt(vector, regs);
		return;
	}

	/* PCI interrupt: dispatch every set bit, highest first
	   (same order as the original chain of tests). */
	for (bit = 3; bit >= 0; --bit)
		if (intstatus & (1U << bit))
			handle_irq(16 + bit, regs);
}
129 | |||
/* SRM console dispatch: SRM delivers vector 0x800 + 16*irq, so the
   irq number falls directly out of the vector. */
static void
eiger_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	handle_irq((vector - 0x800) >> 4, regs);
}
136 | |||
/*
 * Boot-time interrupt setup: reset the ISA DMA controllers, pick the
 * SRM dispatch path when running under the SRM console, mask all
 * board irqs in hardware, then register the i8259 (ISA) irqs and
 * attach eiger_irq_type to irqs 16-127.
 */
static void __init
eiger_init_irq(void)
{
	long i;

	/* Reset both ISA DMA controllers and cascade DMA2. */
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	if (alpha_using_srm)
		alpha_mv.device_interrupt = eiger_srm_device_interrupt;

	/* Mask every board irq: one hw write per 16-irq group. */
	for (i = 16; i < 128; i += 16)
		eiger_update_irq_hw(i, -1);

	init_i8259a_irqs();

	/* Board irqs are level-triggered; start them disabled. */
	for (i = 16; i < 128; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &eiger_irq_type;
	}
}
160 | |||
/*
 * Map a PCI device to its Linux irq number.
 *
 * The SRM console has already calculated the IRQ values for option
 * cards and stored them in PCI config space.  Since that works, just
 * read back the value the console set and convert it to a value
 * usable by Linux.
 *
 * All the IRQ values written by the console are greater than 0x90, so
 * we subtract 0x80 -- presumably 0x90 minus the 16 allocated ISA
 * IRQs (the code only shows the 0x80 constant; confirm against SRM
 * documentation).
 */
static int __init
eiger_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	u8 irq_orig;

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig);

	return irq_orig - 0x80;
}
177 | |||
178 | static u8 __init | ||
179 | eiger_swizzle(struct pci_dev *dev, u8 *pinp) | ||
180 | { | ||
181 | struct pci_controller *hose = dev->sysdata; | ||
182 | int slot, pin = *pinp; | ||
183 | int bridge_count = 0; | ||
184 | |||
185 | /* Find the number of backplane bridges. */ | ||
186 | int backplane = inw(0x502) & 0x0f; | ||
187 | |||
188 | switch (backplane) | ||
189 | { | ||
190 | case 0x00: bridge_count = 0; break; /* No bridges */ | ||
191 | case 0x01: bridge_count = 1; break; /* 1 */ | ||
192 | case 0x03: bridge_count = 2; break; /* 2 */ | ||
193 | case 0x07: bridge_count = 3; break; /* 3 */ | ||
194 | case 0x0f: bridge_count = 4; break; /* 4 */ | ||
195 | }; | ||
196 | |||
197 | slot = PCI_SLOT(dev->devfn); | ||
198 | while (dev->bus->self) { | ||
199 | /* Check for built-in bridges on hose 0. */ | ||
200 | if (hose->index == 0 | ||
201 | && (PCI_SLOT(dev->bus->self->devfn) | ||
202 | > 20 - bridge_count)) { | ||
203 | slot = PCI_SLOT(dev->devfn); | ||
204 | break; | ||
205 | } | ||
206 | /* Must be a card-based bridge. */ | ||
207 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); | ||
208 | |||
209 | /* Move up the chain of bridges. */ | ||
210 | dev = dev->bus->self; | ||
211 | } | ||
212 | *pinp = pin; | ||
213 | return slot; | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * The System Vectors | ||
218 | */ | ||
219 | |||
/*
 * Machine vector for the Eiger platform: EV6 CPU with Tsunami core
 * logic.  Uses the local swizzle/map routines because of the
 * backplane-bridge arrangement handled above.
 */
struct alpha_machine_vector eiger_mv __initmv = {
	.vector_name = "Eiger",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check = tsunami_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = TSUNAMI_DAC_OFFSET,

	/* 16 ISA + 112 board irqs. */
	.nr_irqs = 128,
	.device_interrupt = eiger_device_interrupt,

	.init_arch = tsunami_init_arch,
	.init_irq = eiger_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = common_init_pci,
	.kill_arch = tsunami_kill_arch,
	.pci_map_irq = eiger_map_irq,
	.pci_swizzle = eiger_swizzle,
};
ALIAS_MV(eiger)
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c new file mode 100644 index 000000000000..fcabb7c96a16 --- /dev/null +++ b/arch/alpha/kernel/sys_jensen.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_jensen.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Copyright (C) 1998, 1999 Richard Henderson | ||
6 | * | ||
7 | * Code supporting the Jensen. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/init.h> | ||
16 | |||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/system.h> | ||
19 | |||
20 | #define __EXTERN_INLINE inline | ||
21 | #include <asm/io.h> | ||
22 | #include <asm/jensen.h> | ||
23 | #undef __EXTERN_INLINE | ||
24 | |||
25 | #include <asm/dma.h> | ||
26 | #include <asm/irq.h> | ||
27 | #include <asm/mmu_context.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | |||
31 | #include "proto.h" | ||
32 | #include "irq_impl.h" | ||
33 | #include "pci_impl.h" | ||
34 | #include "machvec_impl.h" | ||
35 | |||
36 | |||
37 | /* | ||
38 | * Jensen is special: the vector is 0x8X0 for EISA interrupt X, and | ||
39 | * 0x9X0 for the local motherboard interrupts. | ||
40 | * | ||
41 | * Note especially that those local interrupts CANNOT be masked, | ||
42 | * which causes much of the pain below... | ||
43 | * | ||
44 | * 0x660 - NMI | ||
45 | * | ||
46 | * 0x800 - IRQ0 interval timer (not used, as we use the RTC timer) | ||
47 | * 0x810 - IRQ1 line printer (duh..) | ||
48 | * 0x860 - IRQ6 floppy disk | ||
49 | * | ||
50 | * 0x900 - COM1 | ||
51 | * 0x920 - COM2 | ||
52 | * 0x980 - keyboard | ||
53 | * 0x990 - mouse | ||
54 | * | ||
55 | * PCI-based systems are more sane: they don't have the local | ||
56 | * interrupts at all, and have only normal PCI interrupts from | ||
57 | * devices. Happily it's easy enough to do a sane mapping from the | ||
58 | * Jensen. | ||
59 | * | ||
60 | * Note that this means that we may have to do a hardware | ||
61 | * "local_op" to a different interrupt than we report to the rest of the | ||
62 | * world. | ||
63 | */ | ||
64 | |||
65 | static unsigned int | ||
66 | jensen_local_startup(unsigned int irq) | ||
67 | { | ||
68 | /* the parport is really hw IRQ 1, silly Jensen. */ | ||
69 | if (irq == 7) | ||
70 | i8259a_startup_irq(1); | ||
71 | else | ||
72 | /* | ||
73 | * For all true local interrupts, set the flag that prevents | ||
74 | * the IPL from being dropped during handler processing. | ||
75 | */ | ||
76 | if (irq_desc[irq].action) | ||
77 | irq_desc[irq].action->flags |= SA_INTERRUPT; | ||
78 | return 0; | ||
79 | } | ||
80 | |||
/* Shutdown op: only the parport (irq 7 -> hw IRQ 1) can actually be
   masked; the true local interrupts cannot. */
static void
jensen_local_shutdown(unsigned int irq)
{
	if (irq != 7)
		return;
	i8259a_disable_irq(1);
}
88 | |||
/* Enable op: only the parport (irq 7 -> hw IRQ 1) is maskable. */
static void
jensen_local_enable(unsigned int irq)
{
	if (irq != 7)
		return;
	i8259a_enable_irq(1);
}
96 | |||
/* Disable op: only the parport (irq 7 -> hw IRQ 1) is maskable. */
static void
jensen_local_disable(unsigned int irq)
{
	if (irq != 7)
		return;
	i8259a_disable_irq(1);
}
104 | |||
/* Ack op: only the parport (irq 7 -> hw IRQ 1) needs an i8259 ack;
   the true local interrupts have nothing to acknowledge. */
static void
jensen_local_ack(unsigned int irq)
{
	if (irq != 7)
		return;
	i8259a_mask_and_ack_irq(1);
}
112 | |||
/* End op: only the parport (irq 7 -> hw IRQ 1) goes through the
   i8259 end-of-interrupt path. */
static void
jensen_local_end(unsigned int irq)
{
	if (irq != 7)
		return;
	i8259a_end_irq(1);
}
120 | |||
/* Controller ops for the Jensen's local motherboard interrupts.
   Most ops are no-ops except for the parport special case (irq 7),
   which is forwarded to hw IRQ 1 on the i8259. */
static struct hw_interrupt_type jensen_local_irq_type = {
	.typename = "LOCAL",
	.startup = jensen_local_startup,
	.shutdown = jensen_local_shutdown,
	.enable = jensen_local_enable,
	.disable = jensen_local_disable,
	.ack = jensen_local_ack,
	.end = jensen_local_end,
};
130 | |||
/*
 * Top-level Jensen interrupt dispatch.  Vectors 0x8X0 are EISA
 * interrupt X; 0x9X0 are the unmaskable local motherboard
 * interrupts (see the table at the top of the file); 0x660 is NMI.
 */
static void
jensen_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq;

	switch (vector) {
	case 0x660:
		/* NMI cannot be handled; report and return. */
		printk("Whee.. NMI received. Probable hardware error\n");
		printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461));
		return;

	/* local device interrupts: */
	case 0x900: irq = 4; break;		/* com1 -> irq 4 */
	case 0x920: irq = 3; break;		/* com2 -> irq 3 */
	case 0x980: irq = 1; break;		/* kbd -> irq 1 */
	case 0x990: irq = 9; break;		/* mouse -> irq 9 */

	default:
		if (vector > 0x900) {
			printk("Unknown local interrupt %lx\n", vector);
			return;
		}

		/* EISA vector -> irq; hw IRQ 1 is reported as irq 7
		   because the parport really lives there. */
		irq = (vector - 0x800) >> 4;
		if (irq == 1)
			irq = 7;
		break;
	}

	/* If there is no handler yet... */
	if (irq_desc[irq].action == NULL) {
		/* If it is a local interrupt that cannot be masked...
		   quiesce every possible local source so it stops
		   screaming at us. */
		if (vector >= 0x900)
		{
			/* Clear keyboard/mouse state */
			inb(0x64);
			inb(0x60);
			/* Reset serial ports */
			inb(0x3fa);
			inb(0x2fa);
			outb(0x0c, 0x3fc);
			outb(0x0c, 0x2fc);
			/* Clear NMI */
			outb(0,0x61);
			outb(0,0x461);
		}
	}

#if 0
        /* A useful bit of code to find out if an interrupt is going wild. */
        {
          static unsigned int last_msg = 0, last_cc = 0;
          static int last_irq = -1, count = 0;
          unsigned int cc;

          __asm __volatile("rpcc %0" : "=r"(cc));
          ++count;
#define JENSEN_CYCLES_PER_SEC	(150000000)
          if (cc - last_msg > ((JENSEN_CYCLES_PER_SEC) * 3) ||
	      irq != last_irq) {
                printk(KERN_CRIT " irq %d count %d cc %u @ %lx\n",
                       irq, count, cc-last_cc, regs->pc);
                count = 0;
                last_msg = cc;
                last_irq = irq;
          }
          last_cc = cc;
        }
#endif

	handle_irq(irq, regs);
}
203 | |||
204 | static void __init | ||
205 | jensen_init_irq(void) | ||
206 | { | ||
207 | init_i8259a_irqs(); | ||
208 | |||
209 | irq_desc[1].handler = &jensen_local_irq_type; | ||
210 | irq_desc[4].handler = &jensen_local_irq_type; | ||
211 | irq_desc[3].handler = &jensen_local_irq_type; | ||
212 | irq_desc[7].handler = &jensen_local_irq_type; | ||
213 | irq_desc[9].handler = &jensen_local_irq_type; | ||
214 | |||
215 | common_init_isa_dma(); | ||
216 | } | ||
217 | |||
/*
 * Arch init for the Jensen.  There is no real PCI on this machine,
 * so fake an ISA bridge (for DMA masks) and build a single hose so
 * that I/O base addresses can still be reported to userland.
 */
static void __init
jensen_init_arch(void)
{
	struct pci_controller *hose;
#ifdef CONFIG_PCI
	/* A fake bridge with a full 32-bit DMA mask. */
	static struct pci_dev fake_isa_bridge = { .dma_mask = 0xffffffffUL, };

	isa_bridge = &fake_isa_bridge;
#endif

	/* Create a hose so that we can report i/o base addresses to
	   userland. */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* Sparse-space bases come from the EISA window; there is no
	   dense space on the Jensen. */
	hose->sparse_mem_base = EISA_MEM - IDENT_ADDR;
	hose->dense_mem_base = 0;
	hose->sparse_io_base = EISA_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/* No scatter-gather; everything is direct-mapped. */
	hose->sg_isa = hose->sg_pci = NULL;
	__direct_map_base = 0;
	__direct_map_size = 0xffffffff;
}
245 | |||
/* Minimal machine-check handler: just log the event.  The vector and
   logout-area arguments are ignored. */
static void
jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs)
{
	printk(KERN_CRIT "Machine check\n");
}
251 | |||
252 | |||
253 | /* | ||
254 | * The System Vector | ||
255 | */ | ||
256 | |||
/*
 * Machine vector for the Jensen (DECpc AXP 150): EV4 CPU, EISA-only
 * machine with no PCI, hence init_pci/kill_arch are NULL and IO_LITE
 * wires up the Jensen-specific I/O routines.
 */
struct alpha_machine_vector jensen_mv __initmv = {
	.vector_name = "Jensen",
	DO_EV4_MMU,
	IO_LITE(JENSEN,jensen),
	.machine_check = jensen_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	/* Non-standard RTC location on this machine. */
	.rtc_port = 0x170,

	.nr_irqs = 16,
	.device_interrupt = jensen_device_interrupt,

	.init_arch = jensen_init_arch,
	.init_irq = jensen_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = NULL,
	.kill_arch = NULL,
};
ALIAS_MV(jensen)
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c new file mode 100644 index 000000000000..804727853d25 --- /dev/null +++ b/arch/alpha/kernel/sys_marvel.c | |||
@@ -0,0 +1,499 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_marvel.c | ||
3 | * | ||
4 | * Marvel / IO7 support | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/bitops.h> | ||
14 | |||
15 | #include <asm/ptrace.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/dma.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include <asm/mmu_context.h> | ||
20 | #include <asm/io.h> | ||
21 | #include <asm/pgtable.h> | ||
22 | #include <asm/core_marvel.h> | ||
23 | #include <asm/hwrpb.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | |||
26 | #include "proto.h" | ||
27 | #include "err_impl.h" | ||
28 | #include "irq_impl.h" | ||
29 | #include "pci_impl.h" | ||
30 | #include "machvec_impl.h" | ||
31 | |||
32 | #if NR_IRQS < MARVEL_NR_IRQS | ||
33 | # error NR_IRQS < MARVEL_NR_IRQS !!! | ||
34 | #endif | ||
35 | |||
36 | |||
37 | /* | ||
38 | * Interrupt handling. | ||
39 | */ | ||
/*
 * Top-level Marvel/IO7 interrupt dispatch: decode the PE number and
 * per-IO7 irq out of the vector and hand off to the generic layer.
 */
static void
io7_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	unsigned int pid;
	unsigned int irq;

	/*
	 * Vector is 0x800 + (interrupt)
	 *
	 * where (interrupt) is:
	 *
	 *	...16|15 14|13     4|3 0
	 *	-----+-----+--------+---
	 *	  PE |  0  |  irq   | 0
	 *
	 * where (irq) is
	 *
	 *	0x0800 - 0x0ff0	 - 0x0800 + (LSI id << 4)
	 *	0x1000 - 0x2ff0	 - 0x1000 + (MSI_DAT<8:0> << 4)
	 */
	pid = vector >> 16;
	irq = ((vector & 0xffff) - 0x800) >> 4;

	irq += 16;				/* offset for legacy */
	irq &= MARVEL_IRQ_VEC_IRQ_MASK;		/* not too many bits */
	irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid     */

	handle_irq(irq, regs);
}
69 | |||
/*
 * Translate a merged (PE | irq) number back to the CSR that controls
 * it on the owning IO7.  Returns NULL (after logging) if the PE has
 * no IO7 or the irq is out of range; on success optionally stores
 * the io7 through *pio7.
 */
static volatile unsigned long *
io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
{
	volatile unsigned long *ctl;
	unsigned int pid;
	struct io7 *io7;

	pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;

	if (!(io7 = marvel_find_io7(pid))) {
		printk(KERN_ERR
		       "%s for nonexistent io7 -- vec %x, pid %d\n",
		       __FUNCTION__, irq, pid);
		return NULL;
	}

	irq &= MARVEL_IRQ_VEC_IRQ_MASK;	/* isolate the vector    */
	irq -= 16;			/* subtract legacy bias  */

	/* NOTE: irq is unsigned, so a legacy irq (< 16) wraps to a
	   huge value here and is rejected by the range check below. */
	if (irq >= 0x180) {
		printk(KERN_ERR
		       "%s for invalid irq -- pid %d adjusted irq %x\n",
		       __FUNCTION__, pid, irq);
		return NULL;
	}

	/* irqs 0-0x7f are LSIs, 0x80+ select an MSI control CSR. */
	ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */
	if (irq >= 0x80)	        	/* MSI */
		ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;

	if (pio7) *pio7 = io7;
	return ctl;
}
103 | |||
104 | static void | ||
105 | io7_enable_irq(unsigned int irq) | ||
106 | { | ||
107 | volatile unsigned long *ctl; | ||
108 | struct io7 *io7; | ||
109 | |||
110 | ctl = io7_get_irq_ctl(irq, &io7); | ||
111 | if (!ctl || !io7) { | ||
112 | printk(KERN_ERR "%s: get_ctl failed for irq %x\n", | ||
113 | __FUNCTION__, irq); | ||
114 | return; | ||
115 | } | ||
116 | |||
117 | spin_lock(&io7->irq_lock); | ||
118 | *ctl |= 1UL << 24; | ||
119 | mb(); | ||
120 | *ctl; | ||
121 | spin_unlock(&io7->irq_lock); | ||
122 | } | ||
123 | |||
124 | static void | ||
125 | io7_disable_irq(unsigned int irq) | ||
126 | { | ||
127 | volatile unsigned long *ctl; | ||
128 | struct io7 *io7; | ||
129 | |||
130 | ctl = io7_get_irq_ctl(irq, &io7); | ||
131 | if (!ctl || !io7) { | ||
132 | printk(KERN_ERR "%s: get_ctl failed for irq %x\n", | ||
133 | __FUNCTION__, irq); | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | spin_lock(&io7->irq_lock); | ||
138 | *ctl &= ~(1UL << 24); | ||
139 | mb(); | ||
140 | *ctl; | ||
141 | spin_unlock(&io7->irq_lock); | ||
142 | } | ||
143 | |||
/* Startup op: just enable.  Nothing can be pending yet, so report 0. */
static unsigned int
io7_startup_irq(unsigned int irq)
{
	io7_enable_irq(irq);
	return 0;
}
150 | |||
151 | static void | ||
152 | io7_end_irq(unsigned int irq) | ||
153 | { | ||
154 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
155 | io7_enable_irq(irq); | ||
156 | } | ||
157 | |||
/* No-op controller hook for the reserved legacy irqs. */
static void
marvel_irq_noop(unsigned int irq)
{
}
163 | |||
/* No-op startup hook for the reserved legacy irqs: nothing pending. */
static unsigned int
marvel_irq_noop_return(unsigned int irq)
{
	return 0;
}
169 | |||
/* Placeholder ops for the 16 reserved legacy (ISA) irqs: everything
   is a no-op. */
static struct hw_interrupt_type marvel_legacy_irq_type = {
	.typename = "LEGACY",
	.startup = marvel_irq_noop_return,
	.shutdown = marvel_irq_noop,
	.enable = marvel_irq_noop,
	.disable = marvel_irq_noop,
	.ack = marvel_irq_noop,
	.end = marvel_irq_noop,
};

/* Ops for IO7 level-sensitive interrupts: ack by masking. */
static struct hw_interrupt_type io7_lsi_irq_type = {
	.typename = "LSI",
	.startup = io7_startup_irq,
	.shutdown = io7_disable_irq,
	.enable = io7_enable_irq,
	.disable = io7_disable_irq,
	.ack = io7_disable_irq,
	.end = io7_end_irq,
};

/* Ops for IO7 message-signalled interrupts: MSIs are edge-like, so
   ack is a no-op rather than a mask. */
static struct hw_interrupt_type io7_msi_irq_type = {
	.typename = "MSI",
	.startup = io7_startup_irq,
	.shutdown = io7_disable_irq,
	.enable = io7_enable_irq,
	.disable = io7_disable_irq,
	.ack = marvel_irq_noop,
	.end = io7_end_irq,
};
199 | |||
/* Retarget an error-class interrupt CSR at a new PE.  The target PID
   field lives in bits <32:24> of these CSRs.  (The io7 argument is
   unused but kept for interface symmetry with the callers.) */
static void
io7_redirect_irq(struct io7 *io7,
		 volatile unsigned long *csr,
		 unsigned int where)
{
	unsigned long val = *csr;

	val = (val & ~(0x1ffUL << 24)) | ((unsigned long)where << 24);

	*csr = val;
	mb();
	*csr;			/* read back to flush */
}
215 | |||
216 | static void | ||
217 | io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where) | ||
218 | { | ||
219 | unsigned long val; | ||
220 | |||
221 | /* | ||
222 | * LSI_CTL has target PID @ 14 | ||
223 | */ | ||
224 | val = io7->csrs->PO7_LSI_CTL[which].csr; | ||
225 | val &= ~(0x1ffUL << 14); /* clear the target pid */ | ||
226 | val |= ((unsigned long)where << 14); /* set the new target pid */ | ||
227 | |||
228 | io7->csrs->PO7_LSI_CTL[which].csr = val; | ||
229 | mb(); | ||
230 | io7->csrs->PO7_LSI_CTL[which].csr; | ||
231 | } | ||
232 | |||
233 | static void | ||
234 | io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where) | ||
235 | { | ||
236 | unsigned long val; | ||
237 | |||
238 | /* | ||
239 | * MSI_CTL has target PID @ 14 | ||
240 | */ | ||
241 | val = io7->csrs->PO7_MSI_CTL[which].csr; | ||
242 | val &= ~(0x1ffUL << 14); /* clear the target pid */ | ||
243 | val |= ((unsigned long)where << 14); /* set the new target pid */ | ||
244 | |||
245 | io7->csrs->PO7_MSI_CTL[which].csr = val; | ||
246 | mb(); | ||
247 | io7->csrs->PO7_MSI_CTL[which].csr; | ||
248 | } | ||
249 | |||
/* Initialize one LSI control CSR: target it at PE 'where' with every
   other bit (including the enable bit) clear, i.e. disabled. */
static void __init
init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
	/*
	 * LSI_CTL has target PID @ 14
	 */
	io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
	mb();
	io7->csrs->PO7_LSI_CTL[which].csr;	/* read back to flush */
}
260 | |||
/* Initialize one MSI control CSR: target it at PE 'where' with every
   other bit (including the enable bit) clear, i.e. disabled. */
static void __init
init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
	/*
	 * MSI_CTL has target PID @ 14
	 */
	io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
	mb();
	io7->csrs->PO7_MSI_CTL[which].csr;	/* read back to flush */
}
271 | |||
/*
 * Set up every interrupt source on one IO7: route its error irqs to
 * the boot CPU, register LSI and MSI controller ops with the generic
 * layer, and program the implemented sources disabled and targeted
 * at the boot CPU.
 */
static void __init
init_io7_irqs(struct io7 *io7,
	      struct hw_interrupt_type *lsi_ops,
	      struct hw_interrupt_type *msi_ops)
{
	/* This IO7's irqs start at (PE << shift) + 16 (legacy bias). */
	long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
	long i;

	printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
	       io7->pe, base);

	/*
	 * Where should interrupts from this IO7 go?
	 *
	 * They really should be sent to the local CPU to avoid having to
	 * traverse the mesh, but if it's not an SMP kernel, they have to
	 * go to the boot CPU. Send them all to the boot CPU for now,
	 * as each secondary starts, it can redirect it's local device
	 * interrupts.
	 */
	printk("  Interrupts reported to CPU at PE %u\n", boot_cpuid);

	spin_lock(&io7->irq_lock);

	/* set up the error irqs */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);

	/* Set up the lsi irqs. */
	for (i = 0; i < 128; ++i) {
		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[base + i].handler = lsi_ops;
	}

	/* Disable the implemented irqs in hardware.  Writing only the
	   target PID leaves the enable bit clear.  LSIs 0x00-0x5f plus
	   0x74/0x75 are the implemented sources. */
	for (i = 0; i < 0x60; ++i)
		init_one_io7_lsi(io7, i, boot_cpuid);

	init_one_io7_lsi(io7, 0x74, boot_cpuid);
	init_one_io7_lsi(io7, 0x75, boot_cpuid);


	/* Set up the msi irqs. */
	for (i = 128; i < (128 + 512); ++i) {
		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[base + i].handler = msi_ops;
	}

	for (i = 0; i < 16; ++i)
		init_one_io7_msi(io7, i, boot_cpuid);

	spin_unlock(&io7->irq_lock);
}
328 | |||
329 | static void __init | ||
330 | marvel_init_irq(void) | ||
331 | { | ||
332 | int i; | ||
333 | struct io7 *io7 = NULL; | ||
334 | |||
335 | /* Reserve the legacy irqs. */ | ||
336 | for (i = 0; i < 16; ++i) { | ||
337 | irq_desc[i].status = IRQ_DISABLED; | ||
338 | irq_desc[i].handler = &marvel_legacy_irq_type; | ||
339 | } | ||
340 | |||
341 | /* Init the io7 irqs. */ | ||
342 | for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) | ||
343 | init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type); | ||
344 | } | ||
345 | |||
/*
 * Map a PCI device to its Linux irq number.  Uses the console-written
 * PCI_INTERRUPT_LINE value (an LSI) unless the device has MSI
 * enabled, in which case the irq comes from the MSI data value with
 * the 0x80 LSI/MSI bias applied.  The result is merged with the
 * legacy offset and the owning IO7's PE number, matching the decode
 * in io7_device_interrupt().
 */
static int
marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pci_controller *hose = dev->sysdata;
	struct io7_port *io7_port = hose->sysdata;
	struct io7 *io7 = io7_port->io7;
	int msi_loc, msi_data_off;
	u16 msg_ctl;
	u16 msg_dat;
	u8 intline;
	int irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	/* Does the device have MSI enabled? */
	msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
	msg_ctl = 0;
	if (msi_loc)
		pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);

	if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
		/* The data field's location depends on 32/64-bit MSI. */
		msi_data_off = PCI_MSI_DATA_32;
		if (msg_ctl & PCI_MSI_FLAGS_64BIT)
			msi_data_off = PCI_MSI_DATA_64;
		pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);

		irq = msg_dat & 0x1ff;		/* we use msg_data<8:0> */
		irq += 0x80;			/* offset for lsi       */

#if 1
		/* Verbose MSI reporting, left enabled on purpose. */
		printk("PCI:%d:%d:%d (hose %d) [%s] is using MSI\n",
		       dev->bus->number,
		       PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn),
		       hose->index,
		       pci_pretty_name (dev));
		printk("  %d message(s) from 0x%04x\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       msg_dat);
		printk("  reporting on %d IRQ(s) from %d (0x%x)\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
#endif

#if 0
		/* Disabled fallback: turn MSI off and use the LSI. */
		pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
				      msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
		irq = intline;

		printk("  forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
#endif
	}

	irq += 16;					/* offset for legacy */
	irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid     */

	return irq;
}
406 | |||
/*
 * PCI init: error handlers must be registered before probing; the
 * console has already configured the buses, so only probe
 * (pci_probe_only).  Finally clear any errors latched during boot.
 */
static void __init
marvel_init_pci(void)
{
	struct io7 *io7;

	marvel_register_error_handlers();

	pci_probe_only = 1;
	common_init_pci();

#ifdef CONFIG_VGA_HOSE
	locate_and_init_vga(NULL);
#endif

	/* Clear any io7 errors. */
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
		io7_clear_errors(io7);
}
425 | |||
/* RTC init hook: just wire up the RTC interrupt; no Marvel-specific
   setup is needed. */
static void
marvel_init_rtc(void)
{
	init_rtc_irq();
}
431 | |||
/*
 * Secondary-CPU callin hook: if this CPU's PE has a local IO7, pull
 * all of that IO7's interrupts (error, LSI, MSI) over to this CPU so
 * they no longer traverse the mesh to the boot CPU.
 */
static void
marvel_smp_callin(void)
{
	int cpuid = hard_smp_processor_id();
	struct io7 *io7 = marvel_find_io7(cpuid);
	unsigned int i;

	if (!io7)
		return;

	/*
	 * There is a local IO7 - redirect all of its interrupts here.
	 */
	printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);

	/* Redirect the error IRQS here. */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);

	/* Redirect the implemented LSIs here (same set that
	   init_io7_irqs programmed). */
	for (i = 0; i < 0x60; ++i)
		io7_redirect_one_lsi(io7, i, cpuid);

	io7_redirect_one_lsi(io7, 0x74, cpuid);
	io7_redirect_one_lsi(io7, 0x75, cpuid);

	/* Redirect the MSIs here. */
	for (i = 0; i < 16; ++i)
		io7_redirect_one_msi(io7, i, cpuid);
}
465 | |||
466 | /* | ||
467 | * System Vectors | ||
468 | */ | ||
/*
 * Machine vector for Marvel (EV7) systems.  NUMA-aware: the
 * pa_to_nid/cpuid_to_nid/node_mem_* hooks expose the per-PE memory
 * topology, and smp_callin repoints each IO7 at its local CPU.
 */
struct alpha_machine_vector marvel_ev7_mv __initmv = {
	.vector_name = "MARVEL/EV7",
	DO_EV7_MMU,
	DO_DEFAULT_RTC,
	DO_MARVEL_IO,
	.machine_check = marvel_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = IO7_DAC_OFFSET,

	.nr_irqs = MARVEL_NR_IRQS,
	.device_interrupt = io7_device_interrupt,

	.agp_info = marvel_agp_info,

	.smp_callin = marvel_smp_callin,
	.init_arch = marvel_init_arch,
	.init_irq = marvel_init_irq,
	.init_rtc = marvel_init_rtc,
	.init_pci = marvel_init_pci,
	.kill_arch = marvel_kill_arch,
	.pci_map_irq = marvel_map_irq,
	.pci_swizzle = common_swizzle,

	.pa_to_nid = marvel_pa_to_nid,
	.cpuid_to_nid = marvel_cpuid_to_nid,
	.node_mem_start = marvel_node_mem_start,
	.node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c new file mode 100644 index 000000000000..61ac56f8eeea --- /dev/null +++ b/arch/alpha/kernel/sys_miata.c | |||
@@ -0,0 +1,289 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_miata.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999, 2000 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the MIATA (EV56+PYXIS). | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/reboot.h> | ||
18 | |||
19 | #include <asm/ptrace.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/core_cia.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | #include "proto.h" | ||
30 | #include "irq_impl.h" | ||
31 | #include "pci_impl.h" | ||
32 | #include "machvec_impl.h" | ||
33 | |||
34 | |||
/*
 * Translate an SRM interrupt vector into a Linux IRQ number and
 * dispatch it.
 */
static void
miata_srm_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	int irq = (vector - 0x800) >> 4;

	/*
	 * The MIATA SRM console ignores the low 8 bits of the interrupt
	 * summary register and reports vectors 0x80 *lower* than the bit
	 * numbering in the documentation would suggest.  That works because
	 * the low 8 summary bits never report interrupts anyway (the
	 * PCI-ISA bridge, bit 7, is not used for this purpose: PIC
	 * interrupts arrive as vectors 0x800-0x8f0).  Rather than disturb
	 * the IRQ allocation fixups or the alpha_irq_mask maintenance,
	 * which are clean as they stand, compensate here by shifting
	 * IRQ 16 and above up by 8.  Grotty, but contained.
	 */
	if (irq >= 16)
		irq += 8;

	handle_irq(irq, regs);
}
61 | |||
62 | static void __init | ||
63 | miata_init_irq(void) | ||
64 | { | ||
65 | if (alpha_using_srm) | ||
66 | alpha_mv.device_interrupt = miata_srm_device_interrupt; | ||
67 | |||
68 | #if 0 | ||
69 | /* These break on MiataGL so we'll try not to do it at all. */ | ||
70 | *(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */ | ||
71 | *(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */ | ||
72 | #endif | ||
73 | |||
74 | init_i8259a_irqs(); | ||
75 | |||
76 | /* Not interested in the bogus interrupts (3,10), Fan Fault (0), | ||
77 | NMI (1), or EIDE (9). | ||
78 | |||
79 | We also disable the risers (4,5), since we don't know how to | ||
80 | route the interrupts behind the bridge. */ | ||
81 | init_pyxis_irqs(0x63b0000); | ||
82 | |||
83 | common_init_isa_dma(); | ||
84 | setup_irq(16+2, &halt_switch_irqaction); /* SRM only? */ | ||
85 | setup_irq(16+6, &timer_cascade_irqaction); | ||
86 | } | ||
87 | |||
88 | |||
89 | /* | ||
90 | * PCI Fixup configuration. | ||
91 | * | ||
92 | * Summary @ PYXIS_INT_REQ: | ||
93 | * Bit Meaning | ||
94 | * 0 Fan Fault | ||
95 | * 1 NMI | ||
96 | * 2 Halt/Reset switch | ||
97 | * 3 none | ||
98 | * 4 CID0 (Riser ID) | ||
99 | * 5 CID1 (Riser ID) | ||
100 | * 6 Interval timer | ||
101 | * 7 PCI-ISA Bridge | ||
102 | * 8 Ethernet | ||
103 | * 9 EIDE (deprecated, ISA 14/15 used) | ||
104 | *10 none | ||
105 | *11 USB | ||
106 | *12 Interrupt Line A from slot 4 | ||
107 | *13 Interrupt Line B from slot 4 | ||
108 | *14 Interrupt Line C from slot 4 | ||
109 | *15 Interrupt Line D from slot 4 | ||
110 | *16 Interrupt Line A from slot 5 | ||
111 | *17 Interrupt line B from slot 5 | ||
112 | *18 Interrupt Line C from slot 5 | ||
113 | *19 Interrupt Line D from slot 5 | ||
114 | *20 Interrupt Line A from slot 1 | ||
115 | *21 Interrupt Line B from slot 1 | ||
116 | *22 Interrupt Line C from slot 1 | ||
117 | *23 Interrupt Line D from slot 1 | ||
118 | *24 Interrupt Line A from slot 2 | ||
119 | *25 Interrupt Line B from slot 2 | ||
120 | *26 Interrupt Line C from slot 2 | ||
121 | *27 Interrupt Line D from slot 2 | ||
 *28 Interrupt Line A from slot 3 | ||
123 | *29 Interrupt Line B from slot 3 | ||
124 | *30 Interrupt Line C from slot 3 | ||
125 | *31 Interrupt Line D from slot 3 | ||
126 | * | ||
127 | * The device to slot mapping looks like: | ||
128 | * | ||
129 | * Slot Device | ||
130 | * 3 DC21142 Ethernet | ||
131 | * 4 EIDE CMD646 | ||
132 | * 5 none | ||
133 | * 6 USB | ||
134 | * 7 PCI-ISA bridge | ||
135 | * 8 PCI-PCI Bridge (SBU Riser) | ||
136 | * 9 none | ||
137 | * 10 none | ||
138 | * 11 PCI on board slot 4 (SBU Riser) | ||
139 | * 12 PCI on board slot 5 (SBU Riser) | ||
140 | * | ||
141 | * These are behind the bridge, so I'm not sure what to do... | ||
142 | * | ||
143 | * 13 PCI on board slot 1 (SBU Riser) | ||
144 | * 14 PCI on board slot 2 (SBU Riser) | ||
145 | * 15 PCI on board slot 3 (SBU Riser) | ||
146 | * | ||
147 | * | ||
148 | * This two layered interrupt approach means that we allocate IRQ 16 and | ||
149 | * above for PCI interrupts. The IRQ relates to which bit the interrupt | ||
150 | * comes in on. This makes interrupt processing much easier. | ||
151 | */ | ||
152 | |||
153 | static int __init | ||
154 | miata_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
155 | { | ||
156 | static char irq_tab[18][5] __initdata = { | ||
157 | /*INT INTA INTB INTC INTD */ | ||
158 | {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ | ||
159 | { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ | ||
160 | { -1, -1, -1, -1, -1}, /* IdSel 16, none */ | ||
161 | { -1, -1, -1, -1, -1}, /* IdSel 17, none */ | ||
162 | { -1, -1, -1, -1, -1}, /* IdSel 18, PCI-ISA */ | ||
163 | { -1, -1, -1, -1, -1}, /* IdSel 19, PCI-PCI */ | ||
164 | { -1, -1, -1, -1, -1}, /* IdSel 20, none */ | ||
165 | { -1, -1, -1, -1, -1}, /* IdSel 21, none */ | ||
166 | {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 22, slot 4 */ | ||
167 | {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 23, slot 5 */ | ||
168 | /* the next 7 are actually on PCI bus 1, across the bridge */ | ||
169 | {16+11, 16+11, 16+11, 16+11, 16+11}, /* IdSel 24, QLISP/GL*/ | ||
170 | { -1, -1, -1, -1, -1}, /* IdSel 25, none */ | ||
171 | { -1, -1, -1, -1, -1}, /* IdSel 26, none */ | ||
172 | { -1, -1, -1, -1, -1}, /* IdSel 27, none */ | ||
173 | {16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 28, slot 1 */ | ||
174 | {16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 29, slot 2 */ | ||
175 | {16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 30, slot 3 */ | ||
176 | /* This bridge is on the main bus of the later orig MIATA */ | ||
177 | { -1, -1, -1, -1, -1}, /* IdSel 31, PCI-PCI */ | ||
178 | }; | ||
179 | const long min_idsel = 3, max_idsel = 20, irqs_per_slot = 5; | ||
180 | |||
181 | /* the USB function of the 82c693 has it's interrupt connected to | ||
182 | the 2nd 8259 controller. So we have to check for it first. */ | ||
183 | |||
184 | if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) { | ||
185 | u8 irq=0; | ||
186 | |||
187 | if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL) | ||
188 | return -1; | ||
189 | else | ||
190 | return irq; | ||
191 | } | ||
192 | |||
193 | return COMMON_TABLE_LOOKUP; | ||
194 | } | ||
195 | |||
196 | static u8 __init | ||
197 | miata_swizzle(struct pci_dev *dev, u8 *pinp) | ||
198 | { | ||
199 | int slot, pin = *pinp; | ||
200 | |||
201 | if (dev->bus->number == 0) { | ||
202 | slot = PCI_SLOT(dev->devfn); | ||
203 | } | ||
204 | /* Check for the built-in bridge. */ | ||
205 | else if ((PCI_SLOT(dev->bus->self->devfn) == 8) || | ||
206 | (PCI_SLOT(dev->bus->self->devfn) == 20)) { | ||
207 | slot = PCI_SLOT(dev->devfn) + 9; | ||
208 | } | ||
209 | else | ||
210 | { | ||
211 | /* Must be a card-based bridge. */ | ||
212 | do { | ||
213 | if ((PCI_SLOT(dev->bus->self->devfn) == 8) || | ||
214 | (PCI_SLOT(dev->bus->self->devfn) == 20)) { | ||
215 | slot = PCI_SLOT(dev->devfn) + 9; | ||
216 | break; | ||
217 | } | ||
218 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); | ||
219 | |||
220 | /* Move up the chain of bridges. */ | ||
221 | dev = dev->bus->self; | ||
222 | /* Slot of the next bridge. */ | ||
223 | slot = PCI_SLOT(dev->devfn); | ||
224 | } while (dev->bus->self); | ||
225 | } | ||
226 | *pinp = pin; | ||
227 | return slot; | ||
228 | } | ||
229 | |||
230 | static void __init | ||
231 | miata_init_pci(void) | ||
232 | { | ||
233 | cia_init_pci(); | ||
234 | SMC669_Init(0); /* it might be a GL (fails harmlessly if not) */ | ||
235 | es1888_init(); | ||
236 | } | ||
237 | |||
238 | static void | ||
239 | miata_kill_arch(int mode) | ||
240 | { | ||
241 | cia_kill_arch(mode); | ||
242 | |||
243 | #ifndef ALPHA_RESTORE_SRM_SETUP | ||
244 | switch(mode) { | ||
245 | case LINUX_REBOOT_CMD_RESTART: | ||
246 | /* Who said DEC engineers have no sense of humor? ;-) */ | ||
247 | if (alpha_using_srm) { | ||
248 | *(vuip) PYXIS_RESET = 0x0000dead; | ||
249 | mb(); | ||
250 | } | ||
251 | break; | ||
252 | case LINUX_REBOOT_CMD_HALT: | ||
253 | break; | ||
254 | case LINUX_REBOOT_CMD_POWER_OFF: | ||
255 | break; | ||
256 | } | ||
257 | |||
258 | halt(); | ||
259 | #endif | ||
260 | } | ||
261 | |||
262 | |||
263 | /* | ||
264 | * The System Vector | ||
265 | */ | ||
266 | |||
267 | struct alpha_machine_vector miata_mv __initmv = { | ||
268 | .vector_name = "Miata", | ||
269 | DO_EV5_MMU, | ||
270 | DO_DEFAULT_RTC, | ||
271 | DO_PYXIS_IO, | ||
272 | .machine_check = cia_machine_check, | ||
273 | .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, | ||
274 | .min_io_address = DEFAULT_IO_BASE, | ||
275 | .min_mem_address = DEFAULT_MEM_BASE, | ||
276 | .pci_dac_offset = PYXIS_DAC_OFFSET, | ||
277 | |||
278 | .nr_irqs = 48, | ||
279 | .device_interrupt = pyxis_device_interrupt, | ||
280 | |||
281 | .init_arch = pyxis_init_arch, | ||
282 | .init_irq = miata_init_irq, | ||
283 | .init_rtc = common_init_rtc, | ||
284 | .init_pci = miata_init_pci, | ||
285 | .kill_arch = miata_kill_arch, | ||
286 | .pci_map_irq = miata_map_irq, | ||
287 | .pci_swizzle = miata_swizzle, | ||
288 | }; | ||
289 | ALIAS_MV(miata) | ||
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c new file mode 100644 index 000000000000..d78a0daa6168 --- /dev/null +++ b/arch/alpha/kernel/sys_mikasa.c | |||
@@ -0,0 +1,265 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_mikasa.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the MIKASA (AlphaServer 1000). | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/bitops.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/system.h> | ||
22 | #include <asm/dma.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <asm/mmu_context.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/core_apecs.h> | ||
28 | #include <asm/core_cia.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | |||
31 | #include "proto.h" | ||
32 | #include "irq_impl.h" | ||
33 | #include "pci_impl.h" | ||
34 | #include "machvec_impl.h" | ||
35 | |||
36 | |||
37 | /* Note mask bit is true for ENABLED irqs. */ | ||
38 | static int cached_irq_mask; | ||
39 | |||
/* Write the cached enable mask to the Mikasa IRQ mask register
   (I/O port 0x536).  A set bit means the IRQ is enabled.  */
static inline void
mikasa_update_irq_hw(int mask)
{
	outw(mask, 0x536);
}
45 | |||
46 | static inline void | ||
47 | mikasa_enable_irq(unsigned int irq) | ||
48 | { | ||
49 | mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); | ||
50 | } | ||
51 | |||
52 | static void | ||
53 | mikasa_disable_irq(unsigned int irq) | ||
54 | { | ||
55 | mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); | ||
56 | } | ||
57 | |||
/* hw_interrupt_type.startup: enable and report nothing pending.  */
static unsigned int
mikasa_startup_irq(unsigned int irq)
{
	mikasa_enable_irq(irq);
	return 0;
}
64 | |||
65 | static void | ||
66 | mikasa_end_irq(unsigned int irq) | ||
67 | { | ||
68 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
69 | mikasa_enable_irq(irq); | ||
70 | } | ||
71 | |||
72 | static struct hw_interrupt_type mikasa_irq_type = { | ||
73 | .typename = "MIKASA", | ||
74 | .startup = mikasa_startup_irq, | ||
75 | .shutdown = mikasa_disable_irq, | ||
76 | .enable = mikasa_enable_irq, | ||
77 | .disable = mikasa_disable_irq, | ||
78 | .ack = mikasa_disable_irq, | ||
79 | .end = mikasa_end_irq, | ||
80 | }; | ||
81 | |||
/* Top-level interrupt dispatcher when not running under SRM.  */
static void
mikasa_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	unsigned long pending;
	unsigned int bit;

	/*
	 * Build one summary word: bits 16-31 from the Mikasa summary
	 * register (inverted - NOTE(review): presumably active-low,
	 * confirm against the hardware docs), bits 8-15 from the slave
	 * 8259, bits 0-7 from the master 8259.
	 */
	pending = ((~inw(0x534) & 0x0000ffffUL) << 16)
		  | (((unsigned long) inb(0xa0)) << 8)
		  | inb(0x20);

	/* Dispatch every pending source, lowest bit first.  */
	while (pending) {
		bit = ffz(~pending);
		pending &= pending - 1;		/* clear least bit set */
		if (bit < 16)
			isa_device_interrupt(vector, regs);
		else
			handle_irq(bit, regs);
	}
}
107 | |||
108 | static void __init | ||
109 | mikasa_init_irq(void) | ||
110 | { | ||
111 | long i; | ||
112 | |||
113 | if (alpha_using_srm) | ||
114 | alpha_mv.device_interrupt = srm_device_interrupt; | ||
115 | |||
116 | mikasa_update_irq_hw(0); | ||
117 | |||
118 | for (i = 16; i < 32; ++i) { | ||
119 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | ||
120 | irq_desc[i].handler = &mikasa_irq_type; | ||
121 | } | ||
122 | |||
123 | init_i8259a_irqs(); | ||
124 | common_init_isa_dma(); | ||
125 | } | ||
126 | |||
127 | |||
128 | /* | ||
129 | * PCI Fixup configuration. | ||
130 | * | ||
131 | * Summary @ 0x536: | ||
132 | * Bit Meaning | ||
133 | * 0 Interrupt Line A from slot 0 | ||
134 | * 1 Interrupt Line B from slot 0 | ||
135 | * 2 Interrupt Line C from slot 0 | ||
136 | * 3 Interrupt Line D from slot 0 | ||
137 | * 4 Interrupt Line A from slot 1 | ||
138 | * 5 Interrupt line B from slot 1 | ||
139 | * 6 Interrupt Line C from slot 1 | ||
140 | * 7 Interrupt Line D from slot 1 | ||
141 | * 8 Interrupt Line A from slot 2 | ||
142 | * 9 Interrupt Line B from slot 2 | ||
143 | *10 Interrupt Line C from slot 2 | ||
144 | *11 Interrupt Line D from slot 2 | ||
145 | *12 NCR 810 SCSI | ||
146 | *13 Power Supply Fail | ||
147 | *14 Temperature Warn | ||
148 | *15 Reserved | ||
149 | * | ||
150 | * The device to slot mapping looks like: | ||
151 | * | ||
152 | * Slot Device | ||
153 | * 6 NCR SCSI controller | ||
154 | * 7 Intel PCI-EISA bridge chip | ||
155 | * 11 PCI on board slot 0 | ||
156 | * 12 PCI on board slot 1 | ||
157 | * 13 PCI on board slot 2 | ||
158 | * | ||
159 | * | ||
160 | * This two layered interrupt approach means that we allocate IRQ 16 and | ||
161 | * above for PCI interrupts. The IRQ relates to which bit the interrupt | ||
162 | * comes in on. This makes interrupt processing much easier. | ||
163 | */ | ||
164 | |||
165 | static int __init | ||
166 | mikasa_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
167 | { | ||
168 | static char irq_tab[8][5] __initdata = { | ||
169 | /*INT INTA INTB INTC INTD */ | ||
170 | {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ | ||
171 | { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ | ||
172 | { -1, -1, -1, -1, -1}, /* IdSel 19, ???? */ | ||
173 | { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */ | ||
174 | { -1, -1, -1, -1, -1}, /* IdSel 21, ???? */ | ||
175 | { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 0 */ | ||
176 | { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ | ||
177 | { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 24, slot 2 */ | ||
178 | }; | ||
179 | const long min_idsel = 6, max_idsel = 13, irqs_per_slot = 5; | ||
180 | return COMMON_TABLE_LOOKUP; | ||
181 | } | ||
182 | |||
183 | |||
184 | #if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) | ||
185 | static void | ||
186 | mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr, | ||
187 | struct pt_regs * regs) | ||
188 | { | ||
189 | #define MCHK_NO_DEVSEL 0x205U | ||
190 | #define MCHK_NO_TABT 0x204U | ||
191 | |||
192 | struct el_common *mchk_header; | ||
193 | unsigned int code; | ||
194 | |||
195 | mchk_header = (struct el_common *)la_ptr; | ||
196 | |||
197 | /* Clear the error before any reporting. */ | ||
198 | mb(); | ||
199 | mb(); /* magic */ | ||
200 | draina(); | ||
201 | apecs_pci_clr_err(); | ||
202 | wrmces(0x7); | ||
203 | mb(); | ||
204 | |||
205 | code = mchk_header->code; | ||
206 | process_mcheck_info(vector, la_ptr, regs, "MIKASA APECS", | ||
207 | (mcheck_expected(0) | ||
208 | && (code == MCHK_NO_DEVSEL | ||
209 | || code == MCHK_NO_TABT))); | ||
210 | } | ||
211 | #endif | ||
212 | |||
213 | |||
214 | /* | ||
215 | * The System Vector | ||
216 | */ | ||
217 | |||
218 | #if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) | ||
219 | struct alpha_machine_vector mikasa_mv __initmv = { | ||
220 | .vector_name = "Mikasa", | ||
221 | DO_EV4_MMU, | ||
222 | DO_DEFAULT_RTC, | ||
223 | DO_APECS_IO, | ||
224 | .machine_check = mikasa_apecs_machine_check, | ||
225 | .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, | ||
226 | .min_io_address = DEFAULT_IO_BASE, | ||
227 | .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, | ||
228 | |||
229 | .nr_irqs = 32, | ||
230 | .device_interrupt = mikasa_device_interrupt, | ||
231 | |||
232 | .init_arch = apecs_init_arch, | ||
233 | .init_irq = mikasa_init_irq, | ||
234 | .init_rtc = common_init_rtc, | ||
235 | .init_pci = common_init_pci, | ||
236 | .pci_map_irq = mikasa_map_irq, | ||
237 | .pci_swizzle = common_swizzle, | ||
238 | }; | ||
239 | ALIAS_MV(mikasa) | ||
240 | #endif | ||
241 | |||
242 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO) | ||
243 | struct alpha_machine_vector mikasa_primo_mv __initmv = { | ||
244 | .vector_name = "Mikasa-Primo", | ||
245 | DO_EV5_MMU, | ||
246 | DO_DEFAULT_RTC, | ||
247 | DO_CIA_IO, | ||
248 | .machine_check = cia_machine_check, | ||
249 | .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, | ||
250 | .min_io_address = DEFAULT_IO_BASE, | ||
251 | .min_mem_address = CIA_DEFAULT_MEM_BASE, | ||
252 | |||
253 | .nr_irqs = 32, | ||
254 | .device_interrupt = mikasa_device_interrupt, | ||
255 | |||
256 | .init_arch = cia_init_arch, | ||
257 | .init_irq = mikasa_init_irq, | ||
258 | .init_rtc = common_init_rtc, | ||
259 | .init_pci = cia_init_pci, | ||
260 | .kill_arch = cia_kill_arch, | ||
261 | .pci_map_irq = mikasa_map_irq, | ||
262 | .pci_swizzle = common_swizzle, | ||
263 | }; | ||
264 | ALIAS_MV(mikasa_primo) | ||
265 | #endif | ||
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c new file mode 100644 index 000000000000..c0d696efec5b --- /dev/null +++ b/arch/alpha/kernel/sys_nautilus.c | |||
@@ -0,0 +1,269 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_nautilus.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1998 Richard Henderson | ||
6 | * Copyright (C) 1999 Alpha Processor, Inc., | ||
7 | * (David Daniel, Stig Telfer, Soohoon Lee) | ||
8 | * | ||
9 | * Code supporting NAUTILUS systems. | ||
10 | * | ||
11 | * | ||
12 | * NAUTILUS has the following I/O features: | ||
13 | * | ||
14 | * a) Driven by AMD 751 aka IRONGATE (northbridge): | ||
15 | * 4 PCI slots | ||
16 | * 1 AGP slot | ||
17 | * | ||
18 | * b) Driven by ALI M1543C (southbridge) | ||
19 | * 2 ISA slots | ||
20 | * 2 IDE connectors | ||
21 | * 1 dual drive capable FDD controller | ||
22 | * 2 serial ports | ||
23 | * 1 ECP/EPP/SP parallel port | ||
24 | * 2 USB ports | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/reboot.h> | ||
34 | #include <linux/bootmem.h> | ||
35 | #include <linux/bitops.h> | ||
36 | |||
37 | #include <asm/ptrace.h> | ||
38 | #include <asm/system.h> | ||
39 | #include <asm/dma.h> | ||
40 | #include <asm/irq.h> | ||
41 | #include <asm/mmu_context.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/pci.h> | ||
44 | #include <asm/pgtable.h> | ||
45 | #include <asm/core_irongate.h> | ||
46 | #include <asm/hwrpb.h> | ||
47 | #include <asm/tlbflush.h> | ||
48 | |||
49 | #include "proto.h" | ||
50 | #include "err_impl.h" | ||
51 | #include "irq_impl.h" | ||
52 | #include "pci_impl.h" | ||
53 | #include "machvec_impl.h" | ||
54 | |||
55 | |||
56 | static void __init | ||
57 | nautilus_init_irq(void) | ||
58 | { | ||
59 | if (alpha_using_srm) { | ||
60 | alpha_mv.device_interrupt = srm_device_interrupt; | ||
61 | } | ||
62 | |||
63 | init_i8259a_irqs(); | ||
64 | common_init_isa_dma(); | ||
65 | } | ||
66 | |||
67 | static int __init | ||
68 | nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
69 | { | ||
70 | /* Preserve the IRQ set up by the console. */ | ||
71 | |||
72 | u8 irq; | ||
73 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); | ||
74 | return irq; | ||
75 | } | ||
76 | |||
77 | void | ||
78 | nautilus_kill_arch(int mode) | ||
79 | { | ||
80 | struct pci_bus *bus = pci_isa_hose->bus; | ||
81 | u32 pmuport; | ||
82 | int off; | ||
83 | |||
84 | switch (mode) { | ||
85 | case LINUX_REBOOT_CMD_RESTART: | ||
86 | if (! alpha_using_srm) { | ||
87 | u8 t8; | ||
88 | pci_bus_read_config_byte(bus, 0x38, 0x43, &t8); | ||
89 | pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80); | ||
90 | outb(1, 0x92); | ||
91 | outb(0, 0x92); | ||
92 | /* NOTREACHED */ | ||
93 | } | ||
94 | break; | ||
95 | |||
96 | case LINUX_REBOOT_CMD_POWER_OFF: | ||
97 | /* Assume M1543C */ | ||
98 | off = 0x2000; /* SLP_TYPE = 0, SLP_EN = 1 */ | ||
99 | pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport); | ||
100 | if (!pmuport) { | ||
101 | /* M1535D/D+ */ | ||
102 | off = 0x3400; /* SLP_TYPE = 5, SLP_EN = 1 */ | ||
103 | pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport); | ||
104 | } | ||
105 | pmuport &= 0xfffe; | ||
106 | outw(0xffff, pmuport); /* Clear pending events. */ | ||
107 | outw(off, pmuport + 4); | ||
108 | /* NOTREACHED */ | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | /* Perform analysis of a machine check that arrived from the system (NMI) */ | ||
114 | |||
115 | static void | ||
116 | naut_sys_machine_check(unsigned long vector, unsigned long la_ptr, | ||
117 | struct pt_regs *regs) | ||
118 | { | ||
119 | printk("PC %lx RA %lx\n", regs->pc, regs->r26); | ||
120 | irongate_pci_clr_err(); | ||
121 | } | ||
122 | |||
123 | /* Machine checks can come from two sources - those on the CPU and those | ||
124 | in the system. They are analysed separately but all starts here. */ | ||
125 | |||
126 | void | ||
127 | nautilus_machine_check(unsigned long vector, unsigned long la_ptr, | ||
128 | struct pt_regs *regs) | ||
129 | { | ||
130 | char *mchk_class; | ||
131 | |||
132 | /* Now for some analysis. Machine checks fall into two classes -- | ||
133 | those picked up by the system, and those picked up by the CPU. | ||
134 | Add to that the two levels of severity - correctable or not. */ | ||
135 | |||
136 | if (vector == SCB_Q_SYSMCHK | ||
137 | && ((IRONGATE0->dramms & 0x300) == 0x300)) { | ||
138 | unsigned long nmi_ctl; | ||
139 | |||
140 | /* Clear ALI NMI */ | ||
141 | nmi_ctl = inb(0x61); | ||
142 | nmi_ctl |= 0x0c; | ||
143 | outb(nmi_ctl, 0x61); | ||
144 | nmi_ctl &= ~0x0c; | ||
145 | outb(nmi_ctl, 0x61); | ||
146 | |||
147 | /* Write again clears error bits. */ | ||
148 | IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; | ||
149 | mb(); | ||
150 | IRONGATE0->stat_cmd; | ||
151 | |||
152 | /* Write again clears error bits. */ | ||
153 | IRONGATE0->dramms = IRONGATE0->dramms; | ||
154 | mb(); | ||
155 | IRONGATE0->dramms; | ||
156 | |||
157 | draina(); | ||
158 | wrmces(0x7); | ||
159 | mb(); | ||
160 | return; | ||
161 | } | ||
162 | |||
163 | if (vector == SCB_Q_SYSERR) | ||
164 | mchk_class = "Correctable"; | ||
165 | else if (vector == SCB_Q_SYSMCHK) | ||
166 | mchk_class = "Fatal"; | ||
167 | else { | ||
168 | ev6_machine_check(vector, la_ptr, regs); | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | printk(KERN_CRIT "NAUTILUS Machine check 0x%lx " | ||
173 | "[%s System Machine Check (NMI)]\n", | ||
174 | vector, mchk_class); | ||
175 | |||
176 | naut_sys_machine_check(vector, la_ptr, regs); | ||
177 | |||
178 | /* Tell the PALcode to clear the machine check */ | ||
179 | draina(); | ||
180 | wrmces(0x7); | ||
181 | mb(); | ||
182 | } | ||
183 | |||
184 | extern void free_reserved_mem(void *, void *); | ||
185 | |||
186 | static struct resource irongate_mem = { | ||
187 | .name = "Irongate PCI MEM", | ||
188 | .flags = IORESOURCE_MEM, | ||
189 | }; | ||
190 | |||
191 | void __init | ||
192 | nautilus_init_pci(void) | ||
193 | { | ||
194 | struct pci_controller *hose = hose_head; | ||
195 | struct pci_bus *bus; | ||
196 | struct pci_dev *irongate; | ||
197 | unsigned long bus_align, bus_size, pci_mem; | ||
198 | unsigned long memtop = max_low_pfn << PAGE_SHIFT; | ||
199 | |||
200 | /* Scan our single hose. */ | ||
201 | bus = pci_scan_bus(0, alpha_mv.pci_ops, hose); | ||
202 | hose->bus = bus; | ||
203 | |||
204 | irongate = pci_find_slot(0, 0); | ||
205 | bus->self = irongate; | ||
206 | bus->resource[1] = &irongate_mem; | ||
207 | |||
208 | pci_bus_size_bridges(bus); | ||
209 | |||
210 | /* IO port range. */ | ||
211 | bus->resource[0]->start = 0; | ||
212 | bus->resource[0]->end = 0xffff; | ||
213 | |||
214 | /* Set up PCI memory range - limit is hardwired to 0xffffffff, | ||
215 | base must be at aligned to 16Mb. */ | ||
216 | bus_align = bus->resource[1]->start; | ||
217 | bus_size = bus->resource[1]->end + 1 - bus_align; | ||
218 | if (bus_align < 0x1000000UL) | ||
219 | bus_align = 0x1000000UL; | ||
220 | |||
221 | pci_mem = (0x100000000UL - bus_size) & -bus_align; | ||
222 | |||
223 | bus->resource[1]->start = pci_mem; | ||
224 | bus->resource[1]->end = 0xffffffffUL; | ||
225 | if (request_resource(&iomem_resource, bus->resource[1]) < 0) | ||
226 | printk(KERN_ERR "Failed to request MEM on hose 0\n"); | ||
227 | |||
228 | if (pci_mem < memtop) | ||
229 | memtop = pci_mem; | ||
230 | if (memtop > alpha_mv.min_mem_address) { | ||
231 | free_reserved_mem(__va(alpha_mv.min_mem_address), | ||
232 | __va(memtop)); | ||
233 | printk("nautilus_init_pci: %ldk freed\n", | ||
234 | (memtop - alpha_mv.min_mem_address) >> 10); | ||
235 | } | ||
236 | |||
237 | if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */ | ||
238 | IRONGATE0->pci_mem = pci_mem; | ||
239 | |||
240 | pci_bus_assign_resources(bus); | ||
241 | pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * The System Vectors | ||
246 | */ | ||
247 | |||
248 | struct alpha_machine_vector nautilus_mv __initmv = { | ||
249 | .vector_name = "Nautilus", | ||
250 | DO_EV6_MMU, | ||
251 | DO_DEFAULT_RTC, | ||
252 | DO_IRONGATE_IO, | ||
253 | .machine_check = nautilus_machine_check, | ||
254 | .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, | ||
255 | .min_io_address = DEFAULT_IO_BASE, | ||
256 | .min_mem_address = IRONGATE_DEFAULT_MEM_BASE, | ||
257 | |||
258 | .nr_irqs = 16, | ||
259 | .device_interrupt = isa_device_interrupt, | ||
260 | |||
261 | .init_arch = irongate_init_arch, | ||
262 | .init_irq = nautilus_init_irq, | ||
263 | .init_rtc = common_init_rtc, | ||
264 | .init_pci = nautilus_init_pci, | ||
265 | .kill_arch = nautilus_kill_arch, | ||
266 | .pci_map_irq = nautilus_map_irq, | ||
267 | .pci_swizzle = common_swizzle, | ||
268 | }; | ||
269 | ALIAS_MV(nautilus) | ||
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c new file mode 100644 index 000000000000..65061f5d7410 --- /dev/null +++ b/arch/alpha/kernel/sys_noritake.c | |||
@@ -0,0 +1,347 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_noritake.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the NORITAKE (AlphaServer 1000A), | ||
9 | * CORELLE (AlphaServer 800), and ALCOR Primo (AlphaStation 600A). | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/bitops.h> | ||
20 | |||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <asm/dma.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/core_apecs.h> | ||
29 | #include <asm/core_cia.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | |||
32 | #include "proto.h" | ||
33 | #include "irq_impl.h" | ||
34 | #include "pci_impl.h" | ||
35 | #include "machvec_impl.h" | ||
36 | |||
/* Software copy of the two 16-bit Noritake mask registers; bit n
   corresponds to IRQ 16+n (so bits 0-31 cover IRQs 16-47).
   Note mask bit is true for ENABLED irqs. */
static int cached_irq_mask;
39 | |||
/*
 * Push the relevant half of 'mask' (the full cached enable mask) out
 * to the hardware register that owns 'irq': IRQs 16-31 are the low 16
 * bits, written to I/O port 0x54a; IRQs 32-47 are the high 16 bits,
 * written to port 0x54c.
 */
static inline void
noritake_update_irq_hw(int irq, int mask)
{
	int port = 0x54a;
	if (irq >= 32) {
		mask >>= 16;	/* select the high half for 0x54c */
		port = 0x54c;
	}
	outw(mask, port);
}
50 | |||
51 | static void | ||
52 | noritake_enable_irq(unsigned int irq) | ||
53 | { | ||
54 | noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); | ||
55 | } | ||
56 | |||
57 | static void | ||
58 | noritake_disable_irq(unsigned int irq) | ||
59 | { | ||
60 | noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); | ||
61 | } | ||
62 | |||
/* ->startup hook: enabling is all the hardware needs; return 0
   (no interrupt already pending).  */
static unsigned int
noritake_startup_irq(unsigned int irq)
{
	noritake_enable_irq(irq);
	return 0;
}
69 | |||
/* IRQ controller callbacks for the Noritake summary-register IRQs
   (16-47).  Masking doubles as shutdown and ack; ->end unconditionally
   re-enables after handling.  */
static struct hw_interrupt_type noritake_irq_type = {
	.typename	= "NORITAKE",
	.startup	= noritake_startup_irq,
	.shutdown	= noritake_disable_irq,
	.enable		= noritake_enable_irq,
	.disable	= noritake_disable_irq,
	.ack		= noritake_disable_irq,
	.end		= noritake_enable_irq,
};
79 | |||
/*
 * Dispatch a device interrupt: assemble a 48-bit pending word from
 * the two Noritake summary registers (0x54a/0x54c -> bits 16-47) and
 * the two i8259 registers (0x20/0xa0 -> bits 0-15), then service
 * every set bit, lowest first.
 */
static void
noritake_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary registers of NORITAKE */
	pld = (((unsigned long) inw(0x54c) << 32)
	       | ((unsigned long) inw(0x54a) << 16)
	       | ((unsigned long) inb(0xa0) << 8)
	       | inb(0x20));

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);		/* index of lowest set bit */
		pld &= pld - 1;		/* clear least bit set */
		if (i < 16) {
			/* ISA range: let the i8259 path identify it. */
			isa_device_interrupt(vector, regs);
		} else {
			handle_irq(i, regs);
		}
	}
}
106 | |||
/*
 * SRM console interrupt dispatch.  The console delivers vector
 * 0x800 + 0x10 * n; recover n, then bump PCI interrupts (n >= 16) up
 * by one, because the Noritake SRM reports those vectors lower than
 * the documented bit numbers.  Adjusting here keeps the IRQ
 * allocation and alpha_irq_mask code untouched.
 */
static void
noritake_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq = (vector - 0x800) >> 4;

	/* Grotty compensation for the console's off-by-one PCI
	   vectors (see block comment above).  */
	if (irq >= 16)
		++irq;

	handle_irq(irq, regs);
}
128 | |||
/*
 * Boot-time interrupt setup: select the SRM dispatch variant when the
 * console is SRM, mask all Noritake IRQs in hardware, attach our
 * controller to IRQs 16-47, then bring up the i8259 pair and ISA DMA.
 */
static void __init
noritake_init_irq(void)
{
	long i;

	if (alpha_using_srm)
		alpha_mv.device_interrupt = noritake_srm_device_interrupt;

	/* Writing 0 disables everything (mask bit true = enabled). */
	outw(0, 0x54a);
	outw(0, 0x54c);

	for (i = 16; i < 48; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &noritake_irq_type;
	}

	init_i8259a_irqs();
	common_init_isa_dma();
}
148 | |||
149 | |||
150 | /* | ||
151 | * PCI Fixup configuration. | ||
152 | * | ||
153 | * Summary @ 0x542, summary register #1: | ||
154 | * Bit Meaning | ||
155 | * 0 All valid ints from summary regs 2 & 3 | ||
156 | * 1 QLOGIC ISP1020A SCSI | ||
157 | * 2 Interrupt Line A from slot 0 | ||
158 | * 3 Interrupt Line B from slot 0 | ||
159 | * 4 Interrupt Line A from slot 1 | ||
 * 5	Interrupt Line B from slot 1
161 | * 6 Interrupt Line A from slot 2 | ||
162 | * 7 Interrupt Line B from slot 2 | ||
163 | * 8 Interrupt Line A from slot 3 | ||
164 | * 9 Interrupt Line B from slot 3 | ||
165 | *10 Interrupt Line A from slot 4 | ||
166 | *11 Interrupt Line B from slot 4 | ||
167 | *12 Interrupt Line A from slot 5 | ||
168 | *13 Interrupt Line B from slot 5 | ||
169 | *14 Interrupt Line A from slot 6 | ||
170 | *15 Interrupt Line B from slot 6 | ||
171 | * | ||
172 | * Summary @ 0x544, summary register #2: | ||
173 | * Bit Meaning | ||
174 | * 0 OR of all unmasked ints in SR #2 | ||
175 | * 1 OR of secondary bus ints | ||
176 | * 2 Interrupt Line C from slot 0 | ||
177 | * 3 Interrupt Line D from slot 0 | ||
178 | * 4 Interrupt Line C from slot 1 | ||
 * 5	Interrupt Line D from slot 1
180 | * 6 Interrupt Line C from slot 2 | ||
181 | * 7 Interrupt Line D from slot 2 | ||
182 | * 8 Interrupt Line C from slot 3 | ||
183 | * 9 Interrupt Line D from slot 3 | ||
184 | *10 Interrupt Line C from slot 4 | ||
185 | *11 Interrupt Line D from slot 4 | ||
186 | *12 Interrupt Line C from slot 5 | ||
187 | *13 Interrupt Line D from slot 5 | ||
188 | *14 Interrupt Line C from slot 6 | ||
189 | *15 Interrupt Line D from slot 6 | ||
190 | * | ||
191 | * The device to slot mapping looks like: | ||
192 | * | ||
193 | * Slot Device | ||
194 | * 7 Intel PCI-EISA bridge chip | ||
195 | * 8 DEC PCI-PCI bridge chip | ||
196 | * 11 PCI on board slot 0 | ||
197 | * 12 PCI on board slot 1 | ||
198 | * 13 PCI on board slot 2 | ||
199 | * | ||
200 | * | ||
201 | * This two layered interrupt approach means that we allocate IRQ 16 and | ||
202 | * above for PCI interrupts. The IRQ relates to which bit the interrupt | ||
203 | * comes in on. This makes interrupt processing much easier. | ||
204 | */ | ||
205 | |||
/*
 * Map a (slot, pin) pair to an Alpha IRQ via the routing table.  The
 * COMMON_TABLE_LOOKUP macro (pci_impl.h) consumes the local bindings
 * irq_tab/min_idsel/max_idsel/irqs_per_slot plus slot/pin, so those
 * names must stay as-is.  -1 entries mean "no interrupt".
 */
static int __init
noritake_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		/* note: IDSELs 16, 17, and 25 are CORELLE only */
		{ 16+1,  16+1,  16+1,  16+1,  16+1},  /* IdSel 16,  QLOGIC */
		{ -1,    -1,    -1,    -1,    -1},    /* IdSel 17, S3 Trio64 */
		{ -1,    -1,    -1,    -1,    -1},    /* IdSel 18,  PCEB */
		{ -1,    -1,    -1,    -1,    -1},    /* IdSel 19,  PPB  */
		{ -1,    -1,    -1,    -1,    -1},    /* IdSel 20,  ???? */
		{ -1,    -1,    -1,    -1,    -1},    /* IdSel 21,  ???? */
		{ 16+2,  16+2,  16+3,  32+2,  32+3},  /* IdSel 22,  slot 0 */
		{ 16+4,  16+4,  16+5,  32+4,  32+5},  /* IdSel 23,  slot 1 */
		{ 16+6,  16+6,  16+7,  32+6,  32+7},  /* IdSel 24,  slot 2 */
		{ 16+8,  16+8,  16+9,  32+8,  32+9},  /* IdSel 25,  slot 3 */
		/* The following 5 are actually on PCI bus 1, which is
		   across the built-in bridge of the NORITAKE only. */
		{ 16+1,  16+1,  16+1,  16+1,  16+1},  /* IdSel 16,  QLOGIC */
		{ 16+8,  16+8,  16+9,  32+8,  32+9},  /* IdSel 17,  slot 3 */
		{16+10, 16+10, 16+11, 32+10, 32+11},  /* IdSel 18,  slot 4 */
		{16+12, 16+12, 16+13, 32+12, 32+13},  /* IdSel 19,  slot 5 */
		{16+14, 16+14, 16+15, 32+14, 32+15},  /* IdSel 20,  slot 6 */
	};
	const long min_idsel = 5, max_idsel = 19, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
233 | |||
234 | static u8 __init | ||
235 | noritake_swizzle(struct pci_dev *dev, u8 *pinp) | ||
236 | { | ||
237 | int slot, pin = *pinp; | ||
238 | |||
239 | if (dev->bus->number == 0) { | ||
240 | slot = PCI_SLOT(dev->devfn); | ||
241 | } | ||
242 | /* Check for the built-in bridge */ | ||
243 | else if (PCI_SLOT(dev->bus->self->devfn) == 8) { | ||
244 | slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */ | ||
245 | } | ||
246 | else | ||
247 | { | ||
248 | /* Must be a card-based bridge. */ | ||
249 | do { | ||
250 | if (PCI_SLOT(dev->bus->self->devfn) == 8) { | ||
251 | slot = PCI_SLOT(dev->devfn) + 15; | ||
252 | break; | ||
253 | } | ||
254 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; | ||
255 | |||
256 | /* Move up the chain of bridges. */ | ||
257 | dev = dev->bus->self; | ||
258 | /* Slot of the next bridge. */ | ||
259 | slot = PCI_SLOT(dev->devfn); | ||
260 | } while (dev->bus->self); | ||
261 | } | ||
262 | *pinp = pin; | ||
263 | return slot; | ||
264 | } | ||
265 | |||
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
/*
 * APECS machine-check handler for Noritake.  First clear the error
 * state -- the barrier/draina/clr_err/wrmces sequence and its order
 * are required before anything else -- then hand off to the common
 * reporting code.  The no-DEVSEL and target-abort codes are treated
 * as "expected" when a config-space probe was in flight
 * (mcheck_expected).
 */
static void
noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr,
			     struct pt_regs * regs)
{
#define MCHK_NO_DEVSEL 0x205U
#define MCHK_NO_TABT 0x204U

	struct el_common *mchk_header;
	unsigned int code;

	mchk_header = (struct el_common *)la_ptr;

	/* Clear the error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	apecs_pci_clr_err();
	wrmces(0x7);
	mb();

	code = mchk_header->code;
	process_mcheck_info(vector, la_ptr, regs, "NORITAKE APECS",
			    (mcheck_expected(0)
			     && (code == MCHK_NO_DEVSEL
				 || code == MCHK_NO_TABT)));
}
#endif
294 | |||
295 | |||
296 | /* | ||
297 | * The System Vectors | ||
298 | */ | ||
299 | |||
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
/* Machine vector for the APECS-based (EV4) Noritake variants. */
struct alpha_machine_vector noritake_mv __initmv = {
	.vector_name		= "Noritake",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= noritake_apecs_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= noritake_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= noritake_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.pci_map_irq		= noritake_map_irq,
	.pci_swizzle		= noritake_swizzle,
};
ALIAS_MV(noritake)
#endif
323 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
/* Machine vector for the CIA-based (EV5) ALCOR Primo variant;
   shares the Noritake interrupt code but uses CIA core routines. */
struct alpha_machine_vector noritake_primo_mv __initmv = {
	.vector_name		= "Noritake-Primo",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= noritake_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= noritake_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= cia_init_pci,
	.kill_arch		= cia_kill_arch,
	.pci_map_irq		= noritake_map_irq,
	.pci_swizzle		= noritake_swizzle,
};
ALIAS_MV(noritake_primo)
#endif
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c new file mode 100644 index 000000000000..05888a02a604 --- /dev/null +++ b/arch/alpha/kernel/sys_rawhide.c | |||
@@ -0,0 +1,270 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_rawhide.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the RAWHIDE. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/init.h> | ||
17 | |||
18 | #include <asm/ptrace.h> | ||
19 | #include <asm/system.h> | ||
20 | #include <asm/dma.h> | ||
21 | #include <asm/irq.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/core_mcpcia.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | |||
28 | #include "proto.h" | ||
29 | #include "irq_impl.h" | ||
30 | #include "pci_impl.h" | ||
31 | #include "machvec_impl.h" | ||
32 | |||
33 | |||
34 | /* | ||
35 | * HACK ALERT! only the boot cpu is used for interrupts. | ||
36 | */ | ||
37 | |||
38 | |||
/* Note mask bit is true for ENABLED irqs. */

/* Per-hose bits that may never be disabled (disable paths OR these
   back in).  Per the summary-register description below, bit 16 is
   EISA on hose 0 but SCSI on hose 1 -- presumably the SCSI interrupt
   must stay maskable, hence 0xfe0000 for hose 1.  TODO confirm. */
static unsigned int hose_irq_masks[4] = {
	0xff0000, 0xfe0000, 0xff0000, 0xff0000
};
/* Software copy of each hose's MCPCIA_INT_MASK0 register. */
static unsigned int cached_irq_masks[4];
/* Serializes cached_irq_masks updates with the hardware writes. */
DEFINE_SPINLOCK(rawhide_irq_lock);
46 | |||
/* Write 'mask' to the hose's MCPCIA_INT_MASK0 register; the dummy
   read back after the barrier forces the posted write to complete. */
static inline void
rawhide_update_irq_hw(int hose, int mask)
{
	*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)) = mask;
	mb();
	*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose));
}
54 | |||
55 | static inline void | ||
56 | rawhide_enable_irq(unsigned int irq) | ||
57 | { | ||
58 | unsigned int mask, hose; | ||
59 | |||
60 | irq -= 16; | ||
61 | hose = irq / 24; | ||
62 | irq -= hose * 24; | ||
63 | mask = 1 << irq; | ||
64 | |||
65 | spin_lock(&rawhide_irq_lock); | ||
66 | mask |= cached_irq_masks[hose]; | ||
67 | cached_irq_masks[hose] = mask; | ||
68 | rawhide_update_irq_hw(hose, mask); | ||
69 | spin_unlock(&rawhide_irq_lock); | ||
70 | } | ||
71 | |||
72 | static void | ||
73 | rawhide_disable_irq(unsigned int irq) | ||
74 | { | ||
75 | unsigned int mask, hose; | ||
76 | |||
77 | irq -= 16; | ||
78 | hose = irq / 24; | ||
79 | irq -= hose * 24; | ||
80 | mask = ~(1 << irq) | hose_irq_masks[hose]; | ||
81 | |||
82 | spin_lock(&rawhide_irq_lock); | ||
83 | mask &= cached_irq_masks[hose]; | ||
84 | cached_irq_masks[hose] = mask; | ||
85 | rawhide_update_irq_hw(hose, mask); | ||
86 | spin_unlock(&rawhide_irq_lock); | ||
87 | } | ||
88 | |||
/*
 * Mask and acknowledge 'irq' in one locked section: clear its enable
 * bit (preserving the hose's always-on bits), push the new mask, then
 * clear the pending bit in the MCPCIA INT_REQ register.  Keeping the
 * ack inside the lock keeps mask and hardware state in step.
 */
static void
rawhide_mask_and_ack_irq(unsigned int irq)
{
	unsigned int mask, mask1, hose;

	irq -= 16;
	hose = irq / 24;
	irq -= hose * 24;
	mask1 = 1 << irq;	/* the bit being acked */
	mask = ~mask1 | hose_irq_masks[hose];

	spin_lock(&rawhide_irq_lock);

	mask &= cached_irq_masks[hose];
	cached_irq_masks[hose] = mask;
	rawhide_update_irq_hw(hose, mask);

	/* Clear the interrupt. */
	*(vuip)MCPCIA_INT_REQ(MCPCIA_HOSE2MID(hose)) = mask1;

	spin_unlock(&rawhide_irq_lock);
}
111 | |||
/* ->startup hook: enabling is all that's needed; return 0
   (no interrupt already pending).  */
static unsigned int
rawhide_startup_irq(unsigned int irq)
{
	rawhide_enable_irq(irq);
	return 0;
}
118 | |||
/* ->end hook: re-enable only if the IRQ wasn't disabled or re-raised
   while it was being handled.  */
static void
rawhide_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		rawhide_enable_irq(irq);
}
125 | |||
/* IRQ controller callbacks for the MCPCIA hose interrupts (16-127). */
static struct hw_interrupt_type rawhide_irq_type = {
	.typename	= "RAWHIDE",
	.startup	= rawhide_startup_irq,
	.shutdown	= rawhide_disable_irq,
	.enable		= rawhide_enable_irq,
	.disable	= rawhide_disable_irq,
	.ack		= rawhide_mask_and_ack_irq,
	.end		= rawhide_end_irq,
};
135 | |||
/* Translate an SRM console vector into our IRQ numbering and
   dispatch it.  */
static void
rawhide_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq;

	irq = (vector - 0x800) >> 4;	/* SRM vector -> raw IRQ number */

	/*
	 * The RAWHIDE SRM console reports PCI interrupts with a vector
	 * 0x80 *higher* than one might expect, as PCI IRQ 0 (ie bit 0)
	 * shows up as IRQ 24, etc, etc. We adjust it down by 8 to have
	 * it line up with the actual bit numbers from the REQ registers,
	 * which is how we manage the interrupts/mask. Sigh...
	 *
	 * Also, PCI #1 interrupts are offset some more... :-(
	 */

	if (irq == 52) {
		/* SCSI on PCI1 is special. */
		irq = 72;
	}

	/* Adjust by which hose it is from. */
	/* NOTE(review): this appears to subtract 8 per hose-sized group
	   to close the gaps in the SRM numbering described above --
	   confirm against the SRM console documentation.  */
	irq -= ((irq + 16) >> 2) & 0x38;

	handle_irq(irq, regs);
}
163 | |||
/*
 * Boot-time interrupt setup: probe the MCPCIA hoses, program each
 * one's initial masks (MASK0 = only the always-enabled bits, MASK1 =
 * none), attach the rawhide controller to IRQs 16-127, then bring up
 * the i8259s and ISA DMA.
 */
static void __init
rawhide_init_irq(void)
{
	struct pci_controller *hose;
	long i;

	mcpcia_init_hoses();

	for (hose = hose_head; hose; hose = hose->next) {
		unsigned int h = hose->index;
		unsigned int mask = hose_irq_masks[h];

		cached_irq_masks[h] = mask;
		*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask;
		*(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0;
	}

	for (i = 16; i < 128; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &rawhide_irq_type;
	}

	init_i8259a_irqs();
	common_init_isa_dma();
}
189 | |||
190 | /* | ||
191 | * PCI Fixup configuration. | ||
192 | * | ||
193 | * Summary @ MCPCIA_PCI0_INT_REQ: | ||
194 | * Bit Meaning | ||
195 | * 0 Interrupt Line A from slot 2 PCI0 | ||
196 | * 1 Interrupt Line B from slot 2 PCI0 | ||
197 | * 2 Interrupt Line C from slot 2 PCI0 | ||
198 | * 3 Interrupt Line D from slot 2 PCI0 | ||
199 | * 4 Interrupt Line A from slot 3 PCI0 | ||
200 | * 5 Interrupt Line B from slot 3 PCI0 | ||
201 | * 6 Interrupt Line C from slot 3 PCI0 | ||
202 | * 7 Interrupt Line D from slot 3 PCI0 | ||
203 | * 8 Interrupt Line A from slot 4 PCI0 | ||
204 | * 9 Interrupt Line B from slot 4 PCI0 | ||
205 | * 10 Interrupt Line C from slot 4 PCI0 | ||
206 | * 11 Interrupt Line D from slot 4 PCI0 | ||
207 | * 12 Interrupt Line A from slot 5 PCI0 | ||
208 | * 13 Interrupt Line B from slot 5 PCI0 | ||
209 | * 14 Interrupt Line C from slot 5 PCI0 | ||
210 | * 15 Interrupt Line D from slot 5 PCI0 | ||
211 | * 16 EISA interrupt (PCI 0) or SCSI interrupt (PCI 1) | ||
212 | * 17-23 NA | ||
213 | * | ||
214 | * IdSel | ||
215 | * 1 EISA bridge (PCI bus 0 only) | ||
216 | * 2 PCI option slot 2 | ||
217 | * 3 PCI option slot 3 | ||
218 | * 4 PCI option slot 4 | ||
219 | * 5 PCI option slot 5 | ||
220 | * | ||
221 | */ | ||
222 | |||
/*
 * Map (slot, pin) to an IRQ via the routing table above.  Table
 * entries are relative to a hose's bit 0, so a hit is shifted by 24
 * per hose index to land in that hose's IRQ range.  The
 * COMMON_TABLE_LOOKUP macro (pci_impl.h) consumes the local
 * irq_tab/min_idsel/max_idsel/irqs_per_slot/slot/pin bindings.
 */
static int __init
rawhide_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[5][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{ 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
		{ 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
		{ 16+ 4, 16+ 4, 16+ 5, 16+ 6, 16+ 7}, /* IdSel 3 slot 3 */
		{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 4 slot 4 */
		{ 16+12, 16+12, 16+13, 16+14, 16+15}  /* IdSel 5 slot 5 */
	};
	const long min_idsel = 1, max_idsel = 5, irqs_per_slot = 5;

	struct pci_controller *hose = dev->sysdata;
	int irq = COMMON_TABLE_LOOKUP;
	if (irq >= 0)
		irq += 24 * hose->index;	/* shift into this hose's range */
	return irq;
}
242 | |||
243 | |||
244 | /* | ||
245 | * The System Vector | ||
246 | */ | ||
247 | |||
/* Machine vector for the MCPCIA-based Rawhide family.  Note the SRM
   dispatch routine is installed unconditionally as device_interrupt. */
struct alpha_machine_vector rawhide_mv __initmv = {
	.vector_name		= "Rawhide",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_MCPCIA_IO,
	.machine_check		= mcpcia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= MCPCIA_DEFAULT_MEM_BASE,
	.pci_dac_offset		= MCPCIA_DAC_OFFSET,

	.nr_irqs		= 128,
	.device_interrupt	= rawhide_srm_device_interrupt,

	.init_arch		= mcpcia_init_arch,
	.init_irq		= rawhide_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= NULL,
	.pci_map_irq		= rawhide_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(rawhide)
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c new file mode 100644 index 000000000000..78c30decf3ff --- /dev/null +++ b/arch/alpha/kernel/sys_ruffian.c | |||
@@ -0,0 +1,240 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_ruffian.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999, 2000 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the RUFFIAN. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/ioport.h> | ||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <asm/ptrace.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/core_cia.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/8253pit.h> | ||
29 | |||
30 | #include "proto.h" | ||
31 | #include "irq_impl.h" | ||
32 | #include "pci_impl.h" | ||
33 | #include "machvec_impl.h" | ||
34 | |||
35 | |||
/*
 * Boot-time interrupt setup: program the PYXIS interrupt steering,
 * initialize both 8259A PICs with full ICW1-4 sequences, then bring
 * up the PYXIS IRQs and ISA DMA.
 */
static void __init
ruffian_init_irq(void)
{
	/* Invert 6&7 for i82371 */
	*(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb();
	*(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb();	 /* all clear */

	/* Slave PIC at 0xa0: ICW1 (edge, cascade, ICW4 needed), ICW2
	   (vector base 8), ICW3 (slave ID 2), ICW4 (8086 mode); then
	   OCW1 masks all inputs.  */
	outb(0x11,0xA0);
	outb(0x08,0xA1);
	outb(0x02,0xA1);
	outb(0x01,0xA1);
	outb(0xFF,0xA1);

	/* Master PIC at 0x20: same sequence, vector base 0, slave
	   cascaded on input 2 (ICW3 = 0x04), all inputs masked.  */
	outb(0x11,0x20);
	outb(0x00,0x21);
	outb(0x04,0x21);
	outb(0x01,0x21);
	outb(0xFF,0x21);

	/* Finish writing the 8259A PIC Operation Control Words:
	   a non-specific EOI to each.  */
	outb(0x20,0xA0);
	outb(0x20,0x20);

	init_i8259a_irqs();

	/* Not interested in the bogus interrupts (0,3,6),
	   NMI (1), HALT (2), flash (5), or 21142 (8).  */
	init_pyxis_irqs(0x16f0000);

	common_init_isa_dma();
}
67 | |||
/* PIT channel-0 reload value for a HZ-rate tick, rounded to nearest. */
#define RUFFIAN_LATCH	((PIT_TICK_RATE + HZ / 2) / HZ)
69 | |||
/* Set up the system tick.  Ruffian drives it from the i8254 PIT on
   IRQ 0 rather than the RTC/CPU timer interrupt.  */
static void __init
ruffian_init_rtc(void)
{
	/* Ruffian does not have the RTC connected to the CPU timer
	   interrupt.  Instead, it uses the PIT connected to IRQ 0. */

	/* Setup interval timer. */
	outb(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb(RUFFIAN_LATCH & 0xff, 0x40);	/* LSB */
	outb(RUFFIAN_LATCH >> 8, 0x40);		/* MSB */

	/* Channel 2 (speaker) gets a fixed reload value. */
	outb(0xb6, 0x43);		/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	/* The PIT tick becomes the system timer interrupt. */
	setup_irq(0, &timer_irqaction);
}
87 | |||
/* Halt/reboot/power-off hook: defer to the common CIA code.  The
   direct PYXIS_RESET poke below is disabled because it only causes
   re-entry to ARCSBIOS.  */
static void
ruffian_kill_arch (int mode)
{
	cia_kill_arch(mode);
#if 0
	/* This only causes re-entry to ARCSBIOS */
	/* Perhaps this works for other PYXIS as well?  */
	*(vuip) PYXIS_RESET = 0x0000dead;
	mb();
#endif
}
99 | |||
100 | /* | ||
101 | * Interrupt routing: | ||
102 | * | ||
103 | * Primary bus | ||
104 | * IdSel INTA INTB INTC INTD | ||
105 | * 21052 13 - - - - | ||
106 | * SIO 14 23 - - - | ||
107 | * 21143 15 44 - - - | ||
108 | * Slot 0 17 43 42 41 40 | ||
109 | * | ||
110 | * Secondary bus | ||
111 | * IdSel INTA INTB INTC INTD | ||
112 | * Slot 0 8 (18) 19 18 17 16 | ||
113 | * Slot 1 9 (19) 31 30 29 28 | ||
114 | * Slot 2 10 (20) 27 26 25 24 | ||
115 | * Slot 3 11 (21) 39 38 37 36 | ||
116 | * Slot 4 12 (22) 35 34 33 32 | ||
117 | * 53c875 13 (23) 20 - - - | ||
118 | * | ||
119 | */ | ||
120 | |||
/*
 * Map (slot, pin) to an IRQ via the routing table above.  The
 * COMMON_TABLE_LOOKUP macro (pci_impl.h) consumes the local
 * irq_tab/min_idsel/max_idsel/irqs_per_slot/slot/pin bindings;
 * -1 entries mean "no interrupt".
 */
static int __init
ruffian_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[11][5] __initdata = {
	      /*INT  INTA INTB INTC INTD */
		{-1,  -1,  -1,  -1,  -1},  /* IdSel 13,  21052	     */
		{-1,  -1,  -1,  -1,  -1},  /* IdSel 14,  SIO	     */
		{44,  44,  44,  44,  44},  /* IdSel 15,  21143	     */
		{-1,  -1,  -1,  -1,  -1},  /* IdSel 16,  none	     */
		{43,  43,  42,  41,  40},  /* IdSel 17,  64-bit slot */
		/* the next 6 are actually on PCI bus 1, across the bridge */
		{19,  19,  18,  17,  16},  /* IdSel  8,  slot 0	     */
		{31,  31,  30,  29,  28},  /* IdSel  9,  slot 1	     */
		{27,  27,  26,  25,  24},  /* IdSel 10,  slot 2	     */
		{39,  39,  38,  37,  36},  /* IdSel 11,  slot 3	     */
		{35,  35,  34,  33,  32},  /* IdSel 12,  slot 4	     */
		{20,  20,  20,  20,  20},  /* IdSel 13,  53c875	     */
	};
	const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
142 | |||
143 | static u8 __init | ||
144 | ruffian_swizzle(struct pci_dev *dev, u8 *pinp) | ||
145 | { | ||
146 | int slot, pin = *pinp; | ||
147 | |||
148 | if (dev->bus->number == 0) { | ||
149 | slot = PCI_SLOT(dev->devfn); | ||
150 | } | ||
151 | /* Check for the built-in bridge. */ | ||
152 | else if (PCI_SLOT(dev->bus->self->devfn) == 13) { | ||
153 | slot = PCI_SLOT(dev->devfn) + 10; | ||
154 | } | ||
155 | else | ||
156 | { | ||
157 | /* Must be a card-based bridge. */ | ||
158 | do { | ||
159 | if (PCI_SLOT(dev->bus->self->devfn) == 13) { | ||
160 | slot = PCI_SLOT(dev->devfn) + 10; | ||
161 | break; | ||
162 | } | ||
163 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); | ||
164 | |||
165 | /* Move up the chain of bridges. */ | ||
166 | dev = dev->bus->self; | ||
167 | /* Slot of the next bridge. */ | ||
168 | slot = PCI_SLOT(dev->devfn); | ||
169 | } while (dev->bus->self); | ||
170 | } | ||
171 | *pinp = pin; | ||
172 | return slot; | ||
173 | } | ||
174 | |||
#ifdef BUILDING_FOR_MILO
/*
 * The DeskStation Ruffian firmware does not record the memory size in
 * the PALimpure area, so read it out of the PYXIS Bank Configuration
 * Registers instead.  'offset' selects the bank register (valid
 * offsets are 0x800, 0x840 and 0x880 -- Ruffian only uses three
 * banks).  Returns the bank size in bytes, or 0 for a disabled or
 * unrecognized bank.
 */
static unsigned long __init
ruffian_get_bank_size(unsigned long offset)
{
	/* Decode table for the size field (bits 4:1). */
	static unsigned long size[] __initdata = {
		0x40000000UL, /* 0x00,   1G */
		0x20000000UL, /* 0x02, 512M */
		0x10000000UL, /* 0x04, 256M */
		0x08000000UL, /* 0x06, 128M */
		0x04000000UL, /* 0x08,  64M */
		0x02000000UL, /* 0x0a,  32M */
		0x01000000UL, /* 0x0c,  16M */
		0x00800000UL, /* 0x0e,   8M */
		0x80000000UL, /* 0x10,   2G */
	};
	unsigned long bank;

	bank = *(vulp)((unsigned long)PYXIS_MCR + offset);

	/* Bit 0 is BANK_ENABLE. */
	if (!(bank & 0x01))
		return 0;

	bank = (bank & 0x1e) >> 1;
	return bank < sizeof(size)/sizeof(*size) ? size[bank] : 0;
}
#endif /* BUILDING_FOR_MILO */
213 | |||
214 | /* | ||
215 | * The System Vector | ||
216 | */ | ||
217 | |||
/* Machine vector for the PYXIS-based DeskStation Ruffian.  Note the
   PIT-driven tick (ruffian_init_rtc) instead of the common RTC path. */
struct alpha_machine_vector ruffian_mv __initmv = {
	.vector_name		= "Ruffian",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_PYXIS_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= PYXIS_DAC_OFFSET,

	.nr_irqs		= 48,
	.device_interrupt	= pyxis_device_interrupt,

	.init_arch		= pyxis_init_arch,
	.init_irq		= ruffian_init_irq,
	.init_rtc		= ruffian_init_rtc,
	.init_pci		= cia_init_pci,
	.kill_arch		= ruffian_kill_arch,
	.pci_map_irq		= ruffian_map_irq,
	.pci_swizzle		= ruffian_swizzle,
};
ALIAS_MV(ruffian)
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c new file mode 100644 index 000000000000..58404243057b --- /dev/null +++ b/arch/alpha/kernel/sys_rx164.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_rx164.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the RX164 (PCA56+POLARIS). | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/bitops.h> | ||
18 | |||
19 | #include <asm/ptrace.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/core_polaris.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | #include "proto.h" | ||
30 | #include "irq_impl.h" | ||
31 | #include "pci_impl.h" | ||
32 | #include "machvec_impl.h" | ||
33 | |||
34 | |||
35 | /* Note mask bit is true for ENABLED irqs. */ | ||
36 | static unsigned long cached_irq_mask; | ||
37 | |||
/* Push MASK out to the Polaris interrupt-mask register.  The register
   lives at offset 0x74 in Polaris dense PCI config space.  */
static inline void
rx164_update_irq_hw(unsigned long mask)
{
	volatile unsigned int *irq_mask;

	irq_mask = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x74);
	*irq_mask = mask;
	mb();
	*irq_mask;		/* read back to force the write to the device */
}
48 | |||
49 | static inline void | ||
50 | rx164_enable_irq(unsigned int irq) | ||
51 | { | ||
52 | rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); | ||
53 | } | ||
54 | |||
55 | static void | ||
56 | rx164_disable_irq(unsigned int irq) | ||
57 | { | ||
58 | rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); | ||
59 | } | ||
60 | |||
/* Startup hook for the irq layer: just enable the line.  Returns 0,
   i.e. no pending interrupt to report.  */
static unsigned int
rx164_startup_irq(unsigned int irq)
{
	rx164_enable_irq(irq);
	return 0;
}
67 | |||
68 | static void | ||
69 | rx164_end_irq(unsigned int irq) | ||
70 | { | ||
71 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
72 | rx164_enable_irq(irq); | ||
73 | } | ||
74 | |||
/* Controller ops for the Polaris-managed lines (irqs 16-39).
   .ack masks the line; .end re-enables it after handling.  */
static struct hw_interrupt_type rx164_irq_type = {
	.typename	= "RX164",
	.startup	= rx164_startup_irq,
	.shutdown	= rx164_disable_irq,
	.enable		= rx164_enable_irq,
	.disable	= rx164_disable_irq,
	.ack		= rx164_disable_irq,
	.end		= rx164_end_irq,
};
84 | |||
/* Top-level device-interrupt dispatcher for the RX164.  */
static void
rx164_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	unsigned long pld;
	volatile unsigned int *dirr;
	long i;

	/* Read the interrupt summary register.  On Polaris, this is
	   the DIRR register in PCI config space (offset 0x84).  */
	dirr = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x84);
	pld = *dirr;

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);		/* index of lowest set bit */
		pld &= pld - 1;		/* clear least bit set */
		if (i == 20) {
			/* Bit 20 is the ISA cascade; rx164_init_irq wires
			   irq 16+20 to isa_cascade_irqaction.  */
			isa_no_iack_sc_device_interrupt(vector, regs);
		} else {
			handle_irq(16+i, regs);
		}
	}
}
111 | |||
/* Boot-time IRQ setup: mask everything, hand lines 16-39 to the
   RX164 controller ops, then bring up the legacy i8259 pair and the
   ISA cascade on summary bit 20 (irq 36).  */
static void __init
rx164_init_irq(void)
{
	long i;

	rx164_update_irq_hw(0);		/* start with all lines masked */
	for (i = 16; i < 40; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &rx164_irq_type;
	}

	init_i8259a_irqs();
	common_init_isa_dma();

	setup_irq(16+20, &isa_cascade_irqaction);
}
128 | |||
129 | |||
130 | /* | ||
131 | * The RX164 changed its interrupt routing between pass1 and pass2... | ||
132 | * | ||
133 | * PASS1: | ||
134 | * | ||
135 | * Slot IDSEL INTA INTB INTC INTD | ||
136 | * 0 6 5 10 15 20 | ||
137 | * 1 7 4 9 14 19 | ||
138 | * 2 5 3 8 13 18 | ||
139 | * 3 9 2 7 12 17 | ||
140 | * 4 10 1 6 11 16 | ||
141 | * | ||
142 | * PASS2: | ||
143 | * Slot IDSEL INTA INTB INTC INTD | ||
144 | * 0 5 1 7 12 17 | ||
145 | * 1 6 2 8 13 18 | ||
146 | * 2 8 3 9 14 19 | ||
147 | * 3 9 4 10 15 20 | ||
148 | * 4 10 5 11 16 6 | ||
149 | * | ||
150 | */ | ||
151 | |||
152 | /* | ||
153 | * IdSel | ||
154 | * 5 32 bit PCI option slot 0 | ||
155 | * 6 64 bit PCI option slot 1 | ||
156 | * 7 PCI-ISA bridge | ||
157 | * 7 64 bit PCI option slot 2 | ||
158 | * 9 32 bit PCI option slot 3 | ||
159 | * 10 PCI-PCI bridge | ||
160 | * | ||
161 | */ | ||
162 | |||
/* Route a PCI interrupt pin to a kernel IRQ via the slot table.
   Only the pass2 table is active; the pass1 variant is kept under
   #if 0 until a way to distinguish board revisions is found.  */
static int __init
rx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
#if 0
	static char irq_tab_pass1[6][5] __initdata = {
	  /*INT   INTA  INTB  INTC   INTD */
	  { 16+3, 16+3, 16+8, 16+13, 16+18},      /* IdSel 5,  slot 2 */
	  { 16+5, 16+5, 16+10, 16+15, 16+20},     /* IdSel 6,  slot 0 */
	  { 16+4, 16+4, 16+9, 16+14, 16+19},      /* IdSel 7,  slot 1 */
	  { -1,     -1,    -1,    -1,   -1},      /* IdSel 8, PCI/ISA bridge */
	  { 16+2, 16+2, 16+7, 16+12, 16+17},      /* IdSel 9,  slot 3 */
	  { 16+1, 16+1, 16+6, 16+11, 16+16},      /* IdSel 10, slot 4 */
	};
#else
	static char irq_tab[6][5] __initdata = {
	  /*INT   INTA  INTB  INTC   INTD */
	  { 16+0, 16+0, 16+6, 16+11, 16+16},      /* IdSel 5,  slot 0 */
	  { 16+1, 16+1, 16+7, 16+12, 16+17},      /* IdSel 6,  slot 1 */
	  { -1,     -1,    -1,    -1,   -1},      /* IdSel 7, PCI/ISA bridge */
	  { 16+2, 16+2, 16+8, 16+13, 16+18},      /* IdSel 8,  slot 2 */
	  { 16+3, 16+3, 16+9, 16+14, 16+19},      /* IdSel 9,  slot 3 */
	  { 16+4, 16+4, 16+10, 16+15, 16+5},      /* IdSel 10, PCI-PCI */
	};
#endif
	const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;

	/* JRP - Need to figure out how to distinguish pass1 from pass2,
	   and use the correct table.  */
	return COMMON_TABLE_LOOKUP;
}
193 | |||
194 | |||
195 | /* | ||
196 | * The System Vector | ||
197 | */ | ||
198 | |||
/* Machine vector for the RX164 (PCA56 + Polaris).  */
struct alpha_machine_vector rx164_mv __initmv = {
	.vector_name		= "RX164",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_POLARIS_IO,
	.machine_check		= polaris_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,

	.nr_irqs		= 40,	/* 16 legacy ISA + 24 Polaris lines */
	.device_interrupt	= rx164_device_interrupt,

	.init_arch		= polaris_init_arch,
	.init_irq		= rx164_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= NULL,
	.pci_map_irq		= rx164_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(rx164)
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c new file mode 100644 index 000000000000..a7ff84474ace --- /dev/null +++ b/arch/alpha/kernel/sys_sable.c | |||
@@ -0,0 +1,653 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_sable.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the Sable, Sable-Gamma, and Lynx systems. | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <asm/ptrace.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/core_t2.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | #include "proto.h" | ||
30 | #include "irq_impl.h" | ||
31 | #include "pci_impl.h" | ||
32 | #include "machvec_impl.h" | ||
33 | |||
/* Serializes shadow_mask updates and the hardware mask writes for
   both the Sable and Lynx interrupt controllers.  */
DEFINE_SPINLOCK(sable_lynx_irq_lock);

/* Per-platform translation between kernel IRQ numbers and hardware
   summary-register mask bits, plus the hooks that poke the hardware.  */
typedef struct irq_swizzle_struct
{
	char irq_to_mask[64];	/* kernel irq -> mask bit; -1 if unused */
	char mask_to_irq[64];	/* mask bit -> kernel irq; -1 if unused */

	/* Note mask bit is true for DISABLED irqs.  */
	unsigned long shadow_mask;

	void (*update_irq_hw)(unsigned long bit, unsigned long mask);
	void (*ack_irq_hw)(unsigned long bit);

} irq_swizzle_t;

/* Set to &sable_irq_swizzle or &lynx_irq_swizzle at init time.  */
static irq_swizzle_t *sable_lynx_irq_swizzle;

static void sable_lynx_init_irq(int nr_irqs);
52 | |||
53 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) | ||
54 | |||
55 | /***********************************************************************/ | ||
56 | /* | ||
57 | * For SABLE, which is really baroque, we manage 40 IRQ's, but the | ||
58 | * hardware really only supports 24, not via normal ISA PIC, | ||
59 | * but cascaded custom 8259's, etc. | ||
60 | * 0-7 (char at 536) | ||
61 | * 8-15 (char at 53a) | ||
62 | * 16-23 (char at 53c) | ||
63 | * | ||
64 | * Summary Registers (536/53a/53c): | ||
65 | * | ||
66 | * Bit Meaning Kernel IRQ | ||
67 | *------------------------------------------ | ||
68 | * 0 PCI slot 0 34 | ||
69 | * 1 NCR810 (builtin) 33 | ||
70 | * 2 TULIP (builtin) 32 | ||
71 | * 3 mouse 12 | ||
72 | * 4 PCI slot 1 35 | ||
73 | * 5 PCI slot 2 36 | ||
74 | * 6 keyboard 1 | ||
75 | * 7 floppy 6 | ||
76 | * 8 COM2 3 | ||
77 | * 9 parallel port 7 | ||
78 | *10 EISA irq 3 - | ||
79 | *11 EISA irq 4 - | ||
80 | *12 EISA irq 5 5 | ||
81 | *13 EISA irq 6 - | ||
82 | *14 EISA irq 7 - | ||
83 | *15 COM1 4 | ||
84 | *16 EISA irq 9 9 | ||
85 | *17 EISA irq 10 10 | ||
86 | *18 EISA irq 11 11 | ||
87 | *19 EISA irq 12 - | ||
88 | *20 EISA irq 13 - | ||
89 | *21 EISA irq 14 14 | ||
90 | *22 NC 15 | ||
91 | *23 IIC - | ||
92 | */ | ||
93 | |||
/* Write the 8-bit slice of MASK that contains BIT to the mask port
   of the controller owning that bit (0x537 / 0x53b / 0x53d for bits
   0-7 / 8-15 / 16-23 respectively).  */
static void
sable_update_irq_hw(unsigned long bit, unsigned long mask)
{
	int port;

	if (bit >= 16) {
		port = 0x53d;
		mask >>= 16;
	} else if (bit >= 8) {
		port = 0x53b;
		mask >>= 8;
	} else {
		port = 0x537;
	}

	outb(mask, port);
}
109 | |||
/* Acknowledge mask bit BIT: send an EOI-style command (0xE0 | level)
   to the owning slave controller, then to the master at 0x534 for
   the cascade line (1/3/4) that slave hangs off.  NOTE(review): the
   exact 0xE0 command semantics come from the Sable's custom cascaded
   8259-like hardware -- confirm against the platform spec.  */
static void
sable_ack_irq_hw(unsigned long bit)
{
	int port, val1, val2;

	if (bit >= 16) {
		port = 0x53c;
		val1 = 0xE0 | (bit - 16);
		val2 = 0xE0 | 4;
	} else if (bit >= 8) {
		port = 0x53a;
		val1 = 0xE0 | (bit - 8);
		val2 = 0xE0 | 3;
	} else {
		port = 0x536;
		val1 = 0xE0 | (bit - 0);
		val2 = 0xE0 | 1;
	}

	outb(val1, port);	/* ack the slave */
	outb(val2, 0x534);	/* ack the master */
}
132 | |||
/* Sable IRQ <-> mask-bit routing; entries must stay consistent with
   the summary-register table above and with sable_map_irq below.
   shadow_mask starts at -1: every line disabled.  */
static irq_swizzle_t sable_irq_swizzle = {
	{
		-1,  6, -1,  8, 15, 12,  7,  9,	/* pseudo PIC  0-7  */
		-1, 16, 17, 18,  3, -1, 21, 22,	/* pseudo PIC  8-15 */
		-1, -1, -1, -1, -1, -1, -1, -1,	/* pseudo EISA 0-7  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/* pseudo EISA 8-15 */
		 2,  1,  0,  4,  5, -1, -1, -1,	/* pseudo PCI */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1	/*  */
	},
	{
		34, 33, 32, 12, 35, 36,  1,  6,	/* mask 0-7  */
		 3,  7, -1, -1,  5, -1, -1,  4,	/* mask 8-15 */
		 9, 10, 11, -1, -1, 14, 15, -1,	/* mask 16-23 */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1	/*  */
	},
	-1,				/* shadow_mask: all disabled */
	sable_update_irq_hw,
	sable_ack_irq_hw
};
158 | |||
/* Boot-time IRQ setup for Sable: mask all three slave controllers,
   enable the cascades in the master, then do the shared init for
   40 IRQs.  */
static void __init
sable_init_irq(void)
{
	outb(-1, 0x537);	/* slave 0 */
	outb(-1, 0x53b);	/* slave 1 */
	outb(-1, 0x53d);	/* slave 2 */
	outb(0x44, 0x535);	/* enable cascades in master */

	sable_lynx_irq_swizzle = &sable_irq_swizzle;
	sable_lynx_init_irq(40);
}
170 | |||
171 | /* | ||
172 | * PCI Fixup configuration for ALPHA SABLE (2100). | ||
173 | * | ||
174 | * The device to slot mapping looks like: | ||
175 | * | ||
176 | * Slot Device | ||
177 | * 0 TULIP | ||
178 | * 1 SCSI | ||
179 | * 2 PCI-EISA bridge | ||
180 | * 3 none | ||
181 | * 4 none | ||
182 | * 5 none | ||
183 | * 6 PCI on board slot 0 | ||
184 | * 7 PCI on board slot 1 | ||
185 | * 8 PCI on board slot 2 | ||
186 | * | ||
187 | * | ||
188 | * This two layered interrupt approach means that we allocate IRQ 16 and | ||
189 | * above for PCI interrupts. The IRQ relates to which bit the interrupt | ||
190 | * comes in on. This makes interrupt processing much easier. | ||
191 | */ | ||
192 | /* | ||
193 | * NOTE: the IRQ assignments below are arbitrary, but need to be consistent | ||
194 | * with the values in the irq swizzling tables above. | ||
195 | */ | ||
196 | |||
197 | static int __init | ||
198 | sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
199 | { | ||
200 | static char irq_tab[9][5] __initdata = { | ||
201 | /*INT INTA INTB INTC INTD */ | ||
202 | { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ | ||
203 | { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ | ||
204 | { -1, -1, -1, -1, -1}, /* IdSel 2, SIO */ | ||
205 | { -1, -1, -1, -1, -1}, /* IdSel 3, none */ | ||
206 | { -1, -1, -1, -1, -1}, /* IdSel 4, none */ | ||
207 | { -1, -1, -1, -1, -1}, /* IdSel 5, none */ | ||
208 | { 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */ | ||
209 | { 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */ | ||
210 | { 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */ | ||
211 | }; | ||
212 | long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5; | ||
213 | return COMMON_TABLE_LOOKUP; | ||
214 | } | ||
215 | #endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */ | ||
216 | |||
217 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) | ||
218 | |||
219 | /***********************************************************************/ | ||
220 | /* LYNX hardware specifics | ||
221 | */ | ||
222 | /* | ||
223 | * For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC. | ||
224 | * | ||
225 | * Bit Meaning Kernel IRQ | ||
226 | *------------------------------------------ | ||
227 | * 0 | ||
228 | * 1 | ||
229 | * 2 | ||
230 | * 3 mouse 12 | ||
231 | * 4 | ||
232 | * 5 | ||
233 | * 6 keyboard 1 | ||
234 | * 7 floppy 6 | ||
235 | * 8 COM2 3 | ||
236 | * 9 parallel port 7 | ||
237 | *10 EISA irq 3 - | ||
238 | *11 EISA irq 4 - | ||
239 | *12 EISA irq 5 5 | ||
240 | *13 EISA irq 6 - | ||
241 | *14 EISA irq 7 - | ||
242 | *15 COM1 4 | ||
243 | *16 EISA irq 9 9 | ||
244 | *17 EISA irq 10 10 | ||
245 | *18 EISA irq 11 11 | ||
246 | *19 EISA irq 12 - | ||
247 | *20 | ||
248 | *21 EISA irq 14 14 | ||
249 | *22 EISA irq 15 15 | ||
250 | *23 IIC - | ||
251 | *24 VGA (builtin) - | ||
252 | *25 | ||
253 | *26 | ||
254 | *27 | ||
255 | *28 NCR810 (builtin) 28 | ||
256 | *29 | ||
257 | *30 | ||
258 | *31 | ||
259 | *32 PCI 0 slot 4 A primary bus 32 | ||
260 | *33 PCI 0 slot 4 B primary bus 33 | ||
261 | *34 PCI 0 slot 4 C primary bus 34 | ||
262 | *35 PCI 0 slot 4 D primary bus | ||
263 | *36 PCI 0 slot 5 A primary bus | ||
264 | *37 PCI 0 slot 5 B primary bus | ||
265 | *38 PCI 0 slot 5 C primary bus | ||
266 | *39 PCI 0 slot 5 D primary bus | ||
267 | *40 PCI 0 slot 6 A primary bus | ||
268 | *41 PCI 0 slot 6 B primary bus | ||
269 | *42 PCI 0 slot 6 C primary bus | ||
270 | *43 PCI 0 slot 6 D primary bus | ||
271 | *44 PCI 0 slot 7 A primary bus | ||
272 | *45 PCI 0 slot 7 B primary bus | ||
273 | *46 PCI 0 slot 7 C primary bus | ||
274 | *47 PCI 0 slot 7 D primary bus | ||
275 | *48 PCI 0 slot 0 A secondary bus | ||
276 | *49 PCI 0 slot 0 B secondary bus | ||
277 | *50 PCI 0 slot 0 C secondary bus | ||
278 | *51 PCI 0 slot 0 D secondary bus | ||
279 | *52 PCI 0 slot 1 A secondary bus | ||
280 | *53 PCI 0 slot 1 B secondary bus | ||
281 | *54 PCI 0 slot 1 C secondary bus | ||
282 | *55 PCI 0 slot 1 D secondary bus | ||
283 | *56 PCI 0 slot 2 A secondary bus | ||
284 | *57 PCI 0 slot 2 B secondary bus | ||
285 | *58 PCI 0 slot 2 C secondary bus | ||
286 | *59 PCI 0 slot 2 D secondary bus | ||
287 | *60 PCI 0 slot 3 A secondary bus | ||
288 | *61 PCI 0 slot 3 B secondary bus | ||
289 | *62 PCI 0 slot 3 C secondary bus | ||
290 | *63 PCI 0 slot 3 D secondary bus | ||
291 | */ | ||
292 | |||
/* Write the full 64-bit MASK to the Lynx interrupt controller.  BIT
   is unused here: the whole mask is rewritten via the T3/T4
   indirect-register pair (AIR selects the register, DIR carries the
   data).  */
static void
lynx_update_irq_hw(unsigned long bit, unsigned long mask)
{
	/*
	 * Write the AIR register on the T3/T4 with the
	 * address of the IC mask register (offset 0x40)
	 */
	*(vulp)T2_AIR = 0x40;
	mb();
	*(vulp)T2_AIR;		/* re-read to force write */
	mb();
	*(vulp)T2_DIR = mask;
	mb();
	mb();
}
308 | |||
/* Acknowledge mask bit BIT by writing its number to the T2 VAR
   register.  NOTE(review): VAR-write-as-ack is taken from the T3/T4
   programming model -- confirm against the chipset documentation.  */
static void
lynx_ack_irq_hw(unsigned long bit)
{
	*(vulp)T2_VAR = (u_long) bit;
	mb();
	mb();
}
316 | |||
/* Lynx IRQ <-> mask-bit routing; entries must stay consistent with
   the 64-bit summary table above and lynx_map_irq below.  Bits 32-63
   map 1:1 to kernel IRQs.  shadow_mask starts at -1: all disabled.  */
static irq_swizzle_t lynx_irq_swizzle = {
	{ /* irq_to_mask */
		-1,  6, -1,  8, 15, 12,  7,  9,	/* pseudo PIC  0-7  */
		-1, 16, 17, 18,  3, -1, 21, 22,	/* pseudo PIC  8-15 */
		-1, -1, -1, -1, -1, -1, -1, -1,	/* pseudo */
		-1, -1, -1, -1, 28, -1, -1, -1,	/* pseudo */
		32, 33, 34, 35, 36, 37, 38, 39,	/* mask 32-39 */
		40, 41, 42, 43, 44, 45, 46, 47,	/* mask 40-47 */
		48, 49, 50, 51, 52, 53, 54, 55,	/* mask 48-55 */
		56, 57, 58, 59, 60, 61, 62, 63	/* mask 56-63 */
	},
	{ /* mask_to_irq */
		-1, -1, -1, 12, -1, -1,  1,  6,	/* mask 0-7  */
		 3,  7, -1, -1,  5, -1, -1,  4,	/* mask 8-15 */
		 9, 10, 11, -1, -1, 14, 15, -1,	/* mask 16-23 */
		-1, -1, -1, -1, 28, -1, -1, -1,	/* mask 24-31 */
		32, 33, 34, 35, 36, 37, 38, 39,	/* mask 32-39 */
		40, 41, 42, 43, 44, 45, 46, 47,	/* mask 40-47 */
		48, 49, 50, 51, 52, 53, 54, 55,	/* mask 48-55 */
		56, 57, 58, 59, 60, 61, 62, 63	/* mask 56-63 */
	},
	-1,				/* shadow_mask: all disabled */
	lynx_update_irq_hw,
	lynx_ack_irq_hw
};
342 | |||
/* Boot-time IRQ setup for Lynx: select the Lynx swizzle tables, then
   do the shared init for all 64 IRQs.  */
static void __init
lynx_init_irq(void)
{
	sable_lynx_irq_swizzle = &lynx_irq_swizzle;
	sable_lynx_init_irq(64);
}
349 | |||
350 | /* | ||
351 | * PCI Fixup configuration for ALPHA LYNX (2100A) | ||
352 | * | ||
353 | * The device to slot mapping looks like: | ||
354 | * | ||
355 | * Slot Device | ||
356 | * 0 none | ||
357 | * 1 none | ||
358 | * 2 PCI-EISA bridge | ||
359 | * 3 PCI-PCI bridge | ||
360 | * 4 NCR 810 (Demi-Lynx only) | ||
361 | * 5 none | ||
362 | * 6 PCI on board slot 4 | ||
363 | * 7 PCI on board slot 5 | ||
364 | * 8 PCI on board slot 6 | ||
365 | * 9 PCI on board slot 7 | ||
366 | * | ||
367 | * And behind the PPB we have: | ||
368 | * | ||
369 | * 11 PCI on board slot 0 | ||
370 | * 12 PCI on board slot 1 | ||
371 | * 13 PCI on board slot 2 | ||
372 | * 14 PCI on board slot 3 | ||
373 | */ | ||
374 | /* | ||
375 | * NOTE: the IRQ assignments below are arbitrary, but need to be consistent | ||
376 | * with the values in the irq swizzling tables above. | ||
377 | */ | ||
378 | |||
/* Route a PCI interrupt pin to a kernel IRQ.  Table rows cover
   IdSels 2-20 (min_idsel = 2); rows past the PPB marker are for
   devices behind the built-in bridge, reached via lynx_swizzle's
   slot+11 offset.  NOTE(review): the "IdSel 13..25" numbers in the
   row comments don't match min_idsel = 2 -- presumably a different
   numbering convention; verify before editing the table.  */
static int __init
lynx_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[19][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 13,  PCEB   */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 14,  PPB    */
		{   28,    28,    28,    28,    28},  /* IdSel 15,  NCR demi */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 16,  none   */
		{   32,    32,    33,    34,    35},  /* IdSel 17,  slot 4 */
		{   36,    36,    37,    38,    39},  /* IdSel 18,  slot 5 */
		{   40,    40,    41,    42,    43},  /* IdSel 19,  slot 6 */
		{   44,    44,    45,    46,    47},  /* IdSel 20,  slot 7 */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 22,  none   */
		/* The following are actually behind the PPB. */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 16   none */
		{   28,    28,    28,    28,    28},  /* IdSel 17   NCR lynx */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 18   none */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 19   none */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 20   none */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 21   none */
		{   48,    48,    49,    50,    51},  /* IdSel 22   slot 0 */
		{   52,    52,    53,    54,    55},  /* IdSel 23   slot 1 */
		{   56,    56,    57,    58,    59},  /* IdSel 24   slot 2 */
		{   60,    60,    61,    62,    63}   /* IdSel 25   slot 3 */
	};
	const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
408 | |||
/* Compute the table slot for DEV, swizzling *pinp up through any
   card-based bridges.  Devices behind the built-in PPB (device 3 on
   the primary bus; see the slot map above) get slot+11 so they land
   in the secondary-bus rows of lynx_map_irq's table.  */
static u8 __init
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
	int slot, pin = *pinp;

	if (dev->bus->number == 0) {
		slot = PCI_SLOT(dev->devfn);
	}
	/* Check for the built-in bridge */
	else if (PCI_SLOT(dev->bus->self->devfn) == 3) {
		slot = PCI_SLOT(dev->devfn) + 11;
	}
	else
	{
		/* Must be a card-based bridge.  */
		do {
			if (PCI_SLOT(dev->bus->self->devfn) == 3) {
				slot = PCI_SLOT(dev->devfn) + 11;
				break;
			}
			/* Swizzle the pin through this bridge.  */
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;

			/* Move up the chain of bridges.  */
			dev = dev->bus->self;
			/* Slot of the next bridge.  */
			slot = PCI_SLOT(dev->devfn);
		} while (dev->bus->self);
	}
	*pinp = pin;
	return slot;
}
440 | |||
441 | #endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */ | ||
442 | |||
443 | /***********************************************************************/ | ||
444 | /* GENERIC irq routines */ | ||
445 | |||
446 | static inline void | ||
447 | sable_lynx_enable_irq(unsigned int irq) | ||
448 | { | ||
449 | unsigned long bit, mask; | ||
450 | |||
451 | bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; | ||
452 | spin_lock(&sable_lynx_irq_lock); | ||
453 | mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); | ||
454 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); | ||
455 | spin_unlock(&sable_lynx_irq_lock); | ||
456 | #if 0 | ||
457 | printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", | ||
458 | __FUNCTION__, mask, bit, irq); | ||
459 | #endif | ||
460 | } | ||
461 | |||
462 | static void | ||
463 | sable_lynx_disable_irq(unsigned int irq) | ||
464 | { | ||
465 | unsigned long bit, mask; | ||
466 | |||
467 | bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; | ||
468 | spin_lock(&sable_lynx_irq_lock); | ||
469 | mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; | ||
470 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); | ||
471 | spin_unlock(&sable_lynx_irq_lock); | ||
472 | #if 0 | ||
473 | printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", | ||
474 | __FUNCTION__, mask, bit, irq); | ||
475 | #endif | ||
476 | } | ||
477 | |||
/* Startup hook: just enable the line; returns 0 (nothing pending).  */
static unsigned int
sable_lynx_startup_irq(unsigned int irq)
{
	sable_lynx_enable_irq(irq);
	return 0;
}
484 | |||
485 | static void | ||
486 | sable_lynx_end_irq(unsigned int irq) | ||
487 | { | ||
488 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
489 | sable_lynx_enable_irq(irq); | ||
490 | } | ||
491 | |||
/* Ack hook: mask the line in shadow_mask and hardware, then ack it,
   all under the lock so the mask/ack pair is atomic.  */
static void
sable_lynx_mask_and_ack_irq(unsigned int irq)
{
	unsigned long bit, mask;

	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
	spin_lock(&sable_lynx_irq_lock);
	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
	sable_lynx_irq_swizzle->ack_irq_hw(bit);
	spin_unlock(&sable_lynx_irq_lock);
}
504 | |||
/* Controller ops shared by Sable and Lynx; the platform-specific
   behavior is behind sable_lynx_irq_swizzle's hooks.  */
static struct hw_interrupt_type sable_lynx_irq_type = {
	.typename	= "SABLE/LYNX",
	.startup	= sable_lynx_startup_irq,
	.shutdown	= sable_lynx_disable_irq,
	.enable		= sable_lynx_enable_irq,
	.disable	= sable_lynx_disable_irq,
	.ack		= sable_lynx_mask_and_ack_irq,
	.end		= sable_lynx_end_irq,
};
514 | |||
/* SRM device-interrupt dispatcher: decode the mask-bit index from
   the SRM vector (0x800 + 0x10 * bit) and translate it to a kernel
   IRQ through the active swizzle table.  */
static void
sable_lynx_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	/* Note that the vector reported by the SRM PALcode corresponds
	   to the interrupt mask bits, but we have to manage via the
	   so-called legacy IRQs for many common devices.  */

	int bit, irq;

	bit = (vector - 0x800) >> 4;
	irq = sable_lynx_irq_swizzle->mask_to_irq[bit];
#if 0
	printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
	       __FUNCTION__, vector, bit, irq);
#endif
	handle_irq(irq, regs);
}
532 | |||
533 | static void __init | ||
534 | sable_lynx_init_irq(int nr_irqs) | ||
535 | { | ||
536 | long i; | ||
537 | |||
538 | for (i = 0; i < nr_irqs; ++i) { | ||
539 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | ||
540 | irq_desc[i].handler = &sable_lynx_irq_type; | ||
541 | } | ||
542 | |||
543 | common_init_isa_dma(); | ||
544 | } | ||
545 | |||
/* PCI init hook: nothing platform-specific beyond the common path.  */
static void __init
sable_lynx_init_pci(void)
{
	common_init_pci();
}
551 | |||
552 | /*****************************************************************/ | ||
553 | /* | ||
554 | * The System Vectors | ||
555 | * | ||
556 | * In order that T2_HAE_ADDRESS should be a constant, we play | ||
557 | * these games with GAMMA_BIAS. | ||
558 | */ | ||
559 | |||
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS 0
/* Machine vector for plain (EV4) Sable; GAMMA_BIAS is forced to 0
   here so T2_HAE_ADDRESS stays constant (see comment above).  */
struct alpha_machine_vector sable_mv __initmv = {
	.vector_name		= "Sable",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_T2_IO,
	.machine_check		= t2_machine_check,
	.max_isa_dma_address	= ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= T2_DEFAULT_MEM_BASE,

	.nr_irqs		= 40,
	.device_interrupt	= sable_lynx_srm_device_interrupt,

	.init_arch		= t2_init_arch,
	.init_irq		= sable_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= sable_lynx_init_pci,
	.kill_arch		= t2_kill_arch,
	.pci_map_irq		= sable_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .t2 = {
	    .gamma_bias		= 0
	} }
};
ALIAS_MV(sable)
#endif /* GENERIC || (SABLE && !GAMMA) */
591 | |||
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
/* Machine vector for Sable-Gamma (EV5 CPU card); differs from plain
   Sable only in the MMU selection and the non-zero gamma_bias.  */
struct alpha_machine_vector sable_gamma_mv __initmv = {
	.vector_name		= "Sable-Gamma",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_T2_IO,
	.machine_check		= t2_machine_check,
	.max_isa_dma_address	= ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= T2_DEFAULT_MEM_BASE,

	.nr_irqs		= 40,
	.device_interrupt	= sable_lynx_srm_device_interrupt,

	.init_arch		= t2_init_arch,
	.init_irq		= sable_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= sable_lynx_init_pci,
	.kill_arch		= t2_kill_arch,
	.pci_map_irq		= sable_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .t2 = {
	    .gamma_bias		= _GAMMA_BIAS
	} }
};
ALIAS_MV(sable_gamma)
#endif /* GENERIC || (SABLE && GAMMA) */
623 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
/* Machine vector for Lynx (2100A): 64 IRQs and its own swizzle/map
   routines, otherwise sharing the T2 core logic with Sable.  */
struct alpha_machine_vector lynx_mv __initmv = {
	.vector_name		= "Lynx",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_T2_IO,
	.machine_check		= t2_machine_check,
	.max_isa_dma_address	= ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= T2_DEFAULT_MEM_BASE,

	.nr_irqs		= 64,
	.device_interrupt	= sable_lynx_srm_device_interrupt,

	.init_arch		= t2_init_arch,
	.init_irq		= lynx_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= sable_lynx_init_pci,
	.kill_arch		= t2_kill_arch,
	.pci_map_irq		= lynx_map_irq,
	.pci_swizzle		= lynx_swizzle,

	.sys = { .t2 = {
	    .gamma_bias		= _GAMMA_BIAS
	} }
};
ALIAS_MV(lynx)
#endif /* GENERIC || LYNX */
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c new file mode 100644 index 000000000000..47df48a6ddb7 --- /dev/null +++ b/arch/alpha/kernel/sys_sio.c | |||
@@ -0,0 +1,438 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_sio.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code for all boards that route the PCI interrupts through the SIO | ||
9 | * PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB), | ||
10 | * Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1. | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/tty.h> | ||
21 | |||
22 | #include <asm/compiler.h> | ||
23 | #include <asm/ptrace.h> | ||
24 | #include <asm/system.h> | ||
25 | #include <asm/dma.h> | ||
26 | #include <asm/irq.h> | ||
27 | #include <asm/mmu_context.h> | ||
28 | #include <asm/io.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/core_apecs.h> | ||
31 | #include <asm/core_lca.h> | ||
32 | #include <asm/tlbflush.h> | ||
33 | |||
34 | #include "proto.h" | ||
35 | #include "irq_impl.h" | ||
36 | #include "pci_impl.h" | ||
37 | #include "machvec_impl.h" | ||
38 | |||
#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Save LCA configuration data as the console had it set up.
   Captured by sio_pci_route() and restored by sio_kill_arch().  */
struct
{
	unsigned int orig_route_tab; /* for SAVE/RESTORE */
} saved_config __attribute((common));
#endif
46 | |||
47 | |||
/*
 * Interrupt setup for all SIO-based boards: route device interrupts
 * through SRM when the SRM console is in use, then bring up the two
 * cascaded i8259A PICs and the ISA DMA cascade.
 */
static void __init
sio_init_irq(void)
{
	if (alpha_using_srm)
		alpha_mv.device_interrupt = srm_device_interrupt;

	init_i8259a_irqs();
	common_init_isa_dma();
}
57 | |||
/*
 * AlphaBook1 arch init: record the fixed LCD geometry in screen_info
 * for the console layer, then fall through to common LCA core init.
 */
static inline void __init
alphabook1_init_arch(void)
{
	/* The AlphaBook1 has LCD video fixed at 800x600,
	   37 rows and 100 cols. */
	screen_info.orig_y = 37;
	screen_info.orig_video_cols = 100;
	screen_info.orig_video_lines = 37;

	lca_init_arch();
}
69 | |||
70 | |||
71 | /* | ||
72 | * sio_route_tab selects irq routing in PCI/ISA bridge so that: | ||
73 | * PIRQ0 -> irq 15 | ||
74 | * PIRQ1 -> irq 9 | ||
75 | * PIRQ2 -> irq 10 | ||
76 | * PIRQ3 -> irq 11 | ||
77 | * | ||
78 | * This probably ought to be configurable via MILO. For | ||
79 | * example, sound boards seem to like using IRQ 9. | ||
80 | * | ||
81 | * This is NOT how we should do it. PIRQ0-X should have | ||
82 | * their own IRQ's, the way intel uses the IO-APIC irq's. | ||
83 | */ | ||
84 | |||
/*
 * Program the SIO bridge's PIRQ route-control register (config offset
 * 0x60 on device 7, function 0) with this platform's routing table.
 * When SRM restore is configured, the console's original value is read
 * and saved first so sio_kill_arch() can put it back at shutdown.
 */
static void __init
sio_pci_route(void)
{
#if defined(ALPHA_RESTORE_SRM_SETUP)
	/* First, read and save the original setting. */
	pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
				  &saved_config.orig_route_tab);
	printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__,
	       saved_config.orig_route_tab, alpha_mv.sys.sio.route_tab);
#endif

	/* Now override with desired setting. */
	pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
				   alpha_mv.sys.sio.route_tab);
}
100 | |||
/*
 * Walk all PCI devices and build a bitmask of the irq lines in use, so
 * those lines can be switched to level-triggered mode.  Bridges are
 * skipped (they do not generate interrupts themselves) except PCMCIA
 * bridges, which do.
 *
 * Returns a bitmask with bit N set when some device uses irq N.
 */
static unsigned int __init
sio_collect_irq_levels(void)
{
	unsigned int level_bits = 0;
	struct pci_dev *dev = NULL;

	/* Iterate through the devices, collecting IRQ levels. */
	while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
		    (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
			continue;

		if (dev->irq)
			level_bits |= (1 << dev->irq);
	}
	return level_bits;
}
118 | |||
/*
 * Merge the given irq bitmask into the i8259 edge/level control
 * registers (ELCR, I/O ports 0x4d0/0x4d1) so the listed PCI irqs
 * become level sensitive.  Bits for irqs 9, 10, 11 and 15 (mask
 * 0x71ff clears them) are dropped from the pre-existing setting so
 * only irqs actually claimed by devices end up level-triggered.
 */
static void __init
sio_fixup_irq_levels(unsigned int level_bits)
{
	unsigned int old_level_bits;

	/*
	 * Now, make all PCI interrupts level sensitive.  Notice:
	 * these registers must be accessed byte-wise.  inw()/outw()
	 * don't work.
	 *
	 * Make sure to turn off any level bits set for IRQs 9,10,11,15,
	 * so that the only bits getting set are for devices actually found.
	 * Note that we do preserve the remainder of the bits, which we hope
	 * will be set correctly by ARC/SRM.
	 *
	 * Note: we at least preserve any level-set bits on AlphaBook1
	 */
	old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);

	level_bits |= (old_level_bits & 0x71ff);

	outb((level_bits >> 0) & 0xff, 0x4d0);
	outb((level_bits >> 8) & 0xff, 0x4d1);
}
143 | |||
/*
 * Map a Noname/Avanti PCI slot+pin to an irq.  The table yields a
 * PIRQ index (0-3) or -1; the final irq is the corresponding byte
 * extracted from this platform's route_tab (see sio_route_tab comment
 * above).  COMMON_TABLE_LOOKUP expects the local names irq_tab,
 * min_idsel, max_idsel and irqs_per_slot.
 */
static inline int __init
noname_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	/*
	 * The Noname board has 5 PCI slots with each of the 4
	 * interrupt pins routed to different pins on the PCI/ISA
	 * bridge (PIRQ0-PIRQ3).  The table below is based on
	 * information available at:
	 *
	 *	http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt
	 *
	 * I have no information on the Avanti interrupt routing, but
	 * the routing seems to be identical to the Noname except
	 * that the Avanti has an additional slot whose routing I'm
	 * unsure of.
	 *
	 * pirq_tab[0] is a fake entry to deal with old PCI boards
	 * that have the interrupt pin number hardwired to 0 (meaning
	 * that they use the default INTA line, if they are interrupt
	 * driven at all).
	 */
	static char irq_tab[][5] __initdata = {
		/*INT A   B   C   D */
		{ 3,  3,  3,  3,  3}, /* idsel  6 (53c810) */
		{-1, -1, -1, -1, -1}, /* idsel  7 (SIO: PCI/ISA bridge) */
		{ 2,  2, -1, -1, -1}, /* idsel  8 (Hack: slot closest ISA) */
		{-1, -1, -1, -1, -1}, /* idsel  9 (unused) */
		{-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
		{ 0,  0,  2,  1,  0}, /* idsel 11 KN25_PCI_SLOT0 */
		{ 1,  1,  0,  2,  1}, /* idsel 12 KN25_PCI_SLOT1 */
		{ 2,  2,  1,  0,  2}, /* idsel 13 KN25_PCI_SLOT2 */
		{ 0,  0,  0,  0,  0}, /* idsel 14 AS255 TULIP */
	};
	const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP, tmp;
	/* Translate PIRQ index -> ISA irq via the route table byte.  */
	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
	return irq >= 0 ? tmp : -1;
}
182 | |||
/*
 * Platform 2000 slot+pin to irq mapping.  As with noname_map_irq, the
 * table gives a PIRQ index that is translated through route_tab; the
 * local names are consumed by the COMMON_TABLE_LOOKUP macro.
 */
static inline int __init
p2k_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[][5] __initdata = {
		/*INT A   B   C   D */
		{ 0,  0, -1, -1, -1}, /* idsel  6 (53c810) */
		{-1, -1, -1, -1, -1}, /* idsel  7 (SIO: PCI/ISA bridge) */
		{ 1,  1,  2,  3,  0}, /* idsel  8 (slot A) */
		{ 2,  2,  3,  0,  1}, /* idsel  9 (slot B) */
		{-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
		{-1, -1, -1, -1, -1}, /* idsel 11 (unused) */
		{ 3,  3, -1, -1, -1}, /* idsel 12 (CMD0646) */
	};
	const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP, tmp;
	/* Translate PIRQ index -> ISA irq via the route table byte.  */
	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
	return irq >= 0 ? tmp : -1;
}
201 | |||
/*
 * PCI init for Noname-class boards: common PCI setup, program the
 * PIRQ routing, switch the in-use irqs to level triggering, and
 * enable the IDE on the NS87312 SuperIO chip at config port 0x26e.
 */
static inline void __init
noname_init_pci(void)
{
	common_init_pci();
	sio_pci_route();
	sio_fixup_irq_levels(sio_collect_irq_levels());
	ns87312_enable_ide(0x26e);
}
210 | |||
/*
 * AlphaBook1 PCI init.  Beyond the common SIO setup this must
 * (1) disable PCI burst mode on any NCR 53c8xx SCSI chip (the Cirrus
 * 6729 PCMCIA chip cannot tolerate bursts), (2) leave all irqs edge
 * triggered, and (3) force the VGA chip's PR1 register to report 1Mb
 * of video memory.  The port 0x3ce/0x3cf pairs are the VGA graphics
 * controller index/data registers; the exact unlock sequence follows
 * the chip's documented PR-register protocol.
 */
static inline void __init
alphabook1_init_pci(void)
{
	struct pci_dev *dev;
	unsigned char orig, config;

	common_init_pci();
	sio_pci_route();

	/*
	 * On the AlphaBook1, the PCMCIA chip (Cirrus 6729)
	 * is sensitive to PCI bus bursts, so we must DISABLE
	 * burst mode for the NCR 8xx SCSI... :-(
	 *
	 * Note that the NCR810 SCSI driver must preserve the
	 * setting of the bit in order for this to work.  At the
	 * moment (2.0.29), ncr53c8xx.c does NOT do this, but
	 * 53c7,8xx.c DOES.
	 */

	dev = NULL;
	while ((dev = pci_find_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
		if (dev->device == PCI_DEVICE_ID_NCR_53C810
		    || dev->device == PCI_DEVICE_ID_NCR_53C815
		    || dev->device == PCI_DEVICE_ID_NCR_53C820
		    || dev->device == PCI_DEVICE_ID_NCR_53C825) {
			unsigned long io_port;
			unsigned char ctest4;

			io_port = dev->resource[0].start;
			/* CTEST4 register, bit 0x80 = burst disable.  */
			ctest4 = inb(io_port+0x21);
			if (!(ctest4 & 0x80)) {
				printk("AlphaBook1 NCR init: setting"
				       " burst disable\n");
				outb(ctest4 | 0x80, io_port+0x21);
			}
		}
	}

	/* Do not set *ANY* level triggers for AlphaBook1. */
	sio_fixup_irq_levels(0);

	/* Make sure that register PR1 indicates 1Mb mem */
	outb(0x0f, 0x3ce); orig = inb(0x3cf);   /* read PR5  */
	outb(0x0f, 0x3ce); outb(0x05, 0x3cf);   /* unlock PR0-4 */
	outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */
	if ((config & 0xc0) != 0xc0) {
		printk("AlphaBook1 VGA init: setting 1Mb memory\n");
		config |= 0xc0;
		outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */
	}
	outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */
}
264 | |||
/*
 * Platform teardown hook: restore the SIO bridge's PIRQ route register
 * to the value the console originally programmed (saved by
 * sio_pci_route()).  @mode is the halt/reboot mode and is unused here.
 */
void
sio_kill_arch(int mode)
{
#if defined(ALPHA_RESTORE_SRM_SETUP)
	/* Since we cannot read the PCI DMA Window CSRs, we
	 * cannot restore them here.
	 *
	 * However, we CAN read the PIRQ route register, so restore it
	 * now...
	 */
	pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
				   saved_config.orig_route_tab);
#endif
}
279 | |||
280 | |||
281 | /* | ||
282 | * The System Vectors | ||
283 | */ | ||
284 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1)
/* Machine vector for the AlphaBook1 laptop (LCA core, EV4 CPU).  */
struct alpha_machine_vector alphabook1_mv __initmv = {
	.vector_name		= "AlphaBook1",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= alphabook1_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= alphabook1_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		/* NCR810 SCSI is 14, PCMCIA controller is 15.  */
		.route_tab	= 0x0e0f0a0a,
	}}
};
ALIAS_MV(alphabook1)
#endif
314 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI)
/* Machine vector for the Avanti (AlphaStation, APECS core, EV4).  */
struct alpha_machine_vector avanti_mv __initmv = {
	.vector_name		= "Avanti",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= apecs_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		.route_tab	= 0x0b0a0e0f,
	}}
};
ALIAS_MV(avanti)
#endif
343 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME)
/* Machine vector for the Noname (AXPpci33) and UDB/Multia (LCA core).  */
struct alpha_machine_vector noname_mv __initmv = {
	.vector_name		= "Noname",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= srm_device_interrupt,

	.init_arch		= lca_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		/* For UDB, the only available PCI slot must not map to IRQ 9,
		   since that's the builtin MSS sound chip. That PCI slot
		   will map to PIRQ1 (for INTA at least), so we give it IRQ 15
		   instead.

		   Unfortunately we have to do this for NONAME as well, since
		   they are co-indicated when the platform type "Noname" is
		   selected... :-( */

		.route_tab	= 0x0b0a0f0d,
	}}
};
ALIAS_MV(noname)
#endif
381 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K)
/* Machine vector for Kenetics' Platform 2000 (LCA core, EV4).  */
struct alpha_machine_vector p2k_mv __initmv = {
	.vector_name		= "Platform2000",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= srm_device_interrupt,

	.init_arch		= lca_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= p2k_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		.route_tab	= 0x0b0a090f,
	}}
};
ALIAS_MV(p2k)
#endif
410 | |||
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL)
/* Machine vector for the XL (APECS core); note the XL-specific DMA
   address limit and memory base.  */
struct alpha_machine_vector xl_mv __initmv = {
	.vector_name		= "XL",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= apecs_machine_check,
	.max_isa_dma_address	= ALPHA_XL_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= XL_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		.route_tab	= 0x0b0a090f,
	}}
};
ALIAS_MV(xl)
#endif
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c new file mode 100644 index 000000000000..94ad68b7c0ae --- /dev/null +++ b/arch/alpha/kernel/sys_sx164.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_sx164.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999, 2000 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the SX164 (PCA56+PYXIS). | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/bitops.h> | ||
18 | |||
19 | #include <asm/ptrace.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/core_cia.h> | ||
27 | #include <asm/hwrpb.h> | ||
28 | #include <asm/tlbflush.h> | ||
29 | |||
30 | #include "proto.h" | ||
31 | #include "irq_impl.h" | ||
32 | #include "pci_impl.h" | ||
33 | #include "machvec_impl.h" | ||
34 | |||
35 | |||
/*
 * SX164 interrupt setup: reset the two ISA DMA controllers and put
 * DMA2 in cascade mode, bring up the i8259A PICs, then register the
 * PYXIS (or SRM) irqs and hook the timer cascade on irq 16+6.
 */
static void __init
sx164_init_irq(void)
{
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	if (alpha_using_srm)
		alpha_mv.device_interrupt = srm_device_interrupt;

	init_i8259a_irqs();

	/* Not interested in the bogus interrupts (0,3,4,5,40-47),
	   NMI (1), or HALT (2).  */
	if (alpha_using_srm)
		init_srm_irqs(40, 0x3f0000);
	else
		init_pyxis_irqs(0xff00003f0000UL);

	setup_irq(16+6, &timer_cascade_irqaction);
}
58 | |||
59 | /* | ||
60 | * PCI Fixup configuration. | ||
61 | * | ||
62 | * Summary @ PYXIS_INT_REQ: | ||
63 | * Bit Meaning | ||
64 | * 0 RSVD | ||
65 | * 1 NMI | ||
66 | * 2 Halt/Reset switch | ||
67 | * 3 MBZ | ||
68 | * 4 RAZ | ||
69 | * 5 RAZ | ||
70 | * 6 Interval timer (RTC) | ||
71 | * 7 PCI-ISA Bridge | ||
72 | * 8 Interrupt Line A from slot 3 | ||
73 | * 9 Interrupt Line A from slot 2 | ||
74 | *10 Interrupt Line A from slot 1 | ||
75 | *11 Interrupt Line A from slot 0 | ||
76 | *12 Interrupt Line B from slot 3 | ||
77 | *13 Interrupt Line B from slot 2 | ||
78 | *14 Interrupt Line B from slot 1 | ||
79 | *15 Interrupt line B from slot 0 | ||
80 | *16 Interrupt Line C from slot 3 | ||
81 | *17 Interrupt Line C from slot 2 | ||
82 | *18 Interrupt Line C from slot 1 | ||
83 | *19 Interrupt Line C from slot 0 | ||
84 | *20 Interrupt Line D from slot 3 | ||
85 | *21 Interrupt Line D from slot 2 | ||
86 | *22 Interrupt Line D from slot 1 | ||
87 | *23 Interrupt Line D from slot 0 | ||
88 | * | ||
89 | * IdSel | ||
90 | * 5 32 bit PCI option slot 2 | ||
91 | * 6 64 bit PCI option slot 0 | ||
92 | * 7 64 bit PCI option slot 1 | ||
93 | * 8 Cypress I/O | ||
94 | * 9 32 bit PCI option slot 3 | ||
95 | */ | ||
96 | |||
/*
 * SX164 slot+pin to irq mapping (see the PYXIS_INT_REQ summary above
 * for the bit layout).  The irqs here are already final values, so no
 * route-table translation is needed; COMMON_TABLE_LOOKUP consumes the
 * local names irq_tab, min_idsel, max_idsel and irqs_per_slot.
 */
static int __init
sx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[5][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
		{ 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
		{ 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 8 SIO */
		{ 16+ 8, 16+ 8, 16+12, 16+16, 16+20}  /* IdSel 9 slot 3 J15 */
	};
	const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
111 | |||
/*
 * SX164 PCI init: common CIA/PYXIS PCI setup, then initialize the
 * SMC 37c669 SuperIO chip.
 */
static void __init
sx164_init_pci(void)
{
	cia_init_pci();
	SMC669_Init(0);
}
118 | |||
/*
 * SX164 arch init.  Works around OSF PALcode v1.23 leaving the PCA56
 * Motion Video Instructions (MVI) disabled: when running under SRM
 * with exactly that PAL revision, PALRES access is temporarily enabled
 * (cserve 8/9) so the MVE bit (bit 19) can be set in the ICSR via
 * raw hw_mfpr/hw_mtpr encodings.  Finally falls through to the common
 * PYXIS core init.
 */
static void __init
sx164_init_arch(void)
{
	/*
	 * OSF palcode v1.23 forgets to enable PCA56 Motion Video
	 * Instructions. Let's enable it.
	 * We have to check palcode revision because CSERVE interface
	 * is subject to change without notice. For example, it
	 * has been changed completely since v1.16 (found in MILO
	 * distribution). -ink
	 */
	struct percpu_struct *cpu = (struct percpu_struct*)
		((char*)hwrpb + hwrpb->processor_offset);

	/* amask(MAX) != 0 means MAX extension absent, i.e. a PCA56-class
	   CPU that needs the workaround.  */
	if (amask(AMASK_MAX) != 0
	    && alpha_using_srm
	    && (cpu->pal_revision & 0xffff) == 0x117) {
		__asm__ __volatile__(
		"lda $16,8($31)\n"
		"call_pal 9\n"		/* Allow PALRES insns in kernel mode */
		".long 0x64000118\n\n"	/* hw_mfpr $0,icsr */
		"ldah $16,(1<<(19-16))($31)\n"
		"or $0,$16,$0\n"	/* set MVE bit */
		".long 0x74000118\n"	/* hw_mtpr $0,icsr */
		"lda $16,9($31)\n"
		"call_pal 9"		/* Disable PALRES insns */
		: : : "$0", "$16");
		printk("PCA56 MVI set enabled\n");
	}

	pyxis_init_arch();
}
151 | |||
152 | /* | ||
153 | * The System Vector | ||
154 | */ | ||
155 | |||
/* Machine vector for the SX164 (PCA56 CPU + PYXIS core logic).  */
struct alpha_machine_vector sx164_mv __initmv = {
	.vector_name		= "SX164",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_PYXIS_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= PYXIS_DAC_OFFSET,

	.nr_irqs		= 48,
	.device_interrupt	= pyxis_device_interrupt,

	.init_arch		= sx164_init_arch,
	.init_irq		= sx164_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= sx164_init_pci,
	.kill_arch		= cia_kill_arch,
	.pci_map_irq		= sx164_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(sx164)
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c new file mode 100644 index 000000000000..7955bdfc2db0 --- /dev/null +++ b/arch/alpha/kernel/sys_takara.c | |||
@@ -0,0 +1,296 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_takara.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * | ||
8 | * Code supporting the TAKARA. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/init.h> | ||
17 | |||
18 | #include <asm/ptrace.h> | ||
19 | #include <asm/system.h> | ||
20 | #include <asm/dma.h> | ||
21 | #include <asm/irq.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/core_cia.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | |||
28 | #include "proto.h" | ||
29 | #include "irq_impl.h" | ||
30 | #include "pci_impl.h" | ||
31 | #include "machvec_impl.h" | ||
32 | |||
33 | |||
/* Note mask bit is true for DISABLED irqs.  Element 0 covers irqs
   0-63, element 1 covers irqs 64-127; both start fully masked.  */
static unsigned long cached_irq_mask[2] = { -1, -1 };
36 | |||
/*
 * Write the cached mask for @irq's group out to the Takara interrupt
 * controller.  The controller registers live at I/O ports 0x510-0x51c
 * (one dword per 16-irq bank, selected from the irq number), with the
 * mask bits carried in the high halfword of the dword written.
 */
static inline void
takara_update_irq_hw(unsigned long irq, unsigned long mask)
{
	int regaddr;

	/* Shift the relevant 16-bit slice of the 64-bit mask into the
	   high half of the value written to the bank register.  */
	mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
	regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
	outl(mask & 0xffff0000UL, regaddr);
}
46 | |||
47 | static inline void | ||
48 | takara_enable_irq(unsigned int irq) | ||
49 | { | ||
50 | unsigned long mask; | ||
51 | mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); | ||
52 | takara_update_irq_hw(irq, mask); | ||
53 | } | ||
54 | |||
55 | static void | ||
56 | takara_disable_irq(unsigned int irq) | ||
57 | { | ||
58 | unsigned long mask; | ||
59 | mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); | ||
60 | takara_update_irq_hw(irq, mask); | ||
61 | } | ||
62 | |||
/*
 * hw_interrupt_type startup hook: just unmask the irq.  The controller
 * gives no way to observe a pending interrupt, so report none.
 */
static unsigned int
takara_startup_irq(unsigned int irq)
{
	takara_enable_irq(irq);
	return 0; /* never anything pending */
}
69 | |||
/*
 * hw_interrupt_type end-of-interrupt hook: re-enable the irq unless it
 * was disabled, or is still being processed, while handled.
 */
static void
takara_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		takara_enable_irq(irq);
}
76 | |||
/* Irq controller operations for irqs 16-127 (see takara_init_irq);
   ack is implemented by masking, typical for this style of PIC.  */
static struct hw_interrupt_type takara_irq_type = {
	.typename	= "TAKARA",
	.startup	= takara_startup_irq,
	.shutdown	= takara_disable_irq,
	.enable		= takara_enable_irq,
	.disable	= takara_disable_irq,
	.ack		= takara_disable_irq,
	.end		= takara_end_irq,
};
86 | |||
/*
 * Non-SRM device interrupt dispatcher.  Reads the Master Interrupt
 * Register at port 0x500: any of the low four bits set means a PCI
 * interrupt on that line (dispatched as irqs 16+0..16+3); otherwise
 * the interrupt is handed to the generic ISA dispatcher.
 */
static void
takara_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	unsigned intstatus;

	/*
	 * The PALcode will have passed us vectors 0x800 or 0x810,
	 * which are fairly arbitrary values and serve only to tell
	 * us whether an interrupt has come in on IRQ0 or IRQ1. If
	 * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
	 * probably ISA, but PCI interrupts can come through IRQ0
	 * as well if the interrupt controller isn't in accelerated
	 * mode.
	 *
	 * OTOH, the accelerator thing doesn't seem to be working
	 * overly well, so what we'll do instead is try directly
	 * examining the Master Interrupt Register to see if it's a
	 * PCI interrupt, and if _not_ then we'll pass it on to the
	 * ISA handler.
	 */

	intstatus = inw(0x500) & 15;
	if (intstatus) {
		/*
		 * This is a PCI interrupt. Check each bit and
		 * despatch an interrupt if it's set.
		 */

		if (intstatus & 8) handle_irq(16+3, regs);
		if (intstatus & 4) handle_irq(16+2, regs);
		if (intstatus & 2) handle_irq(16+1, regs);
		if (intstatus & 1) handle_irq(16+0, regs);
	} else {
		isa_device_interrupt (vector, regs);
	}
}
123 | |||
/*
 * SRM-mode device interrupt dispatcher: SRM vectors start at 0x800
 * and are spaced 0x10 apart, so the irq number falls straight out of
 * the vector.
 */
static void
takara_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	handle_irq((vector - 0x800) >> 4, regs);
}
130 | |||
/*
 * Takara interrupt setup: bring up the legacy i8259A PICs, select the
 * dispatch path (SRM vs direct register polling), mask all controller
 * irqs, and register takara_irq_type for irqs 16-127.
 */
static void __init
takara_init_irq(void)
{
	long i;

	init_i8259a_irqs();

	if (alpha_using_srm) {
		alpha_mv.device_interrupt = takara_srm_device_interrupt;
	} else {
		unsigned int ctlreg = inl(0x500);

		/* Return to non-accelerated mode.  */
		ctlreg &= ~0x8000;
		outl(ctlreg, 0x500);

		/* Enable the PCI interrupt register.  */
		ctlreg = 0x05107c00;
		outl(ctlreg, 0x500);
	}

	/* Start with every controller irq masked (one write per bank).  */
	for (i = 16; i < 128; i += 16)
		takara_update_irq_hw(i, -1);

	for (i = 16; i < 128; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = &takara_irq_type;
	}

	common_init_isa_dma();
}
162 | |||
163 | |||
164 | /* | ||
165 | * The Takara has PCI devices 1, 2, and 3 configured to slots 20, | ||
166 | * 19, and 18 respectively, in the default configuration. They can | ||
167 | * also be jumpered to slots 8, 7, and 6 respectively, which is fun | ||
168 | * because the SIO ISA bridge can also be slot 7. However, the SIO | ||
169 | * doesn't explicitly generate PCI-type interrupts, so we can | ||
170 | * assign it whatever the hell IRQ we like and it doesn't matter. | ||
171 | */ | ||
172 | |||
173 | static int __init | ||
174 | takara_map_irq_srm(struct pci_dev *dev, u8 slot, u8 pin) | ||
175 | { | ||
176 | static char irq_tab[15][5] __initdata = { | ||
177 | { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ | ||
178 | { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ | ||
179 | { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ | ||
180 | { -1, -1, -1, -1, -1}, /* slot 9 == nothing */ | ||
181 | { -1, -1, -1, -1, -1}, /* slot 10 == nothing */ | ||
182 | { -1, -1, -1, -1, -1}, /* slot 11 == nothing */ | ||
183 | /* These are behind the bridges. */ | ||
184 | { 12, 12, 13, 14, 15}, /* slot 12 == nothing */ | ||
185 | { 8, 8, 9, 19, 11}, /* slot 13 == nothing */ | ||
186 | { 4, 4, 5, 6, 7}, /* slot 14 == nothing */ | ||
187 | { 0, 0, 1, 2, 3}, /* slot 15 == nothing */ | ||
188 | { -1, -1, -1, -1, -1}, /* slot 16 == nothing */ | ||
189 | {64+ 0, 64+0, 64+1, 64+2, 64+3}, /* slot 17= device 4 */ | ||
190 | {48+ 0, 48+0, 48+1, 48+2, 48+3}, /* slot 18= device 3 */ | ||
191 | {32+ 0, 32+0, 32+1, 32+2, 32+3}, /* slot 19= device 2 */ | ||
192 | {16+ 0, 16+0, 16+1, 16+2, 16+3}, /* slot 20= device 1 */ | ||
193 | }; | ||
194 | const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5; | ||
195 | int irq = COMMON_TABLE_LOOKUP; | ||
196 | if (irq >= 0 && irq < 16) { | ||
197 | /* Guess that we are behind a bridge. */ | ||
198 | unsigned int busslot = PCI_SLOT(dev->bus->self->devfn); | ||
199 | irq += irq_tab[busslot-min_idsel][0]; | ||
200 | } | ||
201 | return irq; | ||
202 | } | ||
203 | |||
/*
 * Slot+pin to irq mapping for the non-SRM case: only the three direct
 * devices (in either jumper position) are routed; everything else is
 * unmapped.  COMMON_TABLE_LOOKUP consumes the local names irq_tab,
 * min_idsel, max_idsel and irqs_per_slot.
 */
static int __init
takara_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] __initdata = {
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot  6 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot  7 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot  8 == device 1 */
		{   -1,   -1,   -1,   -1,   -1},   /* slot  9 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 10 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 11 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 12 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 13 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 14 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 15 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 16 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 17 == nothing */
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot 18 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot 19 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot 20 == device 1 */
	};
	const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
227 | |||
/*
 * Interrupt-pin swizzle for devices behind bridges.  For a device
 * behind one of the built-in bridges (identified via bits in the
 * port-0x500 control register), INTA is rotated according to which
 * bridge slot it sits behind; other pins, and card-based bridges, are
 * not handled and only produce a warning.
 *
 * NOTE(review): when dev is behind a bridge that fails the built-in
 * test, the else branch warns unconditionally — this mirrors the code
 * as written; card-bridge support was simply never implemented.
 */
static u8 __init
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
	int slot = PCI_SLOT(dev->devfn);
	int pin = *pinp;
	unsigned int ctlreg = inl(0x500);
	unsigned int busslot;

	/* Devices on the root bus need no swizzling.  */
	if (!dev->bus->self)
		return slot;

	busslot = PCI_SLOT(dev->bus->self->devfn);
	/* Check for built-in bridges.  */
	if (dev->bus->number != 0
	    && busslot > 16
	    && ((1<<(36-busslot)) & ctlreg)) {
		if (pin == 1)
			pin += (20 - busslot);
		else {
			printk(KERN_WARNING "takara_swizzle: can only "
			       "handle cards with INTA IRQ pin.\n");
		}
	} else {
		/* Must be a card-based bridge.  */
		printk(KERN_WARNING "takara_swizzle: cannot handle "
		       "card-bridge behind builtin bridge yet.\n");
	}

	*pinp = pin;
	return slot;
}
259 | |||
/*
 * Takara PCI init: switch to the SRM irq map when running under SRM,
 * run common CIA PCI setup, and enable IDE on the NS87312 SuperIO
 * chip at config port 0x26e.
 */
static void __init
takara_init_pci(void)
{
	if (alpha_using_srm)
		alpha_mv.pci_map_irq = takara_map_irq_srm;

	cia_init_pci();
	ns87312_enable_ide(0x26e);
}
269 | |||
270 | |||
271 | /* | ||
272 | * The System Vector | ||
273 | */ | ||
274 | |||
/* Machine vector for the Takara (CIA core logic, EV5 CPU).  */
struct alpha_machine_vector takara_mv __initmv = {
	.vector_name		= "Takara",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 128,
	.device_interrupt	= takara_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= takara_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= takara_init_pci,
	.kill_arch		= cia_kill_arch,
	.pci_map_irq		= takara_map_irq,
	.pci_swizzle		= takara_swizzle,
};
ALIAS_MV(takara)
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c new file mode 100644 index 000000000000..5f84417eeb7b --- /dev/null +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -0,0 +1,420 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_titan.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David A Rusling | ||
5 | * Copyright (C) 1996, 1999 Jay A Estabrook | ||
6 | * Copyright (C) 1998, 1999 Richard Henderson | ||
7 | * Copyright (C) 1999, 2000 Jeff Wiedemeier | ||
8 | * | ||
9 | * Code supporting TITAN systems (EV6+TITAN), currently: | ||
10 | * Privateer | ||
11 | * Falcon | ||
12 | * Granite | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/bitops.h> | ||
23 | |||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/system.h> | ||
26 | #include <asm/dma.h> | ||
27 | #include <asm/irq.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/core_titan.h> | ||
32 | #include <asm/hwrpb.h> | ||
33 | #include <asm/tlbflush.h> | ||
34 | |||
35 | #include "proto.h" | ||
36 | #include "irq_impl.h" | ||
37 | #include "pci_impl.h" | ||
38 | #include "machvec_impl.h" | ||
39 | #include "err_impl.h" | ||
40 | |||
41 | |||
/*
 * Titan generic
 */

/*
 * Titan supports up to 4 CPUs.  One routing mask per CPU: bit i set
 * means device interrupt i may be delivered to that CPU (default: all).
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

/*
 * Software copy of the interrupt enable state pushed to the hardware
 * by titan_update_irq_hw().  A bit is set (1) if that source is enabled.
 */
static unsigned long titan_cached_irq_mask;

/*
 * Need SMP-safe access to interrupt CSRs -- guards the cached mask,
 * the per-CPU affinity masks, and the hardware update.
 */
DEFINE_SPINLOCK(titan_irq_lock);
60 | |||
/* Push the cached enable mask out to the CChip DIM (device interrupt
   mask) CSRs.  Bit 55 is the ISA summary interrupt and always follows
   the boot CPU.  Caller holds titan_irq_lock.  */
static void
titan_update_irq_hw(unsigned long mask)
{
	register titan_cchip *cchip = TITAN_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	cpumask_t cpm = cpu_present_mask;
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	/* Restrict each source to the CPUs its affinity permits;
	   only the boot CPU gets the ISA summary bit.  */
	mask &= ~isa_enable;
	mask0 = mask & titan_cpu_irq_affinity[0];
	mask1 = mask & titan_cpu_irq_affinity[1];
	mask2 = mask & titan_cpu_irq_affinity[2];
	mask3 = mask & titan_cpu_irq_affinity[3];

	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	/* Redirect writes for absent CPUs to a stack dummy so we never
	   touch a DIM CSR belonging to a CPU that is not present.  */
	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpu_isset(0, cpm)) dim0 = &dummy;
	if (!cpu_isset(1, cpm)) dim1 = &dummy;
	if (!cpu_isset(2, cpm)) dim2 = &dummy;
	if (!cpu_isset(3, cpm)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	/* Read back each CSR to force the writes to the hardware.  */
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	/* UP: only the boot CPU's DIM is live; it gets everything,
	   including the ISA summary bit.  */
	volatile unsigned long *dimB;
	dimB = &cchip->dim0.csr;
	if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else if (bcpu == 3) dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;			/* read back to flush the write */
#endif
}
114 | |||
115 | static inline void | ||
116 | titan_enable_irq(unsigned int irq) | ||
117 | { | ||
118 | spin_lock(&titan_irq_lock); | ||
119 | titan_cached_irq_mask |= 1UL << (irq - 16); | ||
120 | titan_update_irq_hw(titan_cached_irq_mask); | ||
121 | spin_unlock(&titan_irq_lock); | ||
122 | } | ||
123 | |||
124 | static inline void | ||
125 | titan_disable_irq(unsigned int irq) | ||
126 | { | ||
127 | spin_lock(&titan_irq_lock); | ||
128 | titan_cached_irq_mask &= ~(1UL << (irq - 16)); | ||
129 | titan_update_irq_hw(titan_cached_irq_mask); | ||
130 | spin_unlock(&titan_irq_lock); | ||
131 | } | ||
132 | |||
/* hw_interrupt_type.startup: unmask the line.  There is never a
   previously pending event to report, hence the constant 0.  */
static unsigned int
titan_startup_irq(unsigned int irq)
{
	titan_enable_irq(irq);
	return 0;
}
139 | |||
140 | static void | ||
141 | titan_end_irq(unsigned int irq) | ||
142 | { | ||
143 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
144 | titan_enable_irq(irq); | ||
145 | } | ||
146 | |||
147 | static void | ||
148 | titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | ||
149 | { | ||
150 | int cpu; | ||
151 | |||
152 | for (cpu = 0; cpu < 4; cpu++) { | ||
153 | if (cpu_isset(cpu, affinity)) | ||
154 | titan_cpu_irq_affinity[cpu] |= 1UL << irq; | ||
155 | else | ||
156 | titan_cpu_irq_affinity[cpu] &= ~(1UL << irq); | ||
157 | } | ||
158 | |||
159 | } | ||
160 | |||
161 | static void | ||
162 | titan_set_irq_affinity(unsigned int irq, cpumask_t affinity) | ||
163 | { | ||
164 | spin_lock(&titan_irq_lock); | ||
165 | titan_cpu_set_irq_affinity(irq - 16, affinity); | ||
166 | titan_update_irq_hw(titan_cached_irq_mask); | ||
167 | spin_unlock(&titan_irq_lock); | ||
168 | } | ||
169 | |||
/* Direct (non-SRM) dispatcher: not implemented -- only warns.  Used
   as the fallback when the console is not SRM; see titan_init_irq().  */
static void
titan_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	printk("titan_device_interrupt: NOT IMPLEMENTED YET!! \n");
}
175 | |||
/* SRM console dispatcher.  SRM encodes device vectors as
   0x800 + 0x10 * irq; invert that mapping and hand off.  */
static void
titan_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	handle_irq((vector - 0x800) >> 4, regs);
}
184 | |||
185 | |||
186 | static void __init | ||
187 | init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax) | ||
188 | { | ||
189 | long i; | ||
190 | for (i = imin; i <= imax; ++i) { | ||
191 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | ||
192 | irq_desc[i].handler = ops; | ||
193 | } | ||
194 | } | ||
195 | |||
/* IRQ controller callbacks for the Titan device vectors.  ack masks
   the source while it is handled; end re-enables it (see
   titan_end_irq); shutdown simply masks.  */
static struct hw_interrupt_type titan_irq_type = {
       .typename	= "TITAN",
       .startup		= titan_startup_irq,
       .shutdown	= titan_disable_irq,
       .enable		= titan_enable_irq,
       .disable		= titan_disable_irq,
       .ack		= titan_disable_irq,
       .end		= titan_end_irq,
       .set_affinity	= titan_set_irq_affinity,
};
206 | |||
/* Do-nothing handler registered on error/NMI lines purely so the
   generic IRQ layer counts their occurrences (see titan_late_init).  */
static irqreturn_t
titan_intr_nop(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * This is a NOP interrupt handler for the purposes of
	 * event counting -- just return.
	 */
	return IRQ_HANDLED;
}
216 | |||
217 | static void __init | ||
218 | titan_init_irq(void) | ||
219 | { | ||
220 | if (alpha_using_srm && !alpha_mv.device_interrupt) | ||
221 | alpha_mv.device_interrupt = titan_srm_device_interrupt; | ||
222 | if (!alpha_mv.device_interrupt) | ||
223 | alpha_mv.device_interrupt = titan_device_interrupt; | ||
224 | |||
225 | titan_update_irq_hw(0); | ||
226 | |||
227 | init_titan_irqs(&titan_irq_type, 16, 63 + 16); | ||
228 | } | ||
229 | |||
/* Interrupt init for Titan systems with legacy ISA support: reset the
   8237 DMA controllers, cascade DMA2, set up the i8259 pair, then do
   the common Titan setup.  Port I/O order matters here.  */
static void __init
titan_legacy_init_irq(void)
{
	/* init the legacy dma controller */
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	/* init the legacy irq controller */
	init_i8259a_irqs();

	/* init the titan irqs */
	titan_init_irq();
}
245 | |||
246 | void | ||
247 | titan_dispatch_irqs(u64 mask, struct pt_regs *regs) | ||
248 | { | ||
249 | unsigned long vector; | ||
250 | |||
251 | /* | ||
252 | * Mask down to those interrupts which are enable on this processor | ||
253 | */ | ||
254 | mask &= titan_cpu_irq_affinity[smp_processor_id()]; | ||
255 | |||
256 | /* | ||
257 | * Dispatch all requested interrupts | ||
258 | */ | ||
259 | while (mask) { | ||
260 | /* convert to SRM vector... priority is <63> -> <0> */ | ||
261 | __asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask)); | ||
262 | vector = 63 - vector; | ||
263 | mask &= ~(1UL << vector); /* clear it out */ | ||
264 | vector = 0x900 + (vector << 4); /* convert to SRM vector */ | ||
265 | |||
266 | /* dispatch it */ | ||
267 | alpha_mv.device_interrupt(vector, regs); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | |||
272 | /* | ||
273 | * Titan Family | ||
274 | */ | ||
/* Post-boot Titan setup: hook the system-error interrupt lines with a
   counting NOP handler, register the machine-check error decoders, and
   drain any console-logged errors.  Return values of request_irq are
   deliberately ignored -- these hooks are best-effort.  */
static void __init
titan_late_init(void)
{
	/*
	 * Enable the system error interrupts. These interrupts are
	 * all reported to the kernel as machine checks, so the handler
	 * is a nop so it can be called to count the individual events.
	 */
	request_irq(63+16, titan_intr_nop, SA_INTERRUPT,
		    "CChip Error", NULL);
	request_irq(62+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 0 H_Error", NULL);
	request_irq(61+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 1 H_Error", NULL);
	request_irq(60+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 0 C_Error", NULL);
	request_irq(59+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 1 C_Error", NULL);

	/*
	 * Register our error handlers.
	 */
	titan_register_error_handlers();

	/*
	 * Check if the console left us any error logs.
	 */
	cdl_check_console_data_log();

}
305 | |||
306 | static int __devinit | ||
307 | titan_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
308 | { | ||
309 | u8 intline; | ||
310 | int irq; | ||
311 | |||
312 | /* Get the current intline. */ | ||
313 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); | ||
314 | irq = intline; | ||
315 | |||
316 | /* Is it explicitly routed through ISA? */ | ||
317 | if ((irq & 0xF0) == 0xE0) | ||
318 | return irq; | ||
319 | |||
320 | /* Offset by 16 to make room for ISA interrupts 0 - 15. */ | ||
321 | return irq + 16; | ||
322 | } | ||
323 | |||
/* Common Titan PCI init: late error-interrupt setup, SRM-configured
   bus scan (probe only -- no resource reassignment), SuperIO init,
   and optional VGA hose location.  */
static void __init
titan_init_pci(void)
{
	/*
	 * This isn't really the right place, but there's some init
	 * that needs to be done after everything is basically up.
	 */
	titan_late_init();

	/* Keep the console's PCI resource assignments.  */
	pci_probe_only = 1;
	common_init_pci();
	SMC669_Init(0);
#ifdef CONFIG_VGA_HOSE
	locate_and_init_vga(NULL);
#endif
}
340 | |||
341 | |||
342 | /* | ||
343 | * Privateer | ||
344 | */ | ||
345 | static void __init | ||
346 | privateer_init_pci(void) | ||
347 | { | ||
348 | /* | ||
349 | * Hook a couple of extra err interrupts that the | ||
350 | * common titan code won't. | ||
351 | */ | ||
352 | request_irq(53+16, titan_intr_nop, SA_INTERRUPT, | ||
353 | "NMI", NULL); | ||
354 | request_irq(50+16, titan_intr_nop, SA_INTERRUPT, | ||
355 | "Temperature Warning", NULL); | ||
356 | |||
357 | /* | ||
358 | * Finish with the common version. | ||
359 | */ | ||
360 | return titan_init_pci(); | ||
361 | } | ||
362 | |||
363 | |||
364 | /* | ||
365 | * The System Vectors. | ||
366 | */ | ||
/* Generic Titan machine vector (EV6 + Titan core logic).  */
struct alpha_machine_vector titan_mv __initmv = {
	.vector_name		= "TITAN",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check		= titan_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TITAN_DAC_OFFSET,

	.nr_irqs		= 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info		= titan_agp_info,

	.init_arch		= titan_init_arch,
	.init_irq		= titan_legacy_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= titan_init_pci,

	.kill_arch		= titan_kill_arch,
	.pci_map_irq		= titan_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(titan)
393 | |||
/* Privateer machine vector -- identical to titan_mv except for the
   machine-check handler and the PCI init hook.  */
struct alpha_machine_vector privateer_mv __initmv = {
	.vector_name		= "PRIVATEER",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check		= privateer_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TITAN_DAC_OFFSET,

	.nr_irqs		= 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info		= titan_agp_info,

	.init_arch		= titan_init_arch,
	.init_irq		= titan_legacy_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= privateer_init_pci,

	.kill_arch		= titan_kill_arch,
	.pci_map_irq		= titan_map_irq,
	.pci_swizzle		= common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
   in unconditionally with titan; setup_arch knows how to cope. */
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c new file mode 100644 index 000000000000..1553f470246e --- /dev/null +++ b/arch/alpha/kernel/sys_wildfire.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/sys_wildfire.c | ||
3 | * | ||
4 | * Wildfire support. | ||
5 | * | ||
6 | * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/bitops.h> | ||
16 | |||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <asm/dma.h> | ||
20 | #include <asm/irq.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/core_wildfire.h> | ||
25 | #include <asm/hwrpb.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | |||
28 | #include "proto.h" | ||
29 | #include "irq_impl.h" | ||
30 | #include "pci_impl.h" | ||
31 | #include "machvec_impl.h" | ||
32 | |||
/* Software copy of the enable bits for all Wildfire irqs, one bit per
   irq, pushed to the hardware by wildfire_update_irq_hw().  */
static unsigned long cached_irq_mask[WILDFIRE_NR_IRQS/(sizeof(long)*8)];

/* Guards cached_irq_mask and the hardware mask updates.  */
DEFINE_SPINLOCK(wildfire_irq_lock);

/* Set while wildfire_init_irq_hw() runs so that updates for
   non-existent PCAs are silently ignored instead of warned about.  */
static int doing_init_irq_hw = 0;
38 | |||
/* Push the cached enable word for the PCA owning `irq' out to that
   PCA's interrupt-enable CSR.  Caller holds wildfire_irq_lock (except
   during wildfire_init_irq_hw, which runs before interrupts).  */
static void
wildfire_update_irq_hw(unsigned int irq)
{
	/* irq layout: bits 10-8 QBB, bits 7-6 PCA, bits 5-0 irq in PCA.  */
	int qbbno = (irq >> 8) & (WILDFIRE_MAX_QBB - 1);
	int pcano = (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1);
	wildfire_pca *pca;
	volatile unsigned long * enable0;

	if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
		/* Expected during init (we sweep every possible PCA);
		   otherwise it indicates a routing bug.  */
		if (!doing_init_irq_hw) {
			printk(KERN_ERR "wildfire_update_irq_hw:"
			       " got irq %d for non-existent PCA %d"
			       " on QBB %d.\n",
			       irq, pcano, qbbno);
		}
		return;
	}

	pca = WILDFIRE_pca(qbbno, pcano);
	enable0 = (unsigned long *) &pca->pca_int[0].enable; /* ??? */

	*enable0 = cached_irq_mask[qbbno * WILDFIRE_PCA_PER_QBB + pcano];
	mb();
	*enable0;	/* read back to force the write to the hardware */
}
64 | |||
/* Mask every interrupt source on every possible PCA.  The disabled
   #if 0 variant programmed PCA(0,0)'s enable/target CSRs directly;
   the live code instead sweeps all PCAs through
   wildfire_update_irq_hw() with the (all-zero) cached mask.  */
static void __init
wildfire_init_irq_hw(void)
{
#if 0
	register wildfire_pca * pca = WILDFIRE_pca(0, 0);
	volatile unsigned long * enable0, * enable1, * enable2, *enable3;
	volatile unsigned long * target0, * target1, * target2, *target3;

	enable0 = (unsigned long *) &pca->pca_int[0].enable;
	enable1 = (unsigned long *) &pca->pca_int[1].enable;
	enable2 = (unsigned long *) &pca->pca_int[2].enable;
	enable3 = (unsigned long *) &pca->pca_int[3].enable;

	target0 = (unsigned long *) &pca->pca_int[0].target;
	target1 = (unsigned long *) &pca->pca_int[1].target;
	target2 = (unsigned long *) &pca->pca_int[2].target;
	target3 = (unsigned long *) &pca->pca_int[3].target;

	*enable0 = *enable1 = *enable2 = *enable3 = 0;

	*target0 = (1UL<<8) | WILDFIRE_QBB(0);
	*target1 = *target2 = *target3 = 0;

	mb();

	*enable0; *enable1; *enable2; *enable3;
	*target0; *target1; *target2; *target3;

#else
	int i;

	/* Suppress "non-existent PCA" warnings while sweeping.  */
	doing_init_irq_hw = 1;

	/* Need to update only once for every possible PCA. */
	for (i = 0; i < WILDFIRE_NR_IRQS; i+=WILDFIRE_IRQ_PER_PCA)
		wildfire_update_irq_hw(i);

	doing_init_irq_hw = 0;
#endif
}
105 | |||
106 | static void | ||
107 | wildfire_enable_irq(unsigned int irq) | ||
108 | { | ||
109 | if (irq < 16) | ||
110 | i8259a_enable_irq(irq); | ||
111 | |||
112 | spin_lock(&wildfire_irq_lock); | ||
113 | set_bit(irq, &cached_irq_mask); | ||
114 | wildfire_update_irq_hw(irq); | ||
115 | spin_unlock(&wildfire_irq_lock); | ||
116 | } | ||
117 | |||
118 | static void | ||
119 | wildfire_disable_irq(unsigned int irq) | ||
120 | { | ||
121 | if (irq < 16) | ||
122 | i8259a_disable_irq(irq); | ||
123 | |||
124 | spin_lock(&wildfire_irq_lock); | ||
125 | clear_bit(irq, &cached_irq_mask); | ||
126 | wildfire_update_irq_hw(irq); | ||
127 | spin_unlock(&wildfire_irq_lock); | ||
128 | } | ||
129 | |||
130 | static void | ||
131 | wildfire_mask_and_ack_irq(unsigned int irq) | ||
132 | { | ||
133 | if (irq < 16) | ||
134 | i8259a_mask_and_ack_irq(irq); | ||
135 | |||
136 | spin_lock(&wildfire_irq_lock); | ||
137 | clear_bit(irq, &cached_irq_mask); | ||
138 | wildfire_update_irq_hw(irq); | ||
139 | spin_unlock(&wildfire_irq_lock); | ||
140 | } | ||
141 | |||
/* hw_interrupt_type.startup: unmask the line; nothing can already be
   pending, hence the constant 0.  */
static unsigned int
wildfire_startup_irq(unsigned int irq)
{
	wildfire_enable_irq(irq);
	return 0;
}
148 | |||
149 | static void | ||
150 | wildfire_end_irq(unsigned int irq) | ||
151 | { | ||
152 | #if 0 | ||
153 | if (!irq_desc[irq].action) | ||
154 | printk("got irq %d\n", irq); | ||
155 | #endif | ||
156 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
157 | wildfire_enable_irq(irq); | ||
158 | } | ||
159 | |||
/* IRQ controller callbacks for Wildfire PCA interrupts.  No
   set_affinity: routing is fixed at init time.  */
static struct hw_interrupt_type wildfire_irq_type = {
	.typename	= "WILDFIRE",
	.startup	= wildfire_startup_irq,
	.shutdown	= wildfire_disable_irq,
	.enable		= wildfire_enable_irq,
	.disable	= wildfire_disable_irq,
	.ack		= wildfire_mask_and_ack_irq,
	.end		= wildfire_end_irq,
};
169 | |||
/* Register the interrupt descriptors for one PCA.  Per the routing
   summary below: 0-15 ISA, 32 ISA summary, 36 builtin SCSI, 40-63 the
   PCI slot lines.  The #if 0 blocks are unfinished per-PCA legacy DMA
   and i8259 setup.  */
static void __init
wildfire_init_irq_per_pca(int qbbno, int pcano)
{
	int i, irq_bias;
	unsigned long io_bias;
	static struct irqaction isa_enable = {
		.handler	= no_action,
		.name		= "isa_enable",
	};

	/* First irq number belonging to this PCA.  */
	irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
		 + pcano * WILDFIRE_IRQ_PER_PCA;

	/* Only need the following for first PCI bus per PCA. */
	io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;

#if 0
	outb(0, DMA1_RESET_REG + io_bias);
	outb(0, DMA2_RESET_REG + io_bias);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
	outb(0, DMA2_MASK_REG + io_bias);
#endif

#if 0
	/* ??? Not sure how to do this, yet... */
	init_i8259a_irqs(); /* ??? */
#endif

	/* ISA irqs 0-15, skipping 2 (the cascade).  */
	for (i = 0; i < 16; ++i) {
		if (i == 2)
			continue;
		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i+irq_bias].handler = &wildfire_irq_type;
	}

	/* 36: builtin QLogic SCSI (or slot 0 if no IO module).  */
	irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
	irq_desc[36+irq_bias].handler = &wildfire_irq_type;
	/* 40-63: PCI slot interrupt lines A-D.  */
	for (i = 40; i < 64; ++i) {
		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i+irq_bias].handler = &wildfire_irq_type;
	}

	/* 32: ISA summary -- claim it so the ISA lines stay enabled.  */
	setup_irq(32+irq_bias, &isa_enable);
}
214 | |||
215 | static void __init | ||
216 | wildfire_init_irq(void) | ||
217 | { | ||
218 | int qbbno, pcano; | ||
219 | |||
220 | #if 1 | ||
221 | wildfire_init_irq_hw(); | ||
222 | init_i8259a_irqs(); | ||
223 | #endif | ||
224 | |||
225 | for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) { | ||
226 | if (WILDFIRE_QBB_EXISTS(qbbno)) { | ||
227 | for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) { | ||
228 | if (WILDFIRE_PCA_EXISTS(qbbno, pcano)) { | ||
229 | wildfire_init_irq_per_pca(qbbno, pcano); | ||
230 | } | ||
231 | } | ||
232 | } | ||
233 | } | ||
234 | } | ||
235 | |||
/* SRM-style dispatcher: vectors are 0x800 + 0x10 * irq, where the irq
   encodes bits 10-8: source QBB ID, bits 7-6: PCA, bits 5-0: irq in
   that PCA.  */
static void
wildfire_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq = (vector - 0x800) >> 4;

	handle_irq(irq, regs);
}
252 | |||
253 | /* | ||
254 | * PCI Fixup configuration. | ||
255 | * | ||
256 | * Summary per PCA (2 PCI or HIPPI buses): | ||
257 | * | ||
258 | * Bit Meaning | ||
259 | * 0-15 ISA | ||
260 | * | ||
261 | *32 ISA summary | ||
262 | *33 SMI | ||
263 | *34 NMI | ||
264 | *36 builtin QLogic SCSI (or slot 0 if no IO module) | ||
265 | *40 Interrupt Line A from slot 2 PCI0 | ||
266 | *41 Interrupt Line B from slot 2 PCI0 | ||
267 | *42 Interrupt Line C from slot 2 PCI0 | ||
268 | *43 Interrupt Line D from slot 2 PCI0 | ||
269 | *44 Interrupt Line A from slot 3 PCI0 | ||
270 | *45 Interrupt Line B from slot 3 PCI0 | ||
271 | *46 Interrupt Line C from slot 3 PCI0 | ||
272 | *47 Interrupt Line D from slot 3 PCI0 | ||
273 | * | ||
274 | *48 Interrupt Line A from slot 4 PCI1 | ||
275 | *49 Interrupt Line B from slot 4 PCI1 | ||
276 | *50 Interrupt Line C from slot 4 PCI1 | ||
277 | *51 Interrupt Line D from slot 4 PCI1 | ||
278 | *52 Interrupt Line A from slot 5 PCI1 | ||
279 | *53 Interrupt Line B from slot 5 PCI1 | ||
280 | *54 Interrupt Line C from slot 5 PCI1 | ||
281 | *55 Interrupt Line D from slot 5 PCI1 | ||
282 | *56 Interrupt Line A from slot 6 PCI1 | ||
283 | *57 Interrupt Line B from slot 6 PCI1 | ||
284 | *58 Interrupt Line C from slot 6 PCI1 | ||
*59 Interrupt Line D from slot 6 PCI1
286 | *60 Interrupt Line A from slot 7 PCI1 | ||
287 | *61 Interrupt Line B from slot 7 PCI1 | ||
288 | *62 Interrupt Line C from slot 7 PCI1 | ||
289 | *63 Interrupt Line D from slot 7 PCI1 | ||
290 | * | ||
291 | * | ||
292 | * IdSel | ||
293 | * 0 Cypress Bridge I/O (ISA summary interrupt) | ||
294 | * 1 64 bit PCI 0 option slot 1 (SCSI QLogic builtin) | ||
295 | * 2 64 bit PCI 0 option slot 2 | ||
296 | * 3 64 bit PCI 0 option slot 3 | ||
297 | * 4 64 bit PCI 1 option slot 4 | ||
298 | * 5 64 bit PCI 1 option slot 5 | ||
299 | * 6 64 bit PCI 1 option slot 6 | ||
300 | * 7 64 bit PCI 1 option slot 7 | ||
301 | */ | ||
302 | |||
/* Map a PCI device/slot/pin to its kernel irq.  NOTE: the locals
   irq_tab, min_idsel, max_idsel and irqs_per_slot are referenced by
   name inside the COMMON_TABLE_LOOKUP macro -- do not rename them.  */
static int __init
wildfire_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[8][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{ -1,    -1,    -1,    -1,    -1}, /* IdSel 0 ISA Bridge */
		{ 36,    36,    36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
		{ 40,    40,    40+1, 40+2, 40+3}, /* IdSel 2 PCI 0 slot 2 */
		{ 44,    44,    44+1, 44+2, 44+3}, /* IdSel 3 PCI 0 slot 3 */
		{ 48,    48,    48+1, 48+2, 48+3}, /* IdSel 4 PCI 1 slot 4 */
		{ 52,    52,    52+1, 52+2, 52+3}, /* IdSel 5 PCI 1 slot 5 */
		{ 56,    56,    56+1, 56+2, 56+3}, /* IdSel 6 PCI 1 slot 6 */
		{ 60,    60,    60+1, 60+2, 60+3}, /* IdSel 7 PCI 1 slot 7 */
	};
	long min_idsel = 0, max_idsel = 7, irqs_per_slot = 5;

	struct pci_controller *hose = dev->sysdata;
	int irq = COMMON_TABLE_LOOKUP;

	/* Offset the per-PCA irq by the hose's QBB/PCA position
	   (irq bits 10-8 = QBB, bits 7-6 = PCA).  */
	if (irq > 0) {
		int qbbno = hose->index >> 3;
		int pcano = (hose->index >> 1) & 3;
		irq += (qbbno << 8) + (pcano << 6);
	}
	return irq;
}
329 | |||
330 | |||
331 | /* | ||
332 | * The System Vectors | ||
333 | */ | ||
334 | |||
/* Wildfire machine vector.  Includes the NUMA node hooks
   (pa_to_nid etc.) absent from the other platforms in this file.  */
struct alpha_machine_vector wildfire_mv __initmv = {
	.vector_name		= "WILDFIRE",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_WILDFIRE_IO,
	.machine_check		= wildfire_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,

	.nr_irqs		= WILDFIRE_NR_IRQS,
	.device_interrupt	= wildfire_device_interrupt,

	.init_arch		= wildfire_init_arch,
	.init_irq		= wildfire_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= wildfire_kill_arch,
	.pci_map_irq		= wildfire_map_irq,
	.pci_swizzle		= common_swizzle,

	.pa_to_nid		= wildfire_pa_to_nid,
	.cpuid_to_nid		= wildfire_cpuid_to_nid,
	.node_mem_start		= wildfire_node_mem_start,
	.node_mem_size		= wildfire_node_mem_size,
};
ALIAS_MV(wildfire)
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S new file mode 100644 index 000000000000..faab8c2a03eb --- /dev/null +++ b/arch/alpha/kernel/systbls.S | |||
@@ -0,0 +1,468 @@ | |||
1 | /* | ||
2 | * arch/alpha/kernel/systbls.S | ||
3 | * | ||
4 | * The system call table. | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> /* CONFIG_OSF4_COMPAT */ | ||
8 | #include <asm/unistd.h> | ||
9 | |||
10 | .data | ||
11 | .align 3 | ||
12 | .globl sys_call_table | ||
13 | sys_call_table: | ||
14 | .quad alpha_ni_syscall /* 0 */ | ||
15 | .quad sys_exit | ||
16 | .quad sys_fork | ||
17 | .quad sys_read | ||
18 | .quad sys_write | ||
19 | .quad alpha_ni_syscall /* 5 */ | ||
20 | .quad sys_close | ||
21 | .quad osf_wait4 | ||
22 | .quad alpha_ni_syscall | ||
23 | .quad sys_link | ||
24 | .quad sys_unlink /* 10 */ | ||
25 | .quad alpha_ni_syscall | ||
26 | .quad sys_chdir | ||
27 | .quad sys_fchdir | ||
28 | .quad sys_mknod | ||
29 | .quad sys_chmod /* 15 */ | ||
30 | .quad sys_chown | ||
31 | .quad osf_brk | ||
32 | .quad alpha_ni_syscall | ||
33 | .quad sys_lseek | ||
34 | .quad sys_getxpid /* 20 */ | ||
35 | .quad osf_mount | ||
36 | .quad sys_umount | ||
37 | .quad sys_setuid | ||
38 | .quad sys_getxuid | ||
39 | .quad alpha_ni_syscall /* 25 */ | ||
40 | .quad sys_ptrace | ||
41 | .quad alpha_ni_syscall | ||
42 | .quad alpha_ni_syscall | ||
43 | .quad alpha_ni_syscall | ||
44 | .quad alpha_ni_syscall /* 30 */ | ||
45 | .quad alpha_ni_syscall | ||
46 | .quad alpha_ni_syscall | ||
47 | .quad sys_access | ||
48 | .quad alpha_ni_syscall | ||
49 | .quad alpha_ni_syscall /* 35 */ | ||
50 | .quad sys_sync | ||
51 | .quad sys_kill | ||
52 | .quad alpha_ni_syscall | ||
53 | .quad sys_setpgid | ||
54 | .quad alpha_ni_syscall /* 40 */ | ||
55 | .quad sys_dup | ||
56 | .quad sys_pipe | ||
57 | .quad osf_set_program_attributes | ||
58 | .quad alpha_ni_syscall | ||
59 | .quad sys_open /* 45 */ | ||
60 | .quad alpha_ni_syscall | ||
61 | .quad sys_getxgid | ||
62 | .quad osf_sigprocmask | ||
63 | .quad alpha_ni_syscall | ||
64 | .quad alpha_ni_syscall /* 50 */ | ||
65 | .quad sys_acct | ||
66 | .quad sys_sigpending | ||
67 | .quad alpha_ni_syscall | ||
68 | .quad sys_ioctl | ||
69 | .quad alpha_ni_syscall /* 55 */ | ||
70 | .quad alpha_ni_syscall | ||
71 | .quad sys_symlink | ||
72 | .quad sys_readlink | ||
73 | .quad sys_execve | ||
74 | .quad sys_umask /* 60 */ | ||
75 | .quad sys_chroot | ||
76 | .quad alpha_ni_syscall | ||
77 | .quad sys_getpgrp | ||
78 | .quad sys_getpagesize | ||
79 | .quad alpha_ni_syscall /* 65 */ | ||
80 | .quad sys_vfork | ||
81 | .quad sys_newstat | ||
82 | .quad sys_newlstat | ||
83 | .quad alpha_ni_syscall | ||
84 | .quad alpha_ni_syscall /* 70 */ | ||
85 | .quad osf_mmap | ||
86 | .quad alpha_ni_syscall | ||
87 | .quad sys_munmap | ||
88 | .quad sys_mprotect | ||
89 | .quad sys_madvise /* 75 */ | ||
90 | .quad sys_vhangup | ||
91 | .quad alpha_ni_syscall | ||
92 | .quad alpha_ni_syscall | ||
93 | .quad sys_getgroups | ||
94 | /* map BSD's setpgrp to sys_setpgid for binary compatibility: */ | ||
95 | .quad sys_setgroups /* 80 */ | ||
96 | .quad alpha_ni_syscall | ||
97 | .quad sys_setpgid | ||
98 | .quad osf_setitimer | ||
99 | .quad alpha_ni_syscall | ||
100 | .quad alpha_ni_syscall /* 85 */ | ||
101 | .quad osf_getitimer | ||
102 | .quad sys_gethostname | ||
103 | .quad sys_sethostname | ||
104 | .quad sys_getdtablesize | ||
105 | .quad sys_dup2 /* 90 */ | ||
106 | .quad sys_newfstat | ||
107 | .quad sys_fcntl | ||
108 | .quad osf_select | ||
109 | .quad sys_poll | ||
110 | .quad sys_fsync /* 95 */ | ||
111 | .quad sys_setpriority | ||
112 | .quad sys_socket | ||
113 | .quad sys_connect | ||
114 | .quad sys_accept | ||
115 | .quad osf_getpriority /* 100 */ | ||
116 | .quad sys_send | ||
117 | .quad sys_recv | ||
118 | .quad sys_sigreturn | ||
119 | .quad sys_bind | ||
120 | .quad sys_setsockopt /* 105 */ | ||
121 | .quad sys_listen | ||
122 | .quad alpha_ni_syscall | ||
123 | .quad alpha_ni_syscall | ||
124 | .quad alpha_ni_syscall | ||
125 | .quad alpha_ni_syscall /* 110 */ | ||
126 | .quad sys_sigsuspend | ||
127 | .quad osf_sigstack | ||
128 | .quad sys_recvmsg | ||
129 | .quad sys_sendmsg | ||
130 | .quad alpha_ni_syscall /* 115 */ | ||
131 | .quad osf_gettimeofday | ||
132 | .quad osf_getrusage | ||
133 | .quad sys_getsockopt | ||
134 | .quad alpha_ni_syscall | ||
135 | #ifdef CONFIG_OSF4_COMPAT | ||
136 | .quad osf_readv /* 120 */ | ||
137 | .quad osf_writev | ||
138 | #else | ||
139 | .quad sys_readv /* 120 */ | ||
140 | .quad sys_writev | ||
141 | #endif | ||
142 | .quad osf_settimeofday | ||
143 | .quad sys_fchown | ||
144 | .quad sys_fchmod | ||
145 | .quad sys_recvfrom /* 125 */ | ||
146 | .quad sys_setreuid | ||
147 | .quad sys_setregid | ||
148 | .quad sys_rename | ||
149 | .quad sys_truncate | ||
150 | .quad sys_ftruncate /* 130 */ | ||
151 | .quad sys_flock | ||
152 | .quad sys_setgid | ||
153 | .quad sys_sendto | ||
154 | .quad sys_shutdown | ||
155 | .quad sys_socketpair /* 135 */ | ||
156 | .quad sys_mkdir | ||
157 | .quad sys_rmdir | ||
158 | .quad osf_utimes | ||
159 | .quad alpha_ni_syscall | ||
160 | .quad alpha_ni_syscall /* 140 */ | ||
161 | .quad sys_getpeername | ||
162 | .quad alpha_ni_syscall | ||
163 | .quad alpha_ni_syscall | ||
164 | .quad sys_getrlimit | ||
165 | .quad sys_setrlimit /* 145 */ | ||
166 | .quad alpha_ni_syscall | ||
167 | .quad sys_setsid | ||
168 | .quad sys_quotactl | ||
169 | .quad alpha_ni_syscall | ||
170 | .quad sys_getsockname /* 150 */ | ||
171 | .quad alpha_ni_syscall | ||
172 | .quad alpha_ni_syscall | ||
173 | .quad alpha_ni_syscall | ||
174 | .quad alpha_ni_syscall | ||
175 | .quad alpha_ni_syscall /* 155 */ | ||
176 | .quad osf_sigaction | ||
177 | .quad alpha_ni_syscall | ||
178 | .quad alpha_ni_syscall | ||
179 | .quad osf_getdirentries | ||
180 | .quad osf_statfs /* 160 */ | ||
181 | .quad osf_fstatfs | ||
182 | .quad alpha_ni_syscall | ||
183 | .quad alpha_ni_syscall | ||
184 | .quad alpha_ni_syscall | ||
185 | .quad osf_getdomainname /* 165 */ | ||
186 | .quad sys_setdomainname | ||
187 | .quad alpha_ni_syscall | ||
188 | .quad alpha_ni_syscall | ||
189 | .quad alpha_ni_syscall | ||
190 | .quad alpha_ni_syscall /* 170 */ | ||
191 | .quad alpha_ni_syscall | ||
192 | .quad alpha_ni_syscall | ||
193 | .quad alpha_ni_syscall | ||
194 | .quad alpha_ni_syscall | ||
195 | .quad alpha_ni_syscall /* 175 */ | ||
196 | .quad alpha_ni_syscall | ||
197 | .quad alpha_ni_syscall | ||
198 | .quad alpha_ni_syscall | ||
199 | .quad alpha_ni_syscall | ||
200 | .quad alpha_ni_syscall /* 180 */ | ||
201 | .quad alpha_ni_syscall | ||
202 | .quad alpha_ni_syscall | ||
203 | .quad alpha_ni_syscall | ||
204 | .quad alpha_ni_syscall | ||
205 | .quad alpha_ni_syscall /* 185 */ | ||
206 | .quad alpha_ni_syscall | ||
207 | .quad alpha_ni_syscall | ||
208 | .quad alpha_ni_syscall | ||
209 | .quad alpha_ni_syscall | ||
210 | .quad alpha_ni_syscall /* 190 */ | ||
211 | .quad alpha_ni_syscall | ||
212 | .quad alpha_ni_syscall | ||
213 | .quad alpha_ni_syscall | ||
214 | .quad alpha_ni_syscall | ||
215 | .quad alpha_ni_syscall /* 195 */ | ||
216 | .quad alpha_ni_syscall | ||
217 | .quad alpha_ni_syscall | ||
218 | .quad alpha_ni_syscall | ||
219 | /* The OSF swapon has two extra arguments, but we ignore them. */ | ||
220 | .quad sys_swapon | ||
221 | .quad sys_msgctl /* 200 */ | ||
222 | .quad sys_msgget | ||
223 | .quad sys_msgrcv | ||
224 | .quad sys_msgsnd | ||
225 | .quad sys_semctl | ||
226 | .quad sys_semget /* 205 */ | ||
227 | .quad sys_semop | ||
228 | .quad osf_utsname | ||
229 | .quad sys_lchown | ||
230 | .quad osf_shmat | ||
231 | .quad sys_shmctl /* 210 */ | ||
232 | .quad sys_shmdt | ||
233 | .quad sys_shmget | ||
234 | .quad alpha_ni_syscall | ||
235 | .quad alpha_ni_syscall | ||
236 | .quad alpha_ni_syscall /* 215 */ | ||
237 | .quad alpha_ni_syscall | ||
238 | .quad sys_msync | ||
239 | .quad alpha_ni_syscall | ||
240 | .quad alpha_ni_syscall | ||
241 | .quad alpha_ni_syscall /* 220 */ | ||
242 | .quad alpha_ni_syscall | ||
243 | .quad alpha_ni_syscall | ||
244 | .quad alpha_ni_syscall | ||
245 | .quad alpha_ni_syscall | ||
246 | .quad alpha_ni_syscall /* 225 */ | ||
247 | .quad alpha_ni_syscall | ||
248 | .quad alpha_ni_syscall | ||
249 | .quad alpha_ni_syscall | ||
250 | .quad alpha_ni_syscall | ||
251 | .quad alpha_ni_syscall /* 230 */ | ||
252 | .quad alpha_ni_syscall | ||
253 | .quad alpha_ni_syscall | ||
254 | .quad sys_getpgid | ||
255 | .quad sys_getsid | ||
256 | .quad sys_sigaltstack /* 235 */ | ||
257 | .quad alpha_ni_syscall | ||
258 | .quad alpha_ni_syscall | ||
259 | .quad alpha_ni_syscall | ||
260 | .quad alpha_ni_syscall | ||
261 | .quad alpha_ni_syscall /* 240 */ | ||
262 | .quad osf_sysinfo | ||
263 | .quad alpha_ni_syscall | ||
264 | .quad alpha_ni_syscall | ||
265 | .quad osf_proplist_syscall | ||
266 | .quad alpha_ni_syscall /* 245 */ | ||
267 | .quad alpha_ni_syscall | ||
268 | .quad alpha_ni_syscall | ||
269 | .quad alpha_ni_syscall | ||
270 | .quad alpha_ni_syscall | ||
271 | .quad alpha_ni_syscall /* 250 */ | ||
272 | .quad osf_usleep_thread | ||
273 | .quad alpha_ni_syscall | ||
274 | .quad alpha_ni_syscall | ||
275 | .quad sys_sysfs | ||
276 | .quad alpha_ni_syscall /* 255 */ | ||
277 | .quad osf_getsysinfo | ||
278 | .quad osf_setsysinfo | ||
279 | .quad alpha_ni_syscall | ||
280 | .quad alpha_ni_syscall | ||
281 | .quad alpha_ni_syscall /* 260 */ | ||
282 | .quad alpha_ni_syscall | ||
283 | .quad alpha_ni_syscall | ||
284 | .quad alpha_ni_syscall | ||
285 | .quad alpha_ni_syscall | ||
286 | .quad alpha_ni_syscall /* 265 */ | ||
287 | .quad alpha_ni_syscall | ||
288 | .quad alpha_ni_syscall | ||
289 | .quad alpha_ni_syscall | ||
290 | .quad alpha_ni_syscall | ||
291 | .quad alpha_ni_syscall /* 270 */ | ||
292 | .quad alpha_ni_syscall | ||
293 | .quad alpha_ni_syscall | ||
294 | .quad alpha_ni_syscall | ||
295 | .quad alpha_ni_syscall | ||
296 | .quad alpha_ni_syscall /* 275 */ | ||
297 | .quad alpha_ni_syscall | ||
298 | .quad alpha_ni_syscall | ||
299 | .quad alpha_ni_syscall | ||
300 | .quad alpha_ni_syscall | ||
301 | .quad alpha_ni_syscall /* 280 */ | ||
302 | .quad alpha_ni_syscall | ||
303 | .quad alpha_ni_syscall | ||
304 | .quad alpha_ni_syscall | ||
305 | .quad alpha_ni_syscall | ||
306 | .quad alpha_ni_syscall /* 285 */ | ||
307 | .quad alpha_ni_syscall | ||
308 | .quad alpha_ni_syscall | ||
309 | .quad alpha_ni_syscall | ||
310 | .quad alpha_ni_syscall | ||
311 | .quad alpha_ni_syscall /* 290 */ | ||
312 | .quad alpha_ni_syscall | ||
313 | .quad alpha_ni_syscall | ||
314 | .quad alpha_ni_syscall | ||
315 | .quad alpha_ni_syscall | ||
316 | .quad alpha_ni_syscall /* 295 */ | ||
317 | .quad alpha_ni_syscall | ||
318 | .quad alpha_ni_syscall | ||
319 | .quad alpha_ni_syscall | ||
320 | .quad alpha_ni_syscall | ||
321 | /* linux-specific system calls start at 300 */ | ||
322 | .quad sys_bdflush /* 300 */ | ||
323 | .quad sys_sethae | ||
324 | .quad sys_mount | ||
325 | .quad sys_old_adjtimex | ||
326 | .quad sys_swapoff | ||
327 | .quad sys_getdents /* 305 */ | ||
328 | .quad sys_ni_syscall /* 306: old create_module */ | ||
329 | .quad sys_init_module | ||
330 | .quad sys_delete_module | ||
331 | .quad sys_ni_syscall /* 309: old get_kernel_syms */ | ||
332 | .quad sys_syslog /* 310 */ | ||
333 | .quad sys_reboot | ||
334 | .quad sys_clone | ||
335 | .quad sys_uselib | ||
336 | .quad sys_mlock | ||
337 | .quad sys_munlock /* 315 */ | ||
338 | .quad sys_mlockall | ||
339 | .quad sys_munlockall | ||
340 | .quad sys_sysinfo | ||
341 | .quad sys_sysctl | ||
342 | .quad sys_ni_syscall /* 320 */ | ||
343 | .quad sys_oldumount | ||
344 | .quad sys_swapon | ||
345 | .quad sys_times | ||
346 | .quad sys_personality | ||
347 | .quad sys_setfsuid /* 325 */ | ||
348 | .quad sys_setfsgid | ||
349 | .quad sys_ustat | ||
350 | .quad sys_statfs | ||
351 | .quad sys_fstatfs | ||
352 | .quad sys_sched_setparam /* 330 */ | ||
353 | .quad sys_sched_getparam | ||
354 | .quad sys_sched_setscheduler | ||
355 | .quad sys_sched_getscheduler | ||
356 | .quad sys_sched_yield | ||
357 | .quad sys_sched_get_priority_max /* 335 */ | ||
358 | .quad sys_sched_get_priority_min | ||
359 | .quad sys_sched_rr_get_interval | ||
360 | .quad sys_ni_syscall /* sys_afs_syscall */ | ||
361 | .quad sys_newuname | ||
362 | .quad sys_nanosleep /* 340 */ | ||
363 | .quad sys_mremap | ||
364 | .quad sys_nfsservctl | ||
365 | .quad sys_setresuid | ||
366 | .quad sys_getresuid | ||
367 | .quad sys_pciconfig_read /* 345 */ | ||
368 | .quad sys_pciconfig_write | ||
369 | .quad sys_ni_syscall /* 347: old query_module */ | ||
370 | .quad sys_prctl | ||
371 | .quad sys_pread64 | ||
372 | .quad sys_pwrite64 /* 350 */ | ||
373 | .quad sys_rt_sigreturn | ||
374 | .quad sys_rt_sigaction | ||
375 | .quad sys_rt_sigprocmask | ||
376 | .quad sys_rt_sigpending | ||
377 | .quad sys_rt_sigtimedwait /* 355 */ | ||
378 | .quad sys_rt_sigqueueinfo | ||
379 | .quad sys_rt_sigsuspend | ||
380 | .quad sys_select | ||
381 | .quad sys_gettimeofday | ||
382 | .quad sys_settimeofday /* 360 */ | ||
383 | .quad sys_getitimer | ||
384 | .quad sys_setitimer | ||
385 | .quad sys_utimes | ||
386 | .quad sys_getrusage | ||
387 | .quad sys_wait4 /* 365 */ | ||
388 | .quad sys_adjtimex | ||
389 | .quad sys_getcwd | ||
390 | .quad sys_capget | ||
391 | .quad sys_capset | ||
392 | .quad sys_sendfile64 /* 370 */ | ||
393 | .quad sys_setresgid | ||
394 | .quad sys_getresgid | ||
395 | .quad sys_ni_syscall /* sys_dipc */ | ||
396 | .quad sys_pivot_root | ||
397 | .quad sys_mincore /* 375 */ | ||
398 | .quad sys_pciconfig_iobase | ||
399 | .quad sys_getdents64 | ||
400 | .quad sys_gettid | ||
401 | .quad sys_readahead | ||
402 | .quad sys_ni_syscall /* 380 */ | ||
403 | .quad sys_tkill | ||
404 | .quad sys_setxattr | ||
405 | .quad sys_lsetxattr | ||
406 | .quad sys_fsetxattr | ||
407 | .quad sys_getxattr /* 385 */ | ||
408 | .quad sys_lgetxattr | ||
409 | .quad sys_fgetxattr | ||
410 | .quad sys_listxattr | ||
411 | .quad sys_llistxattr | ||
412 | .quad sys_flistxattr /* 390 */ | ||
413 | .quad sys_removexattr | ||
414 | .quad sys_lremovexattr | ||
415 | .quad sys_fremovexattr | ||
416 | .quad sys_futex | ||
417 | .quad sys_sched_setaffinity /* 395 */ | ||
418 | .quad sys_sched_getaffinity | ||
419 | .quad sys_ni_syscall /* 397, tux */ | ||
420 | .quad sys_io_setup | ||
421 | .quad sys_io_destroy | ||
422 | .quad sys_io_getevents /* 400 */ | ||
423 | .quad sys_io_submit | ||
424 | .quad sys_io_cancel | ||
425 | .quad sys_ni_syscall /* 403, sys_alloc_hugepages */ | ||
426 | .quad sys_ni_syscall /* 404, sys_free_hugepages */ | ||
427 | .quad sys_exit_group /* 405 */ | ||
428 | .quad sys_lookup_dcookie | ||
429 | .quad sys_epoll_create | ||
430 | .quad sys_epoll_ctl | ||
431 | .quad sys_epoll_wait | ||
432 | .quad sys_remap_file_pages /* 410 */ | ||
433 | .quad sys_set_tid_address | ||
434 | .quad sys_restart_syscall | ||
435 | .quad sys_fadvise64 | ||
436 | .quad sys_timer_create | ||
437 | .quad sys_timer_settime /* 415 */ | ||
438 | .quad sys_timer_gettime | ||
439 | .quad sys_timer_getoverrun | ||
440 | .quad sys_timer_delete | ||
441 | .quad sys_clock_settime | ||
442 | .quad sys_clock_gettime /* 420 */ | ||
443 | .quad sys_clock_getres | ||
444 | .quad sys_clock_nanosleep | ||
445 | .quad sys_semtimedop | ||
446 | .quad sys_tgkill | ||
447 | .quad sys_stat64 /* 425 */ | ||
448 | .quad sys_lstat64 | ||
449 | .quad sys_fstat64 | ||
450 | .quad sys_ni_syscall /* sys_vserver */ | ||
451 | .quad sys_ni_syscall /* sys_mbind */ | ||
452 | .quad sys_ni_syscall /* sys_get_mempolicy */ | ||
453 | .quad sys_ni_syscall /* sys_set_mempolicy */ | ||
454 | .quad sys_mq_open | ||
455 | .quad sys_mq_unlink | ||
456 | .quad sys_mq_timedsend | ||
457 | .quad sys_mq_timedreceive /* 435 */ | ||
458 | .quad sys_mq_notify | ||
459 | .quad sys_mq_getsetattr | ||
460 | .quad sys_waitid | ||
461 | |||
462 | .size sys_call_table, . - sys_call_table | ||
463 | .type sys_call_table, @object | ||
464 | |||
465 | /* Remember to update everything, kids. */ | ||
466 | .ifne (. - sys_call_table) - (NR_SYSCALLS * 8) | ||
467 | .err | ||
468 | .endif | ||
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c new file mode 100644 index 000000000000..8226c5cd788c --- /dev/null +++ b/arch/alpha/kernel/time.c | |||
@@ -0,0 +1,591 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/kernel/time.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds | ||
5 | * | ||
6 | * This file contains the PC-specific time handling details: | ||
7 | * reading the RTC at bootup, etc.. | ||
8 | * 1994-07-02 Alan Modra | ||
9 | * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime | ||
10 | * 1995-03-26 Markus Kuhn | ||
11 | * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 | ||
12 | * precision CMOS clock update | ||
13 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | ||
14 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | ||
15 | * 1997-01-09 Adrian Sun | ||
16 | * use interval timer if CONFIG_RTC=y | ||
17 | * 1997-10-29 John Bowman (bowman@math.ualberta.ca) | ||
18 | * fixed tick loss calculation in timer_interrupt | ||
19 | * (round system clock to nearest tick instead of truncating) | ||
20 | * fixed algorithm in time_init for getting time from CMOS clock | ||
21 | * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) | ||
22 | * fixed algorithm in do_gettimeofday() for calculating the precise time | ||
23 | * from processor cycle counter (now taking lost_ticks into account) | ||
24 | * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de> | ||
25 | * Fixed time_init to be aware of epoches != 1900. This prevents | ||
26 | * booting up in 2048 for me;) Code is stolen from rtc.c. | ||
27 | * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com> | ||
28 | * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM | ||
29 | */ | ||
30 | #include <linux/config.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/param.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/mm.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/ioport.h> | ||
40 | #include <linux/irq.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | #include <linux/init.h> | ||
43 | #include <linux/bcd.h> | ||
44 | #include <linux/profile.h> | ||
45 | |||
46 | #include <asm/uaccess.h> | ||
47 | #include <asm/io.h> | ||
48 | #include <asm/hwrpb.h> | ||
49 | #include <asm/8253pit.h> | ||
50 | |||
51 | #include <linux/mc146818rtc.h> | ||
52 | #include <linux/time.h> | ||
53 | #include <linux/timex.h> | ||
54 | |||
55 | #include "proto.h" | ||
56 | #include "irq_impl.h" | ||
57 | |||
/* 64-bit jiffies counter, defined per-arch in this kernel version. */
u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

extern unsigned long wall_jiffies;	/* kernel/timer.c */

static int set_rtc_mmss(unsigned long);

/* Serializes access to the CMOS/RTC registers. */
DEFINE_SPINLOCK(rtc_lock);

/* One tick expressed in microseconds (tick_nsec is in nanoseconds). */
#define TICK_SIZE (tick_nsec / 1000)

/*
 * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
 * by 48 gives us 16 bits for HZ while keeping the accuracy good even
 * for large CPU clock rates.
 */
#define FIX_SHIFT	48

/* lump static variables together for more efficient access: */
static struct {
	/* cycle counter last time it got invoked */
	__u32 last_time;
	/* ticks/cycle * 2^48 */
	unsigned long scaled_ticks_per_cycle;
	/* last time the CMOS clock got updated */
	time_t last_rtc_update;
	/* partial unused tick */
	unsigned long partial_tick;
} state;

/* Estimated CPU clock rate in Hz; left 0 by time_init when the
   HWRPB-supplied frequency is accepted instead. */
unsigned long est_cycle_freq;
90 | |||
91 | |||
92 | static inline __u32 rpcc(void) | ||
93 | { | ||
94 | __u32 result; | ||
95 | asm volatile ("rpcc %0" : "=r"(result)); | ||
96 | return result; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Scheduler clock - returns current time in nanosec units. | ||
101 | * | ||
102 | * Copied from ARM code for expediency... ;-} | ||
103 | */ | ||
104 | unsigned long long sched_clock(void) | ||
105 | { | ||
106 | return (unsigned long long)jiffies * (1000000000 / HZ); | ||
107 | } | ||
108 | |||
109 | |||
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
{
	unsigned long delta;
	__u32 now;
	long nticks;

#ifndef CONFIG_SMP
	/* Not SMP, do kernel PC profiling here. */
	profile_tick(CPU_PROFILING, regs);
#endif

	write_seqlock(&xtime_lock);

	/*
	 * Calculate how many ticks have passed since the last update,
	 * including any previous partial leftover. Save any resulting
	 * fraction for the next pass.
	 */
	now = rpcc();
	/* Both values are __u32, so unsigned subtraction handles the
	   32-bit counter wrapping between interrupts. */
	delta = now - state.last_time;
	state.last_time = now;
	/* 16.48 fixed point: whole ticks in the high bits, leftover
	   fraction saved in partial_tick so no time is lost. */
	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
	nticks = delta >> FIX_SHIFT;

	while (nticks > 0) {
		do_timer(regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif
		nticks--;
	}

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0
	    && xtime.tv_sec > state.last_rtc_update + 660
	    && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
	    && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		int tmp = set_rtc_mmss(xtime.tv_sec);
		/* On failure (tmp != 0) back off only 600s so the next
		   attempt happens in ~60s rather than ~11 minutes. */
		state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
	}

	write_sequnlock(&xtime_lock);
	return IRQ_HANDLED;
}
163 | |||
/* Program the RTC for 1024 Hz periodic interrupts and set up the
   legacy 8254 PIT channels (0: system timer, 2: speaker), then hook
   up the RTC interrupt.  Shared by the machine vectors as their
   init_rtc method. */
void
common_init_rtc(void)
{
	unsigned char x;

	/* Reset periodic interrupt frequency. */
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
		CMOS_WRITE(0x26, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts. */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		/* Periodic only: mask alarm and update-ended interrupts. */
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	/* Reading the flags register acknowledges any pending RTC irq. */
	(void) CMOS_READ(RTC_INTR_FLAGS);

	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq();
}
198 | |||
199 | |||
200 | /* Validate a computed cycle counter result against the known bounds for | ||
201 | the given processor core. There's too much brokenness in the way of | ||
202 | timing hardware for any one method to work everywhere. :-( | ||
203 | |||
204 | Return 0 if the result cannot be trusted, otherwise return the argument. */ | ||
205 | |||
206 | static unsigned long __init | ||
207 | validate_cc_value(unsigned long cc) | ||
208 | { | ||
209 | static struct bounds { | ||
210 | unsigned int min, max; | ||
211 | } cpu_hz[] __initdata = { | ||
212 | [EV3_CPU] = { 50000000, 200000000 }, /* guess */ | ||
213 | [EV4_CPU] = { 100000000, 300000000 }, | ||
214 | [LCA4_CPU] = { 100000000, 300000000 }, /* guess */ | ||
215 | [EV45_CPU] = { 200000000, 300000000 }, | ||
216 | [EV5_CPU] = { 250000000, 433000000 }, | ||
217 | [EV56_CPU] = { 333000000, 667000000 }, | ||
218 | [PCA56_CPU] = { 400000000, 600000000 }, /* guess */ | ||
219 | [PCA57_CPU] = { 500000000, 600000000 }, /* guess */ | ||
220 | [EV6_CPU] = { 466000000, 600000000 }, | ||
221 | [EV67_CPU] = { 600000000, 750000000 }, | ||
222 | [EV68AL_CPU] = { 750000000, 940000000 }, | ||
223 | [EV68CB_CPU] = { 1000000000, 1333333333 }, | ||
224 | /* None of the following are shipping as of 2001-11-01. */ | ||
225 | [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */ | ||
226 | [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */ | ||
227 | [EV7_CPU] = { 800000000, 1400000000 }, /* guess */ | ||
228 | [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */ | ||
229 | }; | ||
230 | |||
231 | /* Allow for some drift in the crystal. 10MHz is more than enough. */ | ||
232 | const unsigned int deviation = 10000000; | ||
233 | |||
234 | struct percpu_struct *cpu; | ||
235 | unsigned int index; | ||
236 | |||
237 | cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset); | ||
238 | index = cpu->type & 0xffffffff; | ||
239 | |||
240 | /* If index out of bounds, no way to validate. */ | ||
241 | if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0])) | ||
242 | return cc; | ||
243 | |||
244 | /* If index contains no data, no way to validate. */ | ||
245 | if (cpu_hz[index].max == 0) | ||
246 | return cc; | ||
247 | |||
248 | if (cc < cpu_hz[index].min - deviation | ||
249 | || cc > cpu_hz[index].max + deviation) | ||
250 | return 0; | ||
251 | |||
252 | return cc; | ||
253 | } | ||
254 | |||
255 | |||
/*
 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH	0xffff
#define TIMEOUT_COUNT	0x100000

/* Count CPU cycles (rpcc) across one PIT channel-2 countdown of
   CALIBRATE_LATCH+1 input ticks, then scale by PIT_TICK_RATE to get
   the CPU frequency in Hz.  Returns 0 if the countdown never
   completed or completed implausibly fast. */
static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);		/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	cc = rpcc();
	/* Spin until the channel-2 output (port 61h bit 5) goes high,
	   bounded by TIMEOUT_COUNT in case the hardware never fires. */
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST. */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

	/* cycles / (latch_ticks / PIT_TICK_RATE) = cycles per second. */
	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}
295 | |||
296 | /* The Linux interpretation of the CMOS clock register contents: | ||
297 | When the Update-In-Progress (UIP) flag goes from 1 to 0, the | ||
298 | RTC registers show the second which has precisely just started. | ||
299 | Let's hope other operating systems interpret the RTC the same way. */ | ||
300 | |||
301 | static unsigned long __init | ||
302 | rpcc_after_update_in_progress(void) | ||
303 | { | ||
304 | do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)); | ||
305 | do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); | ||
306 | |||
307 | return rpcc(); | ||
308 | } | ||
309 | |||
/* Boot-time clock setup: calibrate the CPU cycle frequency (PIT
   first, then by counting cycles across an RTC second edge), check
   it against the HWRPB value, read wall time from the CMOS RTC, and
   initialize the cycle->tick fixed-point state before starting the
   timer source. */
void __init
time_init(void)
{
	unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
	unsigned long cycle_freq, tolerance;
	long diff;

	/* Calibrate CPU clock -- attempt #1. */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	/* Sample the cycle counter at a second boundary regardless; the
	   value also seeds state.last_time below. */
	cc1 = rpcc_after_update_in_progress();

	/* Calibrate CPU clock -- attempt #2. */
	if (!est_cycle_freq) {
		/* Cycles elapsed across exactly one RTC second. */
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it. Otherwise, use what we found. */
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus. "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			/* HWRPB value accepted; drop our estimate. */
			est_cycle_freq = 0;
		}
	} else if (! validate_cc_value (cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
	   to settle, as the Update-In-Progress bit going low isn't good
	   enough on some hardware. 2ms is our guess; we haven't found
	   bogomips yet, but this is close on a 500Mhz box. */
	__delay(1000000);

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	/* The RTC may store values as BCD; convert to binary if so. */
	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
		BCD_TO_BIN(sec);
		BCD_TO_BIN(min);
		BCD_TO_BIN(hour);
		BCD_TO_BIN(day);
		BCD_TO_BIN(mon);
		BCD_TO_BIN(year);
	}

	/* Map the 2-digit RTC year to an epoch: <20 -> 2000 (so 2048 is
	   avoided -- see the 2000-08-13 changelog entry above), 20-47 ->
	   NT, 48-69 -> Digital UNIX, else the PC-standard 1900. */
	/* PC-like is standard; used for year >= 70 */
	epoch = 1900;
	if (year < 20)
		epoch = 2000;
	else if (year >= 20 && year < 48)
		/* NT epoch */
		epoch = 1980;
	else if (year >= 48 && year < 70)
		/* Digital UNIX epoch */
		epoch = 1952;

	printk(KERN_INFO "Using epoch = %d\n", epoch);

	/* Never hand mktime a pre-1970 year. */
	if ((year += epoch) < 1970)
		year += 100;

	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
	xtime.tv_nsec = 0;

	wall_to_monotonic.tv_sec -= xtime.tv_sec;
	wall_to_monotonic.tv_nsec = 0;

	/* FIX_SHIFT = 48 leaves only 16 bits for HZ in the fixed-point
	   scale.  __you_loose is declared but presumably defined
	   nowhere, making this a link-time assertion when HZ > 2^16 --
	   NOTE(review): confirm no definition exists elsewhere. */
	if (HZ > (1<<16)) {
		extern void __you_loose (void);
		__you_loose();
	}

	state.last_time = cc1;
	state.scaled_ticks_per_cycle
		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
	state.last_rtc_update = 0;
	state.partial_tick = 0L;

	/* Startup the timer source. */
	alpha_mv.init_rtc();
}
408 | |||
/*
 * Use the cycle counter to estimate an displacement from the last time
 * tick. Unfortunately the Alpha designers made only the low 32-bits of
 * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz
 * part. So we can't do the "find absolute time in terms of cycles" thing
 * that the other ports do.
 */
void
do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long sec, usec, lost, seq;
	unsigned long delta_cycles, delta_usec, partial_tick;

	/* Take a consistent snapshot of the timekeeping state; retry
	   if the timer interrupt updated it while we were reading. */
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		delta_cycles = rpcc() - state.last_time;
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / 1000);
		partial_tick = state.partial_tick;
		lost = jiffies - wall_jiffies;

	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

#ifdef CONFIG_SMP
	/* Until and unless we figure out how to get cpu cycle counters
	   in sync and keep them there, we can't use the rpcc tricks. */
	delta_usec = lost * (1000000 / HZ);
#else
	/*
	 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
	 *	= cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
	 *	= cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
	 *
	 * which, given a 600MHz cycle and a 1024Hz tick, has a
	 * dynamic range of about 1.7e17, which is less than the
	 * 1.8e19 in an unsigned long, so we are safe from overflow.
	 *
	 * Round, but with .5 up always, since .5 to even is harder
	 * with no clear gain.
	 */

	delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
		      + partial_tick
		      + (lost << FIX_SHIFT)) * 15625;
	delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
#endif

	usec += delta_usec;
	/* Single carry from usec into sec -- NOTE(review): assumes the
	   accumulated delta is under one second, i.e. timely ticks. */
	if (usec >= 1000000) {
		sec += 1;
		usec -= 1000000;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
469 | |||
/* Set the wall-clock time.  Returns 0 on success, -EINVAL for an
   out-of-range nanoseconds field.  Subtracts the same sub-tick offset
   that do_gettimeofday adds, so set-then-get stays coherent. */
int
do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long delta_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* The offset that is added into time in do_gettimeofday above
	   must be subtracted out here to keep a coherent view of the
	   time. Without this, a full-tick error is possible. */

#ifdef CONFIG_SMP
	delta_nsec = (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);
#else
	/* Same cycle->usec fixed-point conversion as do_gettimeofday,
	   scaled by 1000 at the end to get nanoseconds. */
	delta_nsec = rpcc() - state.last_time;
	delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
		      + state.partial_tick
		      + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625;
	delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
	delta_nsec *= 1000;
#endif

	nsec -= delta_nsec;

	/* Shift wall_to_monotonic by the opposite of the xtime change
	   so the sum (monotonic time) is unaffected by the set. */
	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;	/* clock is no longer NTP-synced */
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
516 | |||
517 | |||
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
 * called 500 ms after the second nowtime has started, because when
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later. Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 * sets the minutes. Usually you won't notice until after reboot!
 */


/* Write the minutes and seconds of `nowtime` (seconds since epoch)
   into the RTC.  Returns 0 on success, -1 if the RTC differs by 30+
   minutes and is left untouched. */
static int
set_rtc_mmss(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char save_control, save_freq_select;

	/* irq are locally disabled here */
	spin_lock(&rtc_lock);
	/* Tell the clock it's being set */
	save_control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

	/* Stop and reset prescaler */
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
		/* correct for half hour time zone */
		real_minutes += 30;
	}
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		/* Convert back to the RTC's storage format if needed. */
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			BIN_TO_BCD(real_seconds);
			BIN_TO_BCD(real_minutes);
		}
		CMOS_WRITE(real_seconds,RTC_SECONDS);
		CMOS_WRITE(real_minutes,RTC_MINUTES);
	} else {
		printk(KERN_WARNING
		       "set_rtc_mmss: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	/* The following flags have to be released exactly in this order,
	 * otherwise the DS12887 (popular MC146818A clone with integrated
	 * battery and quartz) will not reset the oscillator and will not
	 * update precisely 500 ms later. You won't find this mentioned in
	 * the Dallas Semiconductor data sheets, but who believes data
	 * sheets anyway ... -- Markus Kuhn
	 */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	spin_unlock(&rtc_lock);

	return retval;
}
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c new file mode 100644 index 000000000000..fd7bd17cc960 --- /dev/null +++ b/arch/alpha/kernel/traps.c | |||
@@ -0,0 +1,1092 @@ | |||
1 | /* | ||
2 | * arch/alpha/kernel/traps.c | ||
3 | * | ||
4 | * (C) Copyright 1994 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This file initializes the trap entry points | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/tty.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/kallsyms.h> | ||
20 | |||
21 | #include <asm/gentrap.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | #include <asm/unaligned.h> | ||
24 | #include <asm/sysinfo.h> | ||
25 | #include <asm/hwrpb.h> | ||
26 | #include <asm/mmu_context.h> | ||
27 | |||
28 | #include "proto.h" | ||
29 | |||
/* Work-around for some SRMs which mishandle opDEC faults. */

/* Adjustment (0 or 4) that must be added to the saved PC on an opDEC
   fault; probed once at boot by opDEC_check() below.  */
static int opDEC_fix;

/*
 * Probe how this SRM PALcode reports the PC for an opDEC (illegal
 * instruction) fault.  The Alpha architecture requires the reported PC
 * to point one instruction *past* the faulting one; some SRM versions
 * report the faulting PC itself.  We install a temporary instruction
 * fault handler, deliberately trigger an opDEC (cvttq/svm faults on
 * EV4), and record in opDEC_fix which of the two "lda %[fix]" insns
 * execution resumed at — i.e. the correction do_entIF must apply.
 */
static void __init
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue. */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler. */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0. */
	"	lda	%[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda	%[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}
62 | |||
/*
 * Dump the integer register file from a trap frame, using the Alpha
 * software register names (v0, t0-t11, a0-a5, gp, ...).  The
 * callee-saved registers s0-s6 (r9-r15) are not part of pt_regs; they
 * are printed only when the caller supplies them via r9_15 (indexed by
 * raw register number, i.e. r9_15[9]..r9_15[15]).
 */
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	print_symbol("pc is at %s\n", regs->pc);
	print_symbol("ra is at %s\n", regs->r26 );
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	/* The trap frame sits at the top of the stack, so the stack
	   pointer at trap time is the address just past the frame.  */
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
	__halt();
#endif
}
98 | |||
99 | #if 0 | ||
100 | static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", | ||
101 | "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", | ||
102 | "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", | ||
103 | "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"}; | ||
104 | #endif | ||
105 | |||
/*
 * Dump the machine code surrounding a faulting PC: six instructions
 * before it and one after, with the faulting instruction itself shown
 * in <angle brackets>.  Instructions are fetched with __get_user so an
 * unmapped PC merely truncates the dump instead of recursing into
 * another fault.
 */
static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}
120 | |||
/*
 * Walk the kernel stack from sp upward and print every word that looks
 * like a kernel text address (i.e. falls within [_stext, _etext)),
 * resolving each to a symbol name.  The walk stops at the top of the
 * 8 KB stack page (when sp becomes 0x2000-aligned).
 *
 * Fix: the original initialized `i` but never incremented it, so the
 * "print at most ~40 entries" guard was dead code and a stack full of
 * text-looking values produced an unbounded dump.  Count each printed
 * entry so the truncation guard actually fires.
 */
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;
	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		/* Skip words that cannot be return addresses.  */
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>]", tmp);
		print_symbol(" %s", tmp);
		printk("\n");
		if (++i > 40) {	/* was never incremented before: dead guard */
			printk(" ...");
			break;
		}
	}
	printk("\n");
}
144 | |||
/* Number of raw stack words dumped before the symbolic backtrace.  */
static int kstack_depth_to_print = 24;

/*
 * Print a raw hex dump of the kernel stack starting at sp (four words
 * per line, stopping at a THREAD_SIZE boundary), then a symbolic
 * backtrace via dik_show_trace().  The `task` argument is accepted for
 * interface compatibility but not used here.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */
	if(sp==NULL)
		sp=(unsigned long*)&sp;

	stack = sp;
	for(i=0; i < kstack_depth_to_print; i++) {
		/* Reached the top of the stack area.  */
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 4) == 0))
			printk("\n       ");
		printk("%016lx ", *stack++);
	}
	printk("\n");
	dik_show_trace(sp);
}
170 | |||
/*
 * Generic kernel-wide entry point: dump the current CPU's stack and
 * backtrace (NULL/NULL selects the running context in show_stack).
 */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);
177 | |||
/*
 * If the trap described by `regs` came from kernel mode, print a full
 * diagnostic dump (registers, backtrace, code bytes) and terminate the
 * task; if it came from user mode the function returns and the caller
 * delivers a signal instead.  r9_15 optionally supplies the
 * callee-saved registers for dik_show_regs.
 *
 * NOTE(review): the early return tests PS bit 3 — presumably the
 * "previous mode was user" bit of the processor status; confirm
 * against the PALcode documentation.
 */
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
	dik_show_regs(regs, r9_15);
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		/* do_exit itself faulted and brought us back here:
		   spin instead of recursing forever.  */
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
198 | |||
#ifndef CONFIG_MATHEMU
/* No FP emulator configured: provide stubs through function pointers.
   They return 0, which callers (do_entArith, do_entIF) treat as
   "handled, no signal to deliver".  */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
#else
/* Real emulator (math-emu); returns 0 on success or an si_code.  */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
209 | |||
/*
 * Arithmetic fault entry point (entArith).  If bit 0 of the trap
 * summary is set the faulting instruction requested software
 * completion, so we hand it to the FP emulator first; a zero si_code
 * from the emulator means the operation was completed and no signal is
 * needed.  Otherwise (or if emulation fails) the fault is fatal in
 * kernel mode and delivers SIGFPE in user mode.
 */
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;
	siginfo_t info;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search. */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGFPE, &info, current);
}
236 | |||
/*
 * Instruction fault entry point (entIF).  `type` encodes the PALcode
 * sub-cause: 0 = breakpoint (bpt), 1 = bugcheck (bugchk), 2 = gentrap,
 * 3 = FEN (FP disabled) fault, 4 = opDEC (illegal instruction),
 * 5 = illoc.  Kernel-mode faults die; user-mode faults are translated
 * into the appropriate signal.
 */
asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	siginfo_t info;
	int signo, code;

	if (regs->ps == 0) {
		/* Fault happened in kernel mode.  */
		if (type == 1) {
			/* BUG() plants the line number and file name
			   as data words right after the bugchk insn. */
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32),
			       data[0]);
		}
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	switch (type) {
	      case 0: /* breakpoint */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_trapno = 0;
		info.si_addr = (void __user *) regs->pc;

		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_info(SIGTRAP, &info, current);
		return;

	      case 1: /* bugcheck */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = __SI_FAULT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
		return;

	      case 2: /* gentrap */
		/* The gentrap code word is passed in a0 (r16); map the
		   architected GEN_* codes onto signal/si_code pairs. */
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = regs->r16;
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = __SI_FAULT;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = __SI_FAULT;
			break;
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void __user *) regs->pc;
		send_sig_info(signo, &info, current);
		return;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* The some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.  */
			regs->pc += opDEC_fix;

			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				info.si_signo = SIGFPE;
				info.si_errno = 0;
				info.si_code = si_code;
				info.si_addr = (void __user *) regs->pc;
				send_sig_info(SIGFPE, &info, current);
				return;
			}
		}
		break;

	      case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}

	/* Fall-through for anything unhandled above: illegal opcode.  */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGILL, &info, current);
}
402 | |||
/* There is an ifdef in the PALcode in MILO that enables a
   "kernel debugging entry point" as an unprivileged call_pal.

   We don't want to have anything to do with it, but unfortunately
   several versions of MILO included in distributions have it enabled,
   and if we don't put something on the entry point we'll oops.  */

/*
 * entDbg handler: fatal in kernel mode, SIGILL (forced — cannot be
 * blocked or ignored) in user mode.
 */
asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Instruction fault", regs, 0, NULL);

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	force_sig_info(SIGILL, &info, current);
}
423 | |||
424 | |||
/*
 * entUna has a different register layout to be reasonably simple.  It
 * needs access to all the integer registers (the kernel doesn't use
 * fp-regs), and it needs to have them in order for simpler access.
 *
 * Due to the non-standard register layout (and because we don't want
 * to handle floating-point regs), user-mode unaligned accesses are
 * handled separately by do_entUnaUser below.
 *
 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 * on a gp-register unaligned load/store, something is _very_ wrong
 * in the kernel anyway..
 */
struct allregs {
	unsigned long regs[32];		/* r0-r31 in register-number order */
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Unaligned-access statistics: [0] = kernel-mode, [1] = user-mode.  */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.
   a0-a2 (r16-r18) were clobbered by the trap entry, so their saved
   values live in the a0/a1/a2 members after ps/pc/gp; index (r)+19
   (i.e. 35..37) reaches them through the regs[] base.  */
#define una_reg(r)  (regs.regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
450 | |||
451 | |||
/*
 * Kernel-mode unaligned access fault (entUna).  Emulates the unaligned
 * integer load/store at regs.pc - 4 using ldq_u/stq_u + extract/insert
 * byte-manipulation sequences.  `va` is the faulting address, `opcode`
 * the instruction's major opcode, `reg` its target register.  Each
 * emulation sequence carries its own exception-table entries so a
 * genuinely bad address is forwarded to the normal fixup machinery
 * (or, failing that, produces a register dump and kills the task).
 */
asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  unsigned long a3, unsigned long a4, unsigned long a5,
	  struct allregs regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs.pc - 4;
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		/* ldl sign-extends the 32-bit result.  */
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n\t"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	/* Unrecognized opcode faulted unaligned in kernel mode: fatal.  */
	lock_kernel();
	printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
		pc, va, opcode, reg);
	do_exit(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to?  */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		(&regs)->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
 	 */
	lock_kernel();

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, current->pid);

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs.ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
 	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
 	       una_reg(19), una_reg(20), una_reg(21));
 	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs.gp, &regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(&regs+1));

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
685 | |||
/*
 * Convert an S-floating (IEEE single) value from its 32-bit memory
 * format to the 64-bit Alpha floating-point register format.  The
 * 8-bit memory exponent must be remapped into the 11-bit register
 * exponent so that zeros/denormals (exponent 0) and infinities/NaNs
 * (exponent all-ones) keep their special encodings.
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long sign_bit = (s_mem >> 31) & 0x1;
	unsigned long exp8     = (s_mem >> 23) & 0xff;	/* full S exponent */
	unsigned long mantissa = s_mem & 0x7fffff;
	unsigned long exp11;

	if (exp8 == 0xff)		/* Inf/NaN: map to all-ones */
		exp11 = 0x7ff;
	else if (exp8 == 0x00)		/* zero/denormal: stays zero */
		exp11 = 0x000;
	else if (exp8 & 0x80)		/* upper half of the bias range */
		exp11 = 0x400 | (exp8 & 0x7f);
	else				/* lower half: fill bits 9..7 */
		exp11 = 0x380 | exp8;

	return (sign_bit << 63) | (exp11 << 52) | (mantissa << 29);
}
715 | |||
/*
 * Convert an S-floating value from the 64-bit register format back to
 * its 32-bit memory layout: the sign and exponent MSB come from the
 * top two register bits, and the remaining exponent/fraction bits are
 * taken from register bits 58..29.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	unsigned long top2 = s_reg >> 62;		/* sign + exp MSB */
	unsigned long low30 = (s_reg >> 29) & 0x3fffffff;

	return (top2 << 30) | low30;
}
725 | |||
726 | /* | ||
727 | * Handle user-level unaligned fault. Handling user-level unaligned | ||
728 | * faults is *extremely* slow and produces nasty messages. A user | ||
729 | * program *should* fix unaligned faults ASAP. | ||
730 | * | ||
731 | * Notice that we have (almost) the regular kernel stack layout here, | ||
732 | * so finding the appropriate registers is a little more difficult | ||
733 | * than in the kernel case. | ||
734 | * | ||
735 | * Finally, we handle regular integer load/stores only. In | ||
736 | * particular, load-linked/store-conditionally and floating point | ||
737 | * load/stores are not supported. The former make no sense with | ||
738 | * unaligned faults (they are guaranteed to fail) and I don't think | ||
739 | * the latter will occur in any decent program. | ||
740 | * | ||
741 | * Sigh. We *do* have to handle some FP operations, because GCC will | ||
742 | * uses them as temporary storage for integer memory to memory copies. | ||
743 | * However, we need to deal with stt/ldt and sts/lds only. | ||
744 | */ | ||
745 | |||
/* Major opcodes of the integer loads/stores that do_entUnaUser
   emulates through the general-purpose register file.  */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Major opcodes that store to memory (FP and integer stores).  */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

/* Byte offset of member x within struct pt_regs.  */
#define R(x)	((size_t) &((struct pt_regs *)0)->x)

/*
 * For each integer register number, the byte offset (relative to the
 * pt_regs pointer of a user-mode trap frame) at which its saved value
 * lives.  r30 (usp) and r31 (zero) get placeholder 0 entries because
 * do_entUnaUser handles them specially via fake_reg.
 */
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R
768 | |||
769 | asmlinkage void | ||
770 | do_entUnaUser(void __user * va, unsigned long opcode, | ||
771 | unsigned long reg, struct pt_regs *regs) | ||
772 | { | ||
773 | static int cnt = 0; | ||
774 | static long last_time = 0; | ||
775 | |||
776 | unsigned long tmp1, tmp2, tmp3, tmp4; | ||
777 | unsigned long fake_reg, *reg_addr = &fake_reg; | ||
778 | siginfo_t info; | ||
779 | long error; | ||
780 | |||
781 | /* Check the UAC bits to decide what the user wants us to do | ||
782 | with the unaliged access. */ | ||
783 | |||
784 | if (!test_thread_flag (TIF_UAC_NOPRINT)) { | ||
785 | if (cnt >= 5 && jiffies - last_time > 5*HZ) { | ||
786 | cnt = 0; | ||
787 | } | ||
788 | if (++cnt < 5) { | ||
789 | printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", | ||
790 | current->comm, current->pid, | ||
791 | regs->pc - 4, va, opcode, reg); | ||
792 | } | ||
793 | last_time = jiffies; | ||
794 | } | ||
795 | if (test_thread_flag (TIF_UAC_SIGBUS)) | ||
796 | goto give_sigbus; | ||
797 | /* Not sure why you'd want to use this, but... */ | ||
798 | if (test_thread_flag (TIF_UAC_NOFIX)) | ||
799 | return; | ||
800 | |||
801 | /* Don't bother reading ds in the access check since we already | ||
802 | know that this came from the user. Also rely on the fact that | ||
803 | the page at TASK_SIZE is unmapped and so can't be touched anyway. */ | ||
804 | if (!__access_ok((unsigned long)va, 0, USER_DS)) | ||
805 | goto give_sigsegv; | ||
806 | |||
807 | ++unaligned[1].count; | ||
808 | unaligned[1].va = (unsigned long)va; | ||
809 | unaligned[1].pc = regs->pc - 4; | ||
810 | |||
811 | if ((1L << opcode) & OP_INT_MASK) { | ||
812 | /* it's an integer load/store */ | ||
813 | if (reg < 30) { | ||
814 | reg_addr = (unsigned long *) | ||
815 | ((char *)regs + unauser_reg_offsets[reg]); | ||
816 | } else if (reg == 30) { | ||
817 | /* usp in PAL regs */ | ||
818 | fake_reg = rdusp(); | ||
819 | } else { | ||
820 | /* zero "register" */ | ||
821 | fake_reg = 0; | ||
822 | } | ||
823 | } | ||
824 | |||
825 | /* We don't want to use the generic get/put unaligned macros as | ||
826 | we want to trap exceptions. Only if we actually get an | ||
827 | exception will we decide whether we should have caught it. */ | ||
828 | |||
829 | switch (opcode) { | ||
830 | case 0x0c: /* ldwu */ | ||
831 | __asm__ __volatile__( | ||
832 | "1: ldq_u %1,0(%3)\n" | ||
833 | "2: ldq_u %2,1(%3)\n" | ||
834 | " extwl %1,%3,%1\n" | ||
835 | " extwh %2,%3,%2\n" | ||
836 | "3:\n" | ||
837 | ".section __ex_table,\"a\"\n" | ||
838 | " .long 1b - .\n" | ||
839 | " lda %1,3b-1b(%0)\n" | ||
840 | " .long 2b - .\n" | ||
841 | " lda %2,3b-2b(%0)\n" | ||
842 | ".previous" | ||
843 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) | ||
844 | : "r"(va), "0"(0)); | ||
845 | if (error) | ||
846 | goto give_sigsegv; | ||
847 | *reg_addr = tmp1|tmp2; | ||
848 | break; | ||
849 | |||
850 | case 0x22: /* lds */ | ||
851 | __asm__ __volatile__( | ||
852 | "1: ldq_u %1,0(%3)\n" | ||
853 | "2: ldq_u %2,3(%3)\n" | ||
854 | " extll %1,%3,%1\n" | ||
855 | " extlh %2,%3,%2\n" | ||
856 | "3:\n" | ||
857 | ".section __ex_table,\"a\"\n" | ||
858 | " .long 1b - .\n" | ||
859 | " lda %1,3b-1b(%0)\n" | ||
860 | " .long 2b - .\n" | ||
861 | " lda %2,3b-2b(%0)\n" | ||
862 | ".previous" | ||
863 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) | ||
864 | : "r"(va), "0"(0)); | ||
865 | if (error) | ||
866 | goto give_sigsegv; | ||
867 | alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2))); | ||
868 | return; | ||
869 | |||
870 | case 0x23: /* ldt */ | ||
871 | __asm__ __volatile__( | ||
872 | "1: ldq_u %1,0(%3)\n" | ||
873 | "2: ldq_u %2,7(%3)\n" | ||
874 | " extql %1,%3,%1\n" | ||
875 | " extqh %2,%3,%2\n" | ||
876 | "3:\n" | ||
877 | ".section __ex_table,\"a\"\n" | ||
878 | " .long 1b - .\n" | ||
879 | " lda %1,3b-1b(%0)\n" | ||
880 | " .long 2b - .\n" | ||
881 | " lda %2,3b-2b(%0)\n" | ||
882 | ".previous" | ||
883 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) | ||
884 | : "r"(va), "0"(0)); | ||
885 | if (error) | ||
886 | goto give_sigsegv; | ||
887 | alpha_write_fp_reg(reg, tmp1|tmp2); | ||
888 | return; | ||
889 | |||
890 | case 0x28: /* ldl */ | ||
891 | __asm__ __volatile__( | ||
892 | "1: ldq_u %1,0(%3)\n" | ||
893 | "2: ldq_u %2,3(%3)\n" | ||
894 | " extll %1,%3,%1\n" | ||
895 | " extlh %2,%3,%2\n" | ||
896 | "3:\n" | ||
897 | ".section __ex_table,\"a\"\n" | ||
898 | " .long 1b - .\n" | ||
899 | " lda %1,3b-1b(%0)\n" | ||
900 | " .long 2b - .\n" | ||
901 | " lda %2,3b-2b(%0)\n" | ||
902 | ".previous" | ||
903 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) | ||
904 | : "r"(va), "0"(0)); | ||
905 | if (error) | ||
906 | goto give_sigsegv; | ||
907 | *reg_addr = (int)(tmp1|tmp2); | ||
908 | break; | ||
909 | |||
910 | case 0x29: /* ldq */ | ||
911 | __asm__ __volatile__( | ||
912 | "1: ldq_u %1,0(%3)\n" | ||
913 | "2: ldq_u %2,7(%3)\n" | ||
914 | " extql %1,%3,%1\n" | ||
915 | " extqh %2,%3,%2\n" | ||
916 | "3:\n" | ||
917 | ".section __ex_table,\"a\"\n" | ||
918 | " .long 1b - .\n" | ||
919 | " lda %1,3b-1b(%0)\n" | ||
920 | " .long 2b - .\n" | ||
921 | " lda %2,3b-2b(%0)\n" | ||
922 | ".previous" | ||
923 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) | ||
924 | : "r"(va), "0"(0)); | ||
925 | if (error) | ||
926 | goto give_sigsegv; | ||
927 | *reg_addr = tmp1|tmp2; | ||
928 | break; | ||
929 | |||
930 | /* Note that the store sequences do not indicate that they change | ||
931 | memory because it _should_ be affecting nothing in this context. | ||
932 | (Otherwise we have other, much larger, problems.) */ | ||
933 | case 0x0d: /* stw */ | ||
934 | __asm__ __volatile__( | ||
935 | "1: ldq_u %2,1(%5)\n" | ||
936 | "2: ldq_u %1,0(%5)\n" | ||
937 | " inswh %6,%5,%4\n" | ||
938 | " inswl %6,%5,%3\n" | ||
939 | " mskwh %2,%5,%2\n" | ||
940 | " mskwl %1,%5,%1\n" | ||
941 | " or %2,%4,%2\n" | ||
942 | " or %1,%3,%1\n" | ||
943 | "3: stq_u %2,1(%5)\n" | ||
944 | "4: stq_u %1,0(%5)\n" | ||
945 | "5:\n" | ||
946 | ".section __ex_table,\"a\"\n" | ||
947 | " .long 1b - .\n" | ||
948 | " lda %2,5b-1b(%0)\n" | ||
949 | " .long 2b - .\n" | ||
950 | " lda %1,5b-2b(%0)\n" | ||
951 | " .long 3b - .\n" | ||
952 | " lda $31,5b-3b(%0)\n" | ||
953 | " .long 4b - .\n" | ||
954 | " lda $31,5b-4b(%0)\n" | ||
955 | ".previous" | ||
956 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), | ||
957 | "=&r"(tmp3), "=&r"(tmp4) | ||
958 | : "r"(va), "r"(*reg_addr), "0"(0)); | ||
959 | if (error) | ||
960 | goto give_sigsegv; | ||
961 | return; | ||
962 | |||
963 | case 0x26: /* sts */ | ||
964 | fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg)); | ||
965 | /* FALLTHRU */ | ||
966 | |||
967 | case 0x2c: /* stl */ | ||
968 | __asm__ __volatile__( | ||
969 | "1: ldq_u %2,3(%5)\n" | ||
970 | "2: ldq_u %1,0(%5)\n" | ||
971 | " inslh %6,%5,%4\n" | ||
972 | " insll %6,%5,%3\n" | ||
973 | " msklh %2,%5,%2\n" | ||
974 | " mskll %1,%5,%1\n" | ||
975 | " or %2,%4,%2\n" | ||
976 | " or %1,%3,%1\n" | ||
977 | "3: stq_u %2,3(%5)\n" | ||
978 | "4: stq_u %1,0(%5)\n" | ||
979 | "5:\n" | ||
980 | ".section __ex_table,\"a\"\n" | ||
981 | " .long 1b - .\n" | ||
982 | " lda %2,5b-1b(%0)\n" | ||
983 | " .long 2b - .\n" | ||
984 | " lda %1,5b-2b(%0)\n" | ||
985 | " .long 3b - .\n" | ||
986 | " lda $31,5b-3b(%0)\n" | ||
987 | " .long 4b - .\n" | ||
988 | " lda $31,5b-4b(%0)\n" | ||
989 | ".previous" | ||
990 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), | ||
991 | "=&r"(tmp3), "=&r"(tmp4) | ||
992 | : "r"(va), "r"(*reg_addr), "0"(0)); | ||
993 | if (error) | ||
994 | goto give_sigsegv; | ||
995 | return; | ||
996 | |||
997 | case 0x27: /* stt */ | ||
998 | fake_reg = alpha_read_fp_reg(reg); | ||
999 | /* FALLTHRU */ | ||
1000 | |||
1001 | case 0x2d: /* stq */ | ||
1002 | __asm__ __volatile__( | ||
1003 | "1: ldq_u %2,7(%5)\n" | ||
1004 | "2: ldq_u %1,0(%5)\n" | ||
1005 | " insqh %6,%5,%4\n" | ||
1006 | " insql %6,%5,%3\n" | ||
1007 | " mskqh %2,%5,%2\n" | ||
1008 | " mskql %1,%5,%1\n" | ||
1009 | " or %2,%4,%2\n" | ||
1010 | " or %1,%3,%1\n" | ||
1011 | "3: stq_u %2,7(%5)\n" | ||
1012 | "4: stq_u %1,0(%5)\n" | ||
1013 | "5:\n" | ||
1014 | ".section __ex_table,\"a\"\n\t" | ||
1015 | " .long 1b - .\n" | ||
1016 | " lda %2,5b-1b(%0)\n" | ||
1017 | " .long 2b - .\n" | ||
1018 | " lda %1,5b-2b(%0)\n" | ||
1019 | " .long 3b - .\n" | ||
1020 | " lda $31,5b-3b(%0)\n" | ||
1021 | " .long 4b - .\n" | ||
1022 | " lda $31,5b-4b(%0)\n" | ||
1023 | ".previous" | ||
1024 | : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), | ||
1025 | "=&r"(tmp3), "=&r"(tmp4) | ||
1026 | : "r"(va), "r"(*reg_addr), "0"(0)); | ||
1027 | if (error) | ||
1028 | goto give_sigsegv; | ||
1029 | return; | ||
1030 | |||
1031 | default: | ||
1032 | /* What instruction were you trying to use, exactly? */ | ||
1033 | goto give_sigbus; | ||
1034 | } | ||
1035 | |||
1036 | /* Only integer loads should get here; everyone else returns early. */ | ||
1037 | if (reg == 30) | ||
1038 | wrusp(fake_reg); | ||
1039 | return; | ||
1040 | |||
1041 | give_sigsegv: | ||
1042 | regs->pc -= 4; /* make pc point to faulting insn */ | ||
1043 | info.si_signo = SIGSEGV; | ||
1044 | info.si_errno = 0; | ||
1045 | |||
1046 | /* We need to replicate some of the logic in mm/fault.c, | ||
1047 | since we don't have access to the fault code in the | ||
1048 | exception handling return path. */ | ||
1049 | if (!__access_ok((unsigned long)va, 0, USER_DS)) | ||
1050 | info.si_code = SEGV_ACCERR; | ||
1051 | else { | ||
1052 | struct mm_struct *mm = current->mm; | ||
1053 | down_read(&mm->mmap_sem); | ||
1054 | if (find_vma(mm, (unsigned long)va)) | ||
1055 | info.si_code = SEGV_ACCERR; | ||
1056 | else | ||
1057 | info.si_code = SEGV_MAPERR; | ||
1058 | up_read(&mm->mmap_sem); | ||
1059 | } | ||
1060 | info.si_addr = va; | ||
1061 | send_sig_info(SIGSEGV, &info, current); | ||
1062 | return; | ||
1063 | |||
1064 | give_sigbus: | ||
1065 | regs->pc -= 4; | ||
1066 | info.si_signo = SIGBUS; | ||
1067 | info.si_errno = 0; | ||
1068 | info.si_code = BUS_ADRALN; | ||
1069 | info.si_addr = va; | ||
1070 | send_sig_info(SIGBUS, &info, current); | ||
1071 | return; | ||
1072 | } | ||
1073 | |||
 | /* trap_init -- one-time boot-side setup of the Alpha exception | ||
 | machinery. Tells PALcode which global pointer to load on kernel | ||
 | entry, optionally patches a firmware opDEC bug on EV4 parts, and | ||
 | then registers the kernel's six exception entry points. Runs once | ||
 | at boot (__init); no return value, no error path. */ | ||
1074 | void __init | ||
1075 | trap_init(void) | ||
1076 | { | ||
1077 | /* Tell PAL-code what global pointer we want in the kernel. | ||
 | The register variable is pinned to $29 (the Alpha GP) by the | ||
 | __asm__("$29") binding, so wrkgp() hands PAL the live GP value. */ | ||
1078 | register unsigned long gptr __asm__("$29"); | ||
1079 | wrkgp(gptr); | ||
1080 | | ||
1081 | /* Hack for Multia (UDB) and JENSEN: some of their SRMs have | ||
1082 | a bug in the handling of the opDEC fault. Fix it up if so. | ||
 | Only EV4-implementation CPUs are affected, hence the implver() | ||
 | gate; opDEC_check() is defined elsewhere in this file. */ | ||
1083 | if (implver() == IMPLVER_EV4) | ||
1084 | opDEC_check(); | ||
1085 | | ||
 | /* Register one handler per PALcode exception class: arithmetic, | ||
 | memory management, instruction fault, unaligned access, system | ||
 | call, and debug. NOTE(review): the numeric second argument is | ||
 | presumably the OSF/1 PALcode entry-point selector for wrent — | ||
 | confirm against the Alpha Architecture Reference Manual. */ | ||
1086 | wrent(entArith, 1); | ||
1087 | wrent(entMM, 2); | ||
1088 | wrent(entIF, 3); | ||
1089 | wrent(entUna, 4); | ||
1090 | wrent(entSys, 5); | ||
1091 | wrent(entDbg, 6); | ||
1092 | } | ||
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..0922e0785ddb --- /dev/null +++ b/arch/alpha/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,149 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <asm-generic/vmlinux.lds.h> | ||
3 | | ||
 | /* Linker script for the Alpha kernel image (elf64-alpha). All | ||
 | loadable output goes into the single PT_LOAD program header | ||
 | declared below ("kernel"); statement order here defines the | ||
 | physical layout of vmlinux, so do not reorder sections. */ | ||
4 | OUTPUT_FORMAT("elf64-alpha") | ||
5 | OUTPUT_ARCH(alpha) | ||
6 | ENTRY(__start) | ||
7 | PHDRS { kernel PT_LOAD ; } | ||
 | /* Generic code references "jiffies"; alias it onto the 64-bit | ||
 | counter since Alpha is a 64-bit-only architecture. */ | ||
8 | jiffies = jiffies_64; | ||
9 | SECTIONS | ||
10 | { | ||
 | /* Kernel load/link address. NOTE(review): these look like | ||
 | KSEG virtual addresses, with the legacy value kept for older | ||
 | console/boot environments — confirm against arch docs. */ | ||
11 | #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS | ||
12 | . = 0xfffffc0000310000; | ||
13 | #else | ||
14 | . = 0xfffffc0001010000; | ||
15 | #endif | ||
16 | | ||
17 | _text = .; /* Text and read-only data */ | ||
18 | .text : { | ||
19 | *(.text) | ||
20 | SCHED_TEXT | ||
21 | LOCK_TEXT | ||
22 | *(.fixup) | ||
23 | *(.gnu.warning) | ||
24 | } :kernel | ||
25 | _etext = .; /* End of text section */ | ||
26 | | ||
 | /* Exception table: __start/__stop symbols bracket the sorted | ||
 | fault-fixup entries consumed by the trap handlers. */ | ||
27 | . = ALIGN(16); | ||
28 | __start___ex_table = .; /* Exception table */ | ||
29 | __ex_table : { *(__ex_table) } | ||
30 | __stop___ex_table = .; | ||
31 | | ||
32 | RODATA | ||
33 | | ||
 | /* Everything between __init_begin and __init_end is discarded | ||
 | and its pages freed once boot-time initialization finishes. */ | ||
34 | /* Will be freed after init */ | ||
35 | . = ALIGN(8192); /* Init code and data */ | ||
36 | __init_begin = .; | ||
37 | .init.text : { | ||
38 | _sinittext = .; | ||
39 | *(.init.text) | ||
40 | _einittext = .; | ||
41 | } | ||
42 | .init.data : { *(.init.data) } | ||
43 | | ||
 | /* Table of __setup() kernel-command-line option handlers. */ | ||
44 | . = ALIGN(16); | ||
45 | __setup_start = .; | ||
46 | .init.setup : { *(.init.setup) } | ||
47 | __setup_end = .; | ||
48 | | ||
 | /* Initcall function pointers, grouped by level 1..7; init code | ||
 | walks this array in order, so the level grouping is load-bearing. */ | ||
49 | . = ALIGN(8); | ||
50 | __initcall_start = .; | ||
51 | .initcall.init : { | ||
52 | *(.initcall1.init) | ||
53 | *(.initcall2.init) | ||
54 | *(.initcall3.init) | ||
55 | *(.initcall4.init) | ||
56 | *(.initcall5.init) | ||
57 | *(.initcall6.init) | ||
58 | *(.initcall7.init) | ||
59 | } | ||
60 | __initcall_end = .; | ||
61 | | ||
 | /* Built-in initramfs image, page (8 KB) aligned. */ | ||
62 | . = ALIGN(8192); | ||
63 | __initramfs_start = .; | ||
64 | .init.ramfs : { *(.init.ramfs) } | ||
65 | __initramfs_end = .; | ||
66 | | ||
67 | . = ALIGN(8); | ||
68 | .con_initcall.init : { | ||
69 | __con_initcall_start = .; | ||
70 | *(.con_initcall.init) | ||
71 | __con_initcall_end = .; | ||
72 | } | ||
73 | | ||
74 | . = ALIGN(8); | ||
75 | SECURITY_INIT | ||
76 | | ||
 | /* Per-CPU data template, cacheline (64 B) aligned; each CPU gets | ||
 | a copy of [__per_cpu_start, __per_cpu_end) at boot. */ | ||
77 | . = ALIGN(64); | ||
78 | __per_cpu_start = .; | ||
79 | .data.percpu : { *(.data.percpu) } | ||
80 | __per_cpu_end = .; | ||
81 | | ||
 | /* 2*8192 = two 8 KB pages: the init task's combined task_struct | ||
 | + stack area that follows presumably needs this double-page | ||
 | alignment — see the note below; confirm against init_task.c. */ | ||
82 | . = ALIGN(2*8192); | ||
83 | __init_end = .; | ||
84 | /* Freed after init ends here */ | ||
85 | | ||
86 | /* Note 2 page alignment above. */ | ||
87 | .data.init_thread : { *(.data.init_thread) } | ||
88 | | ||
89 | . = ALIGN(8192); | ||
90 | .data.page_aligned : { *(.data.page_aligned) } | ||
91 | | ||
92 | . = ALIGN(64); | ||
93 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | ||
94 | | ||
95 | _data = .; | ||
96 | .data : { /* Data */ | ||
97 | *(.data) | ||
98 | CONSTRUCTORS | ||
99 | } | ||
100 | | ||
 | /* GP-relative small-data sections; keeping .got/.sdata adjacent | ||
 | keeps them reachable from the single kernel global pointer. */ | ||
101 | .got : { *(.got) } | ||
102 | .sdata : { *(.sdata) } | ||
103 | | ||
104 | _edata = .; /* End of data section */ | ||
105 | | ||
 | /* Zero-initialized data; [__bss_start, __bss_stop) is cleared by | ||
 | early boot code. */ | ||
106 | __bss_start = .; | ||
107 | .sbss : { *(.sbss) *(.scommon) } | ||
108 | .bss : { *(.bss) *(COMMON) } | ||
109 | __bss_stop = .; | ||
110 | | ||
111 | _end = .; | ||
112 | | ||
 | /* Exit-path code/data is meaningless in a non-modular vmlinux. */ | ||
113 | /* Sections to be discarded */ | ||
114 | /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) } | ||
115 | | ||
 | /* Non-allocated (address 0) metadata and debug sections below: | ||
 | present in the ELF file but never loaded into memory. */ | ||
116 | .mdebug 0 : { *(.mdebug) } | ||
117 | .note 0 : { *(.note) } | ||
118 | .comment 0 : { *(.comment) } | ||
119 | | ||
120 | /* Stabs debugging sections */ | ||
121 | .stab 0 : { *(.stab) } | ||
122 | .stabstr 0 : { *(.stabstr) } | ||
123 | .stab.excl 0 : { *(.stab.excl) } | ||
124 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
125 | .stab.index 0 : { *(.stab.index) } | ||
126 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
127 | /* DWARF 1 */ | ||
128 | .debug 0 : { *(.debug) } | ||
129 | .line 0 : { *(.line) } | ||
130 | /* GNU DWARF 1 extensions */ | ||
131 | .debug_srcinfo 0 : { *(.debug_srcinfo) } | ||
132 | .debug_sfnames 0 : { *(.debug_sfnames) } | ||
133 | /* DWARF 1.1 and DWARF 2 */ | ||
134 | .debug_aranges 0 : { *(.debug_aranges) } | ||
135 | .debug_pubnames 0 : { *(.debug_pubnames) } | ||
136 | /* DWARF 2 */ | ||
137 | .debug_info 0 : { *(.debug_info) } | ||
138 | .debug_abbrev 0 : { *(.debug_abbrev) } | ||
139 | .debug_line 0 : { *(.debug_line) } | ||
140 | .debug_frame 0 : { *(.debug_frame) } | ||
141 | .debug_str 0 : { *(.debug_str) } | ||
142 | .debug_loc 0 : { *(.debug_loc) } | ||
143 | .debug_macinfo 0 : { *(.debug_macinfo) } | ||
144 | /* SGI/MIPS DWARF 2 extensions */ | ||
145 | .debug_weaknames 0 : { *(.debug_weaknames) } | ||
146 | .debug_funcnames 0 : { *(.debug_funcnames) } | ||
147 | .debug_typenames 0 : { *(.debug_typenames) } | ||
148 | .debug_varnames 0 : { *(.debug_varnames) } | ||
149 | } | ||