diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ia64/machvec.h |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ia64/machvec.h')
-rw-r--r-- | include/asm-ia64/machvec.h | 390 |
1 file changed, 390 insertions, 0 deletions
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h new file mode 100644 index 000000000000..79e89a7db566 --- /dev/null +++ b/include/asm-ia64/machvec.h | |||
@@ -0,0 +1,390 @@ | |||
1 | /* | ||
2 | * Machine vector for IA-64. | ||
3 | * | ||
4 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
5 | * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> | ||
6 | * Copyright (C) Vijay Chander <vijay@engr.sgi.com> | ||
7 | * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co. | ||
8 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
9 | */ | ||
10 | #ifndef _ASM_IA64_MACHVEC_H | ||
11 | #define _ASM_IA64_MACHVEC_H | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/types.h> | ||
15 | |||
/* forward declarations: */
struct device;		/* used by the DMA-mapping typedefs below */
struct pt_regs;		/* timer-interrupt handler argument */
struct scatterlist;	/* scatter/gather DMA lists */
struct page;
struct mm_struct;	/* tlb_migrate_finish argument */
struct pci_bus;		/* legacy PCI I/O-space accessors */
23 | |||
/*
 * Signatures of the per-platform machine-vector entry points.  Each
 * supported platform supplies one function of each of these types;
 * struct ia64_machine_vector (below) holds a pointer of each type.
 */
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
				       u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
					u8 size);
37 | |||
/* DMA-mapping interface: */
/*
 * The swiotlb_* routines declared near the end of this file serve as the
 * defaults for these operations (see the platform_dma_* fallbacks below).
 */
typedef void ia64_mv_dma_init (void);
typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
typedef int ia64_mv_dma_supported (struct device *, u64);
52 | |||
/*
 * WARNING: The legacy I/O space is _architected_.  Platforms are
 * expected to follow this architected model (see Section 10.7 in the
 * IA-64 Architecture Software Developer's Manual).  Unfortunately,
 * some broken machines do not follow that model, which is why we have
 * to make the inX/outX operations part of the machine vector.
 * Platform designers should follow the architected model whenever
 * possible.
 */
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
/* MMIO readers; the _relaxed variants have separate vector slots below. */
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
77 | |||
/*
 * Empty hook: the default platform_cpu_init and platform_irq_init
 * (see the fallback #defines below).
 */
static inline void
machvec_noop (void)
{
}
82 | |||
/*
 * Empty hook taking an mm_struct: the default
 * platform_tlb_migrate_finish (see the fallback #defines below).
 */
static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}
87 | |||
/*
 * Shared helper implementations (defined outside this header).
 * machvec_setup and machvec_timer_interrupt are used as the default
 * platform_setup/platform_timer_interrupt below.
 */
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
extern void machvec_tlb_migrate_finish (struct mm_struct *);
93 | |||
/*
 * Bind the machine vector at compile time when the kernel is configured
 * for exactly one platform; CONFIG_IA64_GENERIC instead routes every
 * platform_* operation through the global `ia64_mv` structure.
 */
# if defined (CONFIG_IA64_HP_SIM)
#  include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
#  include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_HP_ZX1)
#  include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
#  include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
#  include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_GENERIC)

/*
 * NOTE(review): MACHVEC_PLATFORM_HEADER appears to be defined when a
 * platform's machvec translation unit re-includes this header to
 * instantiate its own vector — confirm against the machvec build files.
 * Otherwise the platform_* names resolve to the runtime-selected
 * ia64_mv fields.
 */
# ifdef MACHVEC_PLATFORM_HEADER
#  include MACHVEC_PLATFORM_HEADER
# else
#  define platform_name		ia64_mv.name
#  define platform_setup	ia64_mv.setup
#  define platform_cpu_init	ia64_mv.cpu_init
#  define platform_irq_init	ia64_mv.irq_init
#  define platform_send_ipi	ia64_mv.send_ipi
#  define platform_timer_interrupt	ia64_mv.timer_interrupt
#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
#  define platform_dma_init		ia64_mv.dma_init
#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
#  define platform_dma_map_single	ia64_mv.dma_map_single
#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
#  define platform_dma_map_sg		ia64_mv.dma_map_sg
#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
#  define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
#  define platform_dma_sync_sg_for_cpu		ia64_mv.dma_sync_sg_for_cpu
#  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
#  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
#  define platform_dma_supported	ia64_mv.dma_supported
#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
#  define platform_inb		ia64_mv.inb
#  define platform_inw		ia64_mv.inw
#  define platform_inl		ia64_mv.inl
#  define platform_outb		ia64_mv.outb
#  define platform_outw		ia64_mv.outw
#  define platform_outl		ia64_mv.outl
#  define platform_mmiowb	ia64_mv.mmiowb
#  define platform_readb	ia64_mv.readb
#  define platform_readw	ia64_mv.readw
#  define platform_readl	ia64_mv.readl
#  define platform_readq	ia64_mv.readq
#  define platform_readb_relaxed	ia64_mv.readb_relaxed
#  define platform_readw_relaxed	ia64_mv.readw_relaxed
#  define platform_readl_relaxed	ia64_mv.readl_relaxed
#  define platform_readq_relaxed	ia64_mv.readq_relaxed
# endif
150 | |||
/* __attribute__((__aligned__(16))) is required to make size of the
 * structure multiple of 16 bytes.
 * This will fillup the holes created because of section 3.3.1 in
 * Software Conventions guide.
 *
 * NOTE: the field order below is relied upon by the positional
 * MACHVEC_INIT() initializer further down — keep the two in sync.
 */
struct ia64_machine_vector {
	const char *name;
	ia64_mv_setup_t *setup;
	ia64_mv_cpu_init_t *cpu_init;
	ia64_mv_irq_init_t *irq_init;
	ia64_mv_send_ipi_t *send_ipi;
	ia64_mv_timer_interrupt_t *timer_interrupt;
	ia64_mv_global_tlb_purge_t *global_tlb_purge;
	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
	ia64_mv_dma_init *dma_init;
	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
	ia64_mv_dma_free_coherent *dma_free_coherent;
	ia64_mv_dma_map_single *dma_map_single;
	ia64_mv_dma_unmap_single *dma_unmap_single;
	ia64_mv_dma_map_sg *dma_map_sg;
	ia64_mv_dma_unmap_sg *dma_unmap_sg;
	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
	ia64_mv_dma_mapping_error *dma_mapping_error;
	ia64_mv_dma_supported *dma_supported;
	ia64_mv_local_vector_to_irq *local_vector_to_irq;
	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
	ia64_mv_pci_legacy_read_t *pci_legacy_read;
	ia64_mv_pci_legacy_write_t *pci_legacy_write;
	ia64_mv_inb_t *inb;
	ia64_mv_inw_t *inw;
	ia64_mv_inl_t *inl;
	ia64_mv_outb_t *outb;
	ia64_mv_outw_t *outw;
	ia64_mv_outl_t *outl;
	ia64_mv_mmiowb_t *mmiowb;
	ia64_mv_readb_t *readb;
	ia64_mv_readw_t *readw;
	ia64_mv_readl_t *readl;
	ia64_mv_readq_t *readq;
	ia64_mv_readb_relaxed_t *readb_relaxed;
	ia64_mv_readw_relaxed_t *readw_relaxed;
	ia64_mv_readl_relaxed_t *readl_relaxed;
	ia64_mv_readq_relaxed_t *readq_relaxed;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
198 | |||
/*
 * Positional initializer for a struct ia64_machine_vector: #name
 * stringizes the platform name, and every following entry must appear
 * in exactly the order of the structure's fields above.  The platform_*
 * names are expanded here, so each platform header defines (or inherits
 * the defaults for) its own set before invoking this macro.
 */
#define MACHVEC_INIT(name)			\
{						\
	#name,					\
	platform_setup,				\
	platform_cpu_init,			\
	platform_irq_init,			\
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_alloc_coherent,		\
	platform_dma_free_coherent,		\
	platform_dma_map_single,		\
	platform_dma_unmap_single,		\
	platform_dma_map_sg,			\
	platform_dma_unmap_sg,			\
	platform_dma_sync_single_for_cpu,	\
	platform_dma_sync_sg_for_cpu,		\
	platform_dma_sync_single_for_device,	\
	platform_dma_sync_sg_for_device,	\
	platform_dma_mapping_error,		\
	platform_dma_supported,			\
	platform_local_vector_to_irq,		\
	platform_pci_get_legacy_mem,		\
	platform_pci_legacy_read,		\
	platform_pci_legacy_write,		\
	platform_inb,				\
	platform_inw,				\
	platform_inl,				\
	platform_outb,				\
	platform_outw,				\
	platform_outl,				\
	platform_mmiowb,			\
	platform_readb,				\
	platform_readw,				\
	platform_readl,				\
	platform_readq,				\
	platform_readb_relaxed,			\
	platform_readw_relaxed,			\
	platform_readl_relaxed,			\
	platform_readq_relaxed,			\
}
242 | |||
/* Runtime-selected machine vector and its initializer (GENERIC only). */
extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);

# else
# error Unknown configuration. Update asm-ia64/machvec.h.
# endif /* CONFIG_IA64_GENERIC */
249 | |||
/*
 * Declare default routines which aren't declared anywhere else:
 * these swiotlb_* implementations back the platform_dma_* fallback
 * #defines below.
 */
extern ia64_mv_dma_init			swiotlb_init;
extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
extern ia64_mv_dma_map_single		swiotlb_map_single;
extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
extern ia64_mv_dma_map_sg		swiotlb_map_sg;
extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
extern ia64_mv_dma_sync_single_for_device	swiotlb_sync_single_for_device;
extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
extern ia64_mv_dma_supported		swiotlb_dma_supported;
266 | |||
/*
 * Define default versions so we can extend machvec for new platforms without having
 * to update the machvec files for all existing platforms.
 *
 * Each #ifndef takes effect only when the selected platform header (or
 * the GENERIC ia64_mv macros above) has not already defined the name.
 */
#ifndef platform_setup
# define platform_setup			machvec_setup
#endif
#ifndef platform_cpu_init
# define platform_cpu_init		machvec_noop
#endif
#ifndef platform_irq_init
# define platform_irq_init		machvec_noop
#endif

#ifndef platform_send_ipi
# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
#endif
#ifndef platform_timer_interrupt
# define platform_timer_interrupt	machvec_timer_interrupt
#endif
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge	ia64_global_tlb_purge	/* default to architected version */
#endif
#ifndef platform_tlb_migrate_finish
# define platform_tlb_migrate_finish	machvec_noop_mm
#endif
/* DMA-mapping defaults: route everything through swiotlb. */
#ifndef platform_dma_init
# define platform_dma_init		swiotlb_init
#endif
#ifndef platform_dma_alloc_coherent
# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
#endif
#ifndef platform_dma_free_coherent
# define platform_dma_free_coherent	swiotlb_free_coherent
#endif
#ifndef platform_dma_map_single
# define platform_dma_map_single	swiotlb_map_single
#endif
#ifndef platform_dma_unmap_single
# define platform_dma_unmap_single	swiotlb_unmap_single
#endif
#ifndef platform_dma_map_sg
# define platform_dma_map_sg		swiotlb_map_sg
#endif
#ifndef platform_dma_unmap_sg
# define platform_dma_unmap_sg		swiotlb_unmap_sg
#endif
#ifndef platform_dma_sync_single_for_cpu
# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
#endif
#ifndef platform_dma_sync_sg_for_cpu
# define platform_dma_sync_sg_for_cpu		swiotlb_sync_sg_for_cpu
#endif
#ifndef platform_dma_sync_single_for_device
# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
#endif
#ifndef platform_dma_sync_sg_for_device
# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
#endif
#ifndef platform_dma_mapping_error
# define platform_dma_mapping_error		swiotlb_dma_mapping_error
#endif
#ifndef platform_dma_supported
# define platform_dma_supported			swiotlb_dma_supported
#endif
#ifndef platform_local_vector_to_irq
# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
#endif
#ifndef platform_pci_get_legacy_mem
# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
#endif
#ifndef platform_pci_legacy_read
# define platform_pci_legacy_read	ia64_pci_legacy_read
#endif
#ifndef platform_pci_legacy_write
# define platform_pci_legacy_write	ia64_pci_legacy_write
#endif
/* Port I/O and MMIO defaults: the architected __ia64_* accessors. */
#ifndef platform_inb
# define platform_inb		__ia64_inb
#endif
#ifndef platform_inw
# define platform_inw		__ia64_inw
#endif
#ifndef platform_inl
# define platform_inl		__ia64_inl
#endif
#ifndef platform_outb
# define platform_outb		__ia64_outb
#endif
#ifndef platform_outw
# define platform_outw		__ia64_outw
#endif
#ifndef platform_outl
# define platform_outl		__ia64_outl
#endif
#ifndef platform_mmiowb
# define platform_mmiowb	__ia64_mmiowb
#endif
#ifndef platform_readb
# define platform_readb		__ia64_readb
#endif
#ifndef platform_readw
# define platform_readw		__ia64_readw
#endif
#ifndef platform_readl
# define platform_readl		__ia64_readl
#endif
#ifndef platform_readq
# define platform_readq		__ia64_readq
#endif
#ifndef platform_readb_relaxed
# define platform_readb_relaxed	__ia64_readb_relaxed
#endif
#ifndef platform_readw_relaxed
# define platform_readw_relaxed	__ia64_readw_relaxed
#endif
#ifndef platform_readl_relaxed
# define platform_readl_relaxed	__ia64_readl_relaxed
#endif
#ifndef platform_readq_relaxed
# define platform_readq_relaxed	__ia64_readq_relaxed
#endif
389 | |||
390 | #endif /* _ASM_IA64_MACHVEC_H */ | ||