Diffstat (limited to 'arch/ia64/hp')
-rw-r--r-- | arch/ia64/hp/common/Makefile | 10
-rw-r--r-- | arch/ia64/hp/common/hwsw_iommu.c | 185
-rw-r--r-- | arch/ia64/hp/common/sba_iommu.c | 2121
-rw-r--r-- | arch/ia64/hp/sim/Kconfig | 20
-rw-r--r-- | arch/ia64/hp/sim/Makefile | 16
-rw-r--r-- | arch/ia64/hp/sim/boot/Makefile | 37
-rw-r--r-- | arch/ia64/hp/sim/boot/boot_head.S | 144
-rw-r--r-- | arch/ia64/hp/sim/boot/bootloader.c | 176
-rw-r--r-- | arch/ia64/hp/sim/boot/bootloader.lds | 65
-rw-r--r-- | arch/ia64/hp/sim/boot/fw-emu.c | 398
-rw-r--r-- | arch/ia64/hp/sim/boot/ssc.h | 35
-rw-r--r-- | arch/ia64/hp/sim/hpsim.S | 10
-rw-r--r-- | arch/ia64/hp/sim/hpsim_console.c | 65
-rw-r--r-- | arch/ia64/hp/sim/hpsim_irq.c | 51
-rw-r--r-- | arch/ia64/hp/sim/hpsim_machvec.c | 3
-rw-r--r-- | arch/ia64/hp/sim/hpsim_setup.c | 52
-rw-r--r-- | arch/ia64/hp/sim/hpsim_ssc.h | 36
-rw-r--r-- | arch/ia64/hp/sim/simeth.c | 530
-rw-r--r-- | arch/ia64/hp/sim/simscsi.c | 404
-rw-r--r-- | arch/ia64/hp/sim/simserial.c | 1032
-rw-r--r-- | arch/ia64/hp/zx1/Makefile | 8
-rw-r--r-- | arch/ia64/hp/zx1/hpzx1_machvec.c | 3
-rw-r--r-- | arch/ia64/hp/zx1/hpzx1_swiotlb_machvec.c | 3
23 files changed, 5404 insertions, 0 deletions
diff --git a/arch/ia64/hp/common/Makefile b/arch/ia64/hp/common/Makefile
new file mode 100644
index 000000000000..f61a60057ff7
--- /dev/null
+++ b/arch/ia64/hp/common/Makefile
@@ -0,0 +1,10 @@
1 | # | ||
2 | # ia64/platform/hp/common/Makefile | ||
3 | # | ||
4 | # Copyright (C) 2002 Hewlett Packard | ||
5 | # Copyright (C) Alex Williamson (alex_williamson@hp.com) | ||
6 | # | ||
7 | |||
8 | obj-y := sba_iommu.o | ||
9 | obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += hwsw_iommu.o | ||
10 | obj-$(CONFIG_IA64_GENERIC) += hwsw_iommu.o | ||
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
new file mode 100644
index 000000000000..80f8ef013939
--- /dev/null
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -0,0 +1,185 @@
1 | /* | ||
2 | * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. | ||
3 | * Contributed by David Mosberger-Tang <davidm@hpl.hp.com> | ||
4 | * | ||
5 | * This is a pseudo I/O MMU which dispatches to the hardware I/O MMU | ||
6 | * whenever possible. We assume that the hardware I/O MMU requires | ||
7 | * full 32-bit addressability, as is the case, e.g., for HP zx1-based | ||
8 | * systems (there, the I/O MMU window is mapped at 3-4GB). If a | ||
9 | * device doesn't provide full 32-bit addressability, we fall back on | ||
10 | * the sw I/O TLB. This is good enough to let us support broken | ||
11 | * hardware such as soundcards which have a DMA engine that can | ||
12 | * address only 28 bits. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | |||
17 | #include <asm/machvec.h> | ||
18 | |||
19 | /* swiotlb declarations & definitions: */ | ||
20 | extern void swiotlb_init_with_default_size (size_t size); | ||
21 | extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; | ||
22 | extern ia64_mv_dma_free_coherent swiotlb_free_coherent; | ||
23 | extern ia64_mv_dma_map_single swiotlb_map_single; | ||
24 | extern ia64_mv_dma_unmap_single swiotlb_unmap_single; | ||
25 | extern ia64_mv_dma_map_sg swiotlb_map_sg; | ||
26 | extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; | ||
27 | extern ia64_mv_dma_supported swiotlb_dma_supported; | ||
28 | extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error; | ||
29 | |||
30 | /* hwiommu declarations & definitions: */ | ||
31 | |||
32 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; | ||
33 | extern ia64_mv_dma_free_coherent sba_free_coherent; | ||
34 | extern ia64_mv_dma_map_single sba_map_single; | ||
35 | extern ia64_mv_dma_unmap_single sba_unmap_single; | ||
36 | extern ia64_mv_dma_map_sg sba_map_sg; | ||
37 | extern ia64_mv_dma_unmap_sg sba_unmap_sg; | ||
38 | extern ia64_mv_dma_supported sba_dma_supported; | ||
39 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | ||
40 | |||
41 | #define hwiommu_alloc_coherent sba_alloc_coherent | ||
42 | #define hwiommu_free_coherent sba_free_coherent | ||
43 | #define hwiommu_map_single sba_map_single | ||
44 | #define hwiommu_unmap_single sba_unmap_single | ||
45 | #define hwiommu_map_sg sba_map_sg | ||
46 | #define hwiommu_unmap_sg sba_unmap_sg | ||
47 | #define hwiommu_dma_supported sba_dma_supported | ||
48 | #define hwiommu_dma_mapping_error sba_dma_mapping_error | ||
49 | #define hwiommu_sync_single_for_cpu machvec_dma_sync_single | ||
50 | #define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg | ||
51 | #define hwiommu_sync_single_for_device machvec_dma_sync_single | ||
52 | #define hwiommu_sync_sg_for_device machvec_dma_sync_sg | ||
53 | |||
54 | |||
55 | /* | ||
56 | * Note: we need to make the determination of whether or not to use | ||
57 | * the sw I/O TLB based purely on the device structure. Anything else | ||
58 | * would be unreliable or would be too intrusive. | ||
59 | */ | ||
60 | static inline int | ||
61 | use_swiotlb (struct device *dev) | ||
62 | { | ||
63 | return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask); | ||
64 | } | ||
65 | |||
66 | void | ||
67 | hwsw_init (void) | ||
68 | { | ||
69 | /* default to a smallish 2MB sw I/O TLB */ | ||
70 | swiotlb_init_with_default_size (2 * (1<<20)); | ||
71 | } | ||
72 | |||
73 | void * | ||
74 | hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags) | ||
75 | { | ||
76 | if (use_swiotlb(dev)) | ||
77 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | ||
78 | else | ||
79 | return hwiommu_alloc_coherent(dev, size, dma_handle, flags); | ||
80 | } | ||
81 | |||
82 | void | ||
83 | hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
84 | { | ||
85 | if (use_swiotlb(dev)) | ||
86 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | ||
87 | else | ||
88 | hwiommu_free_coherent(dev, size, vaddr, dma_handle); | ||
89 | } | ||
90 | |||
91 | dma_addr_t | ||
92 | hwsw_map_single (struct device *dev, void *addr, size_t size, int dir) | ||
93 | { | ||
94 | if (use_swiotlb(dev)) | ||
95 | return swiotlb_map_single(dev, addr, size, dir); | ||
96 | else | ||
97 | return hwiommu_map_single(dev, addr, size, dir); | ||
98 | } | ||
99 | |||
100 | void | ||
101 | hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir) | ||
102 | { | ||
103 | if (use_swiotlb(dev)) | ||
104 | return swiotlb_unmap_single(dev, iova, size, dir); | ||
105 | else | ||
106 | return hwiommu_unmap_single(dev, iova, size, dir); | ||
107 | } | ||
108 | |||
109 | |||
110 | int | ||
111 | hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
112 | { | ||
113 | if (use_swiotlb(dev)) | ||
114 | return swiotlb_map_sg(dev, sglist, nents, dir); | ||
115 | else | ||
116 | return hwiommu_map_sg(dev, sglist, nents, dir); | ||
117 | } | ||
118 | |||
119 | void | ||
120 | hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
121 | { | ||
122 | if (use_swiotlb(dev)) | ||
123 | return swiotlb_unmap_sg(dev, sglist, nents, dir); | ||
124 | else | ||
125 | return hwiommu_unmap_sg(dev, sglist, nents, dir); | ||
126 | } | ||
127 | |||
128 | void | ||
129 | hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) | ||
130 | { | ||
131 | if (use_swiotlb(dev)) | ||
132 | swiotlb_sync_single_for_cpu(dev, addr, size, dir); | ||
133 | else | ||
134 | hwiommu_sync_single_for_cpu(dev, addr, size, dir); | ||
135 | } | ||
136 | |||
137 | void | ||
138 | hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir) | ||
139 | { | ||
140 | if (use_swiotlb(dev)) | ||
141 | swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
142 | else | ||
143 | hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
144 | } | ||
145 | |||
146 | void | ||
147 | hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir) | ||
148 | { | ||
149 | if (use_swiotlb(dev)) | ||
150 | swiotlb_sync_single_for_device(dev, addr, size, dir); | ||
151 | else | ||
152 | hwiommu_sync_single_for_device(dev, addr, size, dir); | ||
153 | } | ||
154 | |||
155 | void | ||
156 | hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir) | ||
157 | { | ||
158 | if (use_swiotlb(dev)) | ||
159 | swiotlb_sync_sg_for_device(dev, sg, nelems, dir); | ||
160 | else | ||
161 | hwiommu_sync_sg_for_device(dev, sg, nelems, dir); | ||
162 | } | ||
163 | |||
164 | int | ||
165 | hwsw_dma_supported (struct device *dev, u64 mask) | ||
166 | { | ||
167 | if (hwiommu_dma_supported(dev, mask)) | ||
168 | return 1; | ||
169 | return swiotlb_dma_supported(dev, mask); | ||
170 | } | ||
171 | |||
172 | int | ||
173 | hwsw_dma_mapping_error (dma_addr_t dma_addr) | ||
174 | { | ||
175 | return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr); | ||
176 | } | ||
177 | |||
178 | EXPORT_SYMBOL(hwsw_dma_mapping_error); | ||
179 | EXPORT_SYMBOL(hwsw_map_single); | ||
180 | EXPORT_SYMBOL(hwsw_unmap_single); | ||
181 | EXPORT_SYMBOL(hwsw_map_sg); | ||
182 | EXPORT_SYMBOL(hwsw_unmap_sg); | ||
183 | EXPORT_SYMBOL(hwsw_dma_supported); | ||
184 | EXPORT_SYMBOL(hwsw_alloc_coherent); | ||
185 | EXPORT_SYMBOL(hwsw_free_coherent); | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
new file mode 100644
index 000000000000..017c9ab5fc1b
--- /dev/null
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -0,0 +1,2121 @@
1 | /* | ||
2 | ** IA64 System Bus Adapter (SBA) I/O MMU manager | ||
3 | ** | ||
4 | ** (c) Copyright 2002-2004 Alex Williamson | ||
5 | ** (c) Copyright 2002-2003 Grant Grundler | ||
6 | ** (c) Copyright 2002-2004 Hewlett-Packard Company | ||
7 | ** | ||
8 | ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) | ||
9 | ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) | ||
10 | ** | ||
11 | ** This program is free software; you can redistribute it and/or modify | ||
12 | ** it under the terms of the GNU General Public License as published by | ||
13 | ** the Free Software Foundation; either version 2 of the License, or | ||
14 | ** (at your option) any later version. | ||
15 | ** | ||
16 | ** | ||
17 | ** This module initializes the IOC (I/O Controller) found on HP | ||
18 | ** McKinley machines and their successors. | ||
19 | ** | ||
20 | */ | ||
21 | |||
22 | #include <linux/config.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/proc_fs.h> | ||
33 | #include <linux/seq_file.h> | ||
34 | #include <linux/acpi.h> | ||
35 | #include <linux/efi.h> | ||
36 | #include <linux/nodemask.h> | ||
37 | #include <linux/bitops.h> /* hweight64() */ | ||
38 | |||
39 | #include <asm/delay.h> /* ia64_get_itc() */ | ||
40 | #include <asm/io.h> | ||
41 | #include <asm/page.h> /* PAGE_OFFSET */ | ||
42 | #include <asm/dma.h> | ||
43 | #include <asm/system.h> /* wmb() */ | ||
44 | |||
45 | #include <asm/acpi-ext.h> | ||
46 | |||
47 | #define PFX "IOC: " | ||
48 | |||
49 | /* | ||
50 | ** Enabling timing search of the pdir resource map. Output in /proc. | ||
51 | ** Disabled by default to optimize performance. | ||
52 | */ | ||
53 | #undef PDIR_SEARCH_TIMING | ||
54 | |||
55 | /* | ||
56 | ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If | ||
57 | ** not defined, all DMA will be 32bit and go through the TLB. | ||
58 | ** There's potentially a conflict in the bio merge code with us | ||
59 | ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing | ||
60 | ** appears to give more performance than bio-level virtual merging, we'll | ||
61 | ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to | ||
62 | ** completely restrict DMA to the IOMMU. | ||
63 | */ | ||
64 | #define ALLOW_IOV_BYPASS | ||
65 | |||
66 | /* | ||
67 | ** This option specifically allows/disallows bypassing scatterlists with | ||
68 | ** multiple entries. Coalescing these entries can allow better DMA streaming | ||
69 | ** and in some cases shows better performance than entirely bypassing the | ||
70 | ** IOMMU. Performance increase on the order of 1-2% sequential output/input | ||
71 | ** using bonnie++ on a RAID0 MD device (sym2 & mpt). | ||
72 | */ | ||
73 | #undef ALLOW_IOV_BYPASS_SG | ||
74 | |||
75 | /* | ||
76 | ** If a device prefetches beyond the end of a valid pdir entry, it will cause | ||
77 | ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should | ||
78 | ** disconnect on 4k boundaries and prevent such issues. If the device is | ||
79 | ** particularly aggressive, this option will keep the entire pdir valid such | ||
80 | ** that prefetching will hit a valid address. This could severely impact | ||
81 | ** error containment, and is therefore off by default. The page that is | ||
82 | ** used for spill-over is poisoned, so that should help debugging somewhat. | ||
83 | */ | ||
84 | #undef FULL_VALID_PDIR | ||
85 | |||
86 | #define ENABLE_MARK_CLEAN | ||
87 | |||
88 | /* | ||
89 | ** The number of debug flags is a clue - this code is fragile. NOTE: since | ||
90 | ** tightening the use of res_lock the resource bitmap and actual pdir are no | ||
91 | ** longer guaranteed to stay in sync. The sanity checking code isn't going to | ||
92 | ** like that. | ||
93 | */ | ||
94 | #undef DEBUG_SBA_INIT | ||
95 | #undef DEBUG_SBA_RUN | ||
96 | #undef DEBUG_SBA_RUN_SG | ||
97 | #undef DEBUG_SBA_RESOURCE | ||
98 | #undef ASSERT_PDIR_SANITY | ||
99 | #undef DEBUG_LARGE_SG_ENTRIES | ||
100 | #undef DEBUG_BYPASS | ||
101 | |||
102 | #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY) | ||
103 | #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive | ||
104 | #endif | ||
105 | |||
106 | #define SBA_INLINE __inline__ | ||
107 | /* #define SBA_INLINE */ | ||
108 | |||
109 | #ifdef DEBUG_SBA_INIT | ||
110 | #define DBG_INIT(x...) printk(x) | ||
111 | #else | ||
112 | #define DBG_INIT(x...) | ||
113 | #endif | ||
114 | |||
115 | #ifdef DEBUG_SBA_RUN | ||
116 | #define DBG_RUN(x...) printk(x) | ||
117 | #else | ||
118 | #define DBG_RUN(x...) | ||
119 | #endif | ||
120 | |||
121 | #ifdef DEBUG_SBA_RUN_SG | ||
122 | #define DBG_RUN_SG(x...) printk(x) | ||
123 | #else | ||
124 | #define DBG_RUN_SG(x...) | ||
125 | #endif | ||
126 | |||
127 | |||
128 | #ifdef DEBUG_SBA_RESOURCE | ||
129 | #define DBG_RES(x...) printk(x) | ||
130 | #else | ||
131 | #define DBG_RES(x...) | ||
132 | #endif | ||
133 | |||
134 | #ifdef DEBUG_BYPASS | ||
135 | #define DBG_BYPASS(x...) printk(x) | ||
136 | #else | ||
137 | #define DBG_BYPASS(x...) | ||
138 | #endif | ||
139 | |||
140 | #ifdef ASSERT_PDIR_SANITY | ||
141 | #define ASSERT(expr) \ | ||
142 | if(!(expr)) { \ | ||
143 | printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \ | ||
144 | panic(#expr); \ | ||
145 | } | ||
146 | #else | ||
147 | #define ASSERT(expr) | ||
148 | #endif | ||
149 | |||
150 | /* | ||
151 | ** The number of pdir entries to "free" before issuing | ||
152 | ** a read to PCOM register to flush out PCOM writes. | ||
153 | ** Interacts with allocation granularity (ie 4 or 8 entries | ||
154 | ** allocated and free'd/purged at a time might make this | ||
155 | ** less interesting). | ||
156 | */ | ||
157 | #define DELAYED_RESOURCE_CNT 64 | ||
158 | |||
159 | #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) | ||
160 | #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP) | ||
161 | #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) | ||
162 | #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) | ||
163 | |||
164 | #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ | ||
165 | |||
166 | #define IOC_FUNC_ID 0x000 | ||
167 | #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */ | ||
168 | #define IOC_IBASE 0x300 /* IO TLB */ | ||
169 | #define IOC_IMASK 0x308 | ||
170 | #define IOC_PCOM 0x310 | ||
171 | #define IOC_TCNFG 0x318 | ||
172 | #define IOC_PDIR_BASE 0x320 | ||
173 | |||
174 | #define IOC_ROPE0_CFG 0x500 | ||
175 | #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */ | ||
176 | |||
177 | |||
178 | /* AGP GART driver looks for this */ | ||
179 | #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL | ||
180 | |||
181 | /* | ||
182 | ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register) | ||
183 | ** | ||
184 | ** Some IOCs (sx1000) can run at the above page sizes, but are | ||
185 | ** really only supported using the IOC at a 4k page size. | ||
186 | ** | ||
187 | ** iovp_size could only be greater than PAGE_SIZE if we are | ||
188 | ** confident the drivers really only touch the next physical | ||
189 | ** page iff that driver instance owns it. | ||
190 | */ | ||
191 | static unsigned long iovp_size; | ||
192 | static unsigned long iovp_shift; | ||
193 | static unsigned long iovp_mask; | ||
194 | |||
195 | struct ioc { | ||
196 | void __iomem *ioc_hpa; /* I/O MMU base address */ | ||
197 | char *res_map; /* resource map, bit == pdir entry */ | ||
198 | u64 *pdir_base; /* physical base address */ | ||
199 | unsigned long ibase; /* pdir IOV Space base */ | ||
200 | unsigned long imask; /* pdir IOV Space mask */ | ||
201 | |||
202 | unsigned long *res_hint; /* next avail IOVP - circular search */ | ||
203 | unsigned long dma_mask; | ||
204 | spinlock_t res_lock; /* protects the resource bitmap, but must be held when */ | ||
205 | /* clearing pdir to prevent races with allocations. */ | ||
206 | unsigned int res_bitshift; /* from the RIGHT! */ | ||
207 | unsigned int res_size; /* size of resource map in bytes */ | ||
208 | #ifdef CONFIG_NUMA | ||
209 | unsigned int node; /* node where this IOC lives */ | ||
210 | #endif | ||
211 | #if DELAYED_RESOURCE_CNT > 0 | ||
212 | spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */ | ||
213 | /* than res_lock for bigger systems. */ | ||
214 | int saved_cnt; | ||
215 | struct sba_dma_pair { | ||
216 | dma_addr_t iova; | ||
217 | size_t size; | ||
218 | } saved[DELAYED_RESOURCE_CNT]; | ||
219 | #endif | ||
220 | |||
221 | #ifdef PDIR_SEARCH_TIMING | ||
222 | #define SBA_SEARCH_SAMPLE 0x100 | ||
223 | unsigned long avg_search[SBA_SEARCH_SAMPLE]; | ||
224 | unsigned long avg_idx; /* current index into avg_search */ | ||
225 | #endif | ||
226 | |||
227 | /* Stuff we don't need in performance path */ | ||
228 | struct ioc *next; /* list of IOC's in system */ | ||
229 | acpi_handle handle; /* for multiple IOC's */ | ||
230 | const char *name; | ||
231 | unsigned int func_id; | ||
232 | unsigned int rev; /* HW revision of chip */ | ||
233 | u32 iov_size; | ||
234 | unsigned int pdir_size; /* in bytes, determined by IOV Space size */ | ||
235 | struct pci_dev *sac_only_dev; | ||
236 | }; | ||
237 | |||
238 | static struct ioc *ioc_list; | ||
239 | static int reserve_sba_gart = 1; | ||
240 | |||
241 | static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); | ||
242 | static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t); | ||
243 | |||
244 | #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) | ||
245 | |||
246 | #ifdef FULL_VALID_PDIR | ||
247 | static u64 prefetch_spill_page; | ||
248 | #endif | ||
249 | |||
250 | #ifdef CONFIG_PCI | ||
251 | # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ | ||
252 | ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) | ||
253 | #else | ||
254 | # define GET_IOC(dev) NULL | ||
255 | #endif | ||
256 | |||
257 | /* | ||
258 | ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up | ||
259 | ** (or rather not merge) DMA's into manageable chunks. | ||
260 | ** On parisc, this is more of a software/tuning constraint | ||
261 | ** than a HW one. I/O MMU allocation algorithms can be | ||
262 | ** faster with smaller sizes (to some degree). | ||
263 | */ | ||
264 | #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size) | ||
265 | |||
266 | #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) | ||
267 | |||
268 | /************************************ | ||
269 | ** SBA register read and write support | ||
270 | ** | ||
271 | ** BE WARNED: register writes are posted. | ||
272 | ** (ie follow writes which must reach HW with a read) | ||
273 | ** | ||
274 | */ | ||
275 | #define READ_REG(addr) __raw_readq(addr) | ||
276 | #define WRITE_REG(val, addr) __raw_writeq(val, addr) | ||
277 | |||
278 | #ifdef DEBUG_SBA_INIT | ||
279 | |||
280 | /** | ||
281 | * sba_dump_tlb - debugging only - print IOMMU operating parameters | ||
282 | * @hpa: base address of the IOMMU | ||
283 | * | ||
284 | * Print the size/location of the IO MMU PDIR. | ||
285 | */ | ||
286 | static void | ||
287 | sba_dump_tlb(char *hpa) | ||
288 | { | ||
289 | DBG_INIT("IO TLB at 0x%p\n", (void *)hpa); | ||
290 | DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE)); | ||
291 | DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK)); | ||
292 | DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG)); | ||
293 | DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE)); | ||
294 | DBG_INIT("\n"); | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | |||
299 | #ifdef ASSERT_PDIR_SANITY | ||
300 | |||
301 | /** | ||
302 | * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry | ||
303 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
304 | * @msg: text to print on the output line. | ||
305 | * @pide: pdir index. | ||
306 | * | ||
307 | * Print one entry of the IO MMU PDIR in human readable form. | ||
308 | */ | ||
309 | static void | ||
310 | sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) | ||
311 | { | ||
312 | /* start printing from lowest pde in rval */ | ||
313 | u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]; | ||
314 | unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)]; | ||
315 | uint rcnt; | ||
316 | |||
317 | printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", | ||
318 | msg, rptr, pide & (BITS_PER_LONG - 1), *rptr); | ||
319 | |||
320 | rcnt = 0; | ||
321 | while (rcnt < BITS_PER_LONG) { | ||
322 | printk(KERN_DEBUG "%s %2d %p %016Lx\n", | ||
323 | (rcnt == (pide & (BITS_PER_LONG - 1))) | ||
324 | ? " -->" : " ", | ||
325 | rcnt, ptr, (unsigned long long) *ptr ); | ||
326 | rcnt++; | ||
327 | ptr++; | ||
328 | } | ||
329 | printk(KERN_DEBUG "%s", msg); | ||
330 | } | ||
331 | |||
332 | |||
333 | /** | ||
334 | * sba_check_pdir - debugging only - consistency checker | ||
335 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
336 | * @msg: text to print on the output line. | ||
337 | * | ||
338 | * Verify the resource map and pdir state is consistent | ||
339 | */ | ||
340 | static int | ||
341 | sba_check_pdir(struct ioc *ioc, char *msg) | ||
342 | { | ||
343 | u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]); | ||
344 | u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */ | ||
345 | u64 *pptr = ioc->pdir_base; /* pdir ptr */ | ||
346 | uint pide = 0; | ||
347 | |||
348 | while (rptr < rptr_end) { | ||
349 | u64 rval; | ||
350 | int rcnt; /* number of bits we might check */ | ||
351 | |||
352 | rval = *rptr; | ||
353 | rcnt = 64; | ||
354 | |||
355 | while (rcnt) { | ||
356 | /* Get last byte and highest bit from that */ | ||
357 | u32 pde = ((u32)((*pptr >> (63)) & 0x1)); | ||
358 | if ((rval & 0x1) ^ pde) | ||
359 | { | ||
360 | /* | ||
361 | ** BUMMER! -- res_map != pdir -- | ||
362 | ** Dump rval and matching pdir entries | ||
363 | */ | ||
364 | sba_dump_pdir_entry(ioc, msg, pide); | ||
365 | return(1); | ||
366 | } | ||
367 | rcnt--; | ||
368 | rval >>= 1; /* try the next bit */ | ||
369 | pptr++; | ||
370 | pide++; | ||
371 | } | ||
372 | rptr++; /* look at next word of res_map */ | ||
373 | } | ||
374 | /* It'd be nice if we always got here :^) */ | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | |||
379 | /** | ||
380 | * sba_dump_sg - debugging only - print Scatter-Gather list | ||
381 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
382 | * @startsg: head of the SG list | ||
383 | * @nents: number of entries in SG list | ||
384 | * | ||
385 | * print the SG list so we can verify it's correct by hand. | ||
386 | */ | ||
387 | static void | ||
388 | sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | ||
389 | { | ||
390 | while (nents-- > 0) { | ||
391 | printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, | ||
392 | startsg->dma_address, startsg->dma_length, | ||
393 | sba_sg_address(startsg)); | ||
394 | startsg++; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static void | ||
399 | sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | ||
400 | { | ||
401 | struct scatterlist *the_sg = startsg; | ||
402 | int the_nents = nents; | ||
403 | |||
404 | while (the_nents-- > 0) { | ||
405 | if (sba_sg_address(the_sg) == 0x0UL) | ||
406 | sba_dump_sg(NULL, startsg, nents); | ||
407 | the_sg++; | ||
408 | } | ||
409 | } | ||
410 | |||
411 | #endif /* ASSERT_PDIR_SANITY */ | ||
412 | |||
413 | |||
414 | |||
415 | |||
416 | /************************************************************** | ||
417 | * | ||
418 | * I/O Pdir Resource Management | ||
419 | * | ||
420 | * Bits set in the resource map are in use. | ||
421 | * Each bit can represent a number of pages. | ||
422 | * LSbs represent lower addresses (IOVA's). | ||
423 | * | ||
424 | ***************************************************************/ | ||
425 | #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ | ||
426 | |||
427 | /* Convert from IOVP to IOVA and vice versa. */ | ||
428 | #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset)) | ||
429 | #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) | ||
430 | |||
431 | #define PDIR_ENTRY_SIZE sizeof(u64) | ||
432 | |||
433 | #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift) | ||
434 | |||
435 | #define RESMAP_MASK(n) ~(~0UL << (n)) | ||
436 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | ||
437 | |||
438 | |||
439 | /** | ||
440 | * For most cases the normal get_order is sufficient, however it limits us | ||
441 | * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity. | ||
442 | * It only incurs about 1 clock cycle to use this one with the static variable | ||
443 | * and makes the code more intuitive. | ||
444 | */ | ||
445 | static SBA_INLINE int | ||
446 | get_iovp_order (unsigned long size) | ||
447 | { | ||
448 | long double d = size - 1; | ||
449 | long order; | ||
450 | |||
451 | order = ia64_getf_exp(d); | ||
452 | order = order - iovp_shift - 0xffff + 1; | ||
453 | if (order < 0) | ||
454 | order = 0; | ||
455 | return order; | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | ||
460 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
461 | * @bits_wanted: number of entries we need. | ||
462 | * | ||
463 | * Find consecutive free bits in resource bitmap. | ||
464 | * Each bit represents one entry in the IO Pdir. | ||
465 | * Cool perf optimization: search for log2(size) bits at a time. | ||
466 | */ | ||
467 | static SBA_INLINE unsigned long | ||
468 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | ||
469 | { | ||
470 | unsigned long *res_ptr = ioc->res_hint; | ||
471 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | ||
472 | unsigned long pide = ~0UL; | ||
473 | |||
474 | ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); | ||
475 | ASSERT(res_ptr < res_end); | ||
476 | |||
477 | /* | ||
478 | * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts | ||
479 | * if a TLB entry is purged while in use. sba_mark_invalid() | ||
480 | * purges IOTLB entries in power-of-two sizes, so we also | ||
481 | * allocate IOVA space in power-of-two sizes. | ||
482 | */ | ||
483 | bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift); | ||
484 | |||
485 | if (likely(bits_wanted == 1)) { | ||
486 | unsigned int bitshiftcnt; | ||
487 | for(; res_ptr < res_end ; res_ptr++) { | ||
488 | if (likely(*res_ptr != ~0UL)) { | ||
489 | bitshiftcnt = ffz(*res_ptr); | ||
490 | *res_ptr |= (1UL << bitshiftcnt); | ||
491 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | ||
492 | pide <<= 3; /* convert to bit address */ | ||
493 | pide += bitshiftcnt; | ||
494 | ioc->res_bitshift = bitshiftcnt + bits_wanted; | ||
495 | goto found_it; | ||
496 | } | ||
497 | } | ||
498 | goto not_found; | ||
499 | |||
500 | } | ||
501 | |||
502 | if (likely(bits_wanted <= BITS_PER_LONG/2)) { | ||
503 | /* | ||
504 | ** Search the resource bit map on well-aligned values. | ||
505 | ** "o" is the alignment. | ||
506 | ** We need the alignment to invalidate I/O TLB using | ||
507 | ** SBA HW features in the unmap path. | ||
508 | */ | ||
509 | unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift); | ||
510 | uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); | ||
511 | unsigned long mask, base_mask; | ||
512 | |||
513 | base_mask = RESMAP_MASK(bits_wanted); | ||
514 | mask = base_mask << bitshiftcnt; | ||
515 | |||
516 | DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); | ||
517 | for(; res_ptr < res_end ; res_ptr++) | ||
518 | { | ||
519 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | ||
520 | ASSERT(0 != mask); | ||
521 | for (; mask ; mask <<= o, bitshiftcnt += o) { | ||
522 | if(0 == ((*res_ptr) & mask)) { | ||
523 | *res_ptr |= mask; /* mark resources busy! */ | ||
524 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | ||
525 | pide <<= 3; /* convert to bit address */ | ||
526 | pide += bitshiftcnt; | ||
527 | ioc->res_bitshift = bitshiftcnt + bits_wanted; | ||
528 | goto found_it; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | bitshiftcnt = 0; | ||
533 | mask = base_mask; | ||
534 | |||
535 | } | ||
536 | |||
537 | } else { | ||
538 | int qwords, bits, i; | ||
539 | unsigned long *end; | ||
540 | |||
541 | qwords = bits_wanted >> 6; /* /64 */ | ||
542 | bits = bits_wanted - (qwords * BITS_PER_LONG); | ||
543 | |||
544 | end = res_end - qwords; | ||
545 | |||
546 | for (; res_ptr < end; res_ptr++) { | ||
547 | for (i = 0 ; i < qwords ; i++) { | ||
548 | if (res_ptr[i] != 0) | ||
549 | goto next_ptr; | ||
550 | } | ||
551 | if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits)) | ||
552 | continue; | ||
553 | |||
554 | /* Found it, mark it */ | ||
555 | for (i = 0 ; i < qwords ; i++) | ||
556 | res_ptr[i] = ~0UL; | ||
557 | res_ptr[i] |= RESMAP_MASK(bits); | ||
558 | |||
559 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | ||
560 | pide <<= 3; /* convert to bit address */ | ||
561 | res_ptr += qwords; | ||
562 | ioc->res_bitshift = bits; | ||
563 | goto found_it; | ||
564 | next_ptr: | ||
565 | ; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | not_found: | ||
570 | prefetch(ioc->res_map); | ||
571 | ioc->res_hint = (unsigned long *) ioc->res_map; | ||
572 | ioc->res_bitshift = 0; | ||
573 | return (pide); | ||
574 | |||
575 | found_it: | ||
576 | ioc->res_hint = res_ptr; | ||
577 | return (pide); | ||
578 | } | ||
579 | |||
580 | |||
581 | /** | ||
582 | * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap | ||
583 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
584 | * @size: number of bytes to create a mapping for | ||
585 | * | ||
586 | * Given a size, find consecutive unmarked and then mark those bits in the | ||
587 | * resource bit map. | ||
588 | */ | ||
589 | static int | ||
590 | sba_alloc_range(struct ioc *ioc, size_t size) | ||
591 | { | ||
592 | unsigned int pages_needed = size >> iovp_shift; | ||
593 | #ifdef PDIR_SEARCH_TIMING | ||
594 | unsigned long itc_start; | ||
595 | #endif | ||
596 | unsigned long pide; | ||
597 | unsigned long flags; | ||
598 | |||
599 | ASSERT(pages_needed); | ||
600 | ASSERT(0 == (size & ~iovp_mask)); | ||
601 | |||
602 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
603 | |||
604 | #ifdef PDIR_SEARCH_TIMING | ||
605 | itc_start = ia64_get_itc(); | ||
606 | #endif | ||
607 | /* | ||
608 | ** "seek and ye shall find"...praying never hurts either... | ||
609 | */ | ||
610 | pide = sba_search_bitmap(ioc, pages_needed); | ||
611 | if (unlikely(pide >= (ioc->res_size << 3))) { | ||
612 | pide = sba_search_bitmap(ioc, pages_needed); | ||
613 | if (unlikely(pide >= (ioc->res_size << 3))) { | ||
614 | #if DELAYED_RESOURCE_CNT > 0 | ||
615 | /* | ||
616 | ** With delayed resource freeing, we can give this one more shot. We're | ||
617 | ** getting close to being in trouble here, so do what we can to make this | ||
618 | ** one count. | ||
619 | */ | ||
620 | spin_lock(&ioc->saved_lock); | ||
621 | if (ioc->saved_cnt > 0) { | ||
622 | struct sba_dma_pair *d; | ||
623 | int cnt = ioc->saved_cnt; | ||
624 | |||
625 | d = &(ioc->saved[ioc->saved_cnt]); | ||
626 | |||
627 | while (cnt--) { | ||
628 | sba_mark_invalid(ioc, d->iova, d->size); | ||
629 | sba_free_range(ioc, d->iova, d->size); | ||
630 | d--; | ||
631 | } | ||
632 | ioc->saved_cnt = 0; | ||
633 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | ||
634 | } | ||
635 | spin_unlock(&ioc->saved_lock); | ||
636 | |||
637 | pide = sba_search_bitmap(ioc, pages_needed); | ||
638 | if (unlikely(pide >= (ioc->res_size << 3))) | ||
639 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | ||
640 | ioc->ioc_hpa); | ||
641 | #else | ||
642 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | ||
643 | ioc->ioc_hpa); | ||
644 | #endif | ||
645 | } | ||
646 | } | ||
647 | |||
648 | #ifdef PDIR_SEARCH_TIMING | ||
649 | ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed; | ||
650 | ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; | ||
651 | #endif | ||
652 | |||
653 | prefetchw(&(ioc->pdir_base[pide])); | ||
654 | |||
655 | #ifdef ASSERT_PDIR_SANITY | ||
656 | /* verify the first enable bit is clear */ | ||
657 | if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) { | ||
658 | sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); | ||
659 | } | ||
660 | #endif | ||
661 | |||
662 | DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", | ||
663 | __FUNCTION__, size, pages_needed, pide, | ||
664 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), | ||
665 | ioc->res_bitshift ); | ||
666 | |||
667 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
668 | |||
669 | return (pide); | ||
670 | } | ||
671 | |||
672 | |||
673 | /** | ||
674 | * sba_free_range - unmark bits in IO PDIR resource bitmap | ||
675 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
676 | * @iova: IO virtual address which was previously allocated. | ||
677 | * @size: number of bytes to create a mapping for | ||
678 | * | ||
679 | * clear bits in the ioc's resource map | ||
680 | */ | ||
681 | static SBA_INLINE void | ||
682 | sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) | ||
683 | { | ||
684 | unsigned long iovp = SBA_IOVP(ioc, iova); | ||
685 | unsigned int pide = PDIR_INDEX(iovp); | ||
686 | unsigned int ridx = pide >> 3; /* convert bit to byte address */ | ||
687 | unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); | ||
688 | int bits_not_wanted = size >> iovp_shift; | ||
689 | unsigned long m; | ||
690 | |||
691 | /* Round up to power-of-two size: see AR2305 note above */ | ||
692 | bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift); | ||
693 | for (; bits_not_wanted > 0 ; res_ptr++) { | ||
694 | |||
695 | if (unlikely(bits_not_wanted > BITS_PER_LONG)) { | ||
696 | |||
697 | /* these mappings start 64bit aligned */ | ||
698 | *res_ptr = 0UL; | ||
699 | bits_not_wanted -= BITS_PER_LONG; | ||
700 | pide += BITS_PER_LONG; | ||
701 | |||
702 | } else { | ||
703 | |||
704 | /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */ | ||
705 | m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); | ||
706 | bits_not_wanted = 0; | ||
707 | |||
708 | DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size, | ||
709 | bits_not_wanted, m, pide, res_ptr, *res_ptr); | ||
710 | |||
711 | ASSERT(m != 0); | ||
712 | ASSERT(bits_not_wanted); | ||
713 | ASSERT((*res_ptr & m) == m); /* verify same bits are set */ | ||
714 | *res_ptr &= ~m; | ||
715 | } | ||
716 | } | ||
717 | } | ||
718 | |||
719 | |||
720 | /************************************************************** | ||
721 | * | ||
722 | * "Dynamic DMA Mapping" support (aka "Coherent I/O") | ||
723 | * | ||
724 | ***************************************************************/ | ||
725 | |||
726 | /** | ||
727 | * sba_io_pdir_entry - fill in one IO PDIR entry | ||
728 | * @pdir_ptr: pointer to IO PDIR entry | ||
729 | * @vba: Virtual CPU address of buffer to map | ||
730 | * | ||
731 | * SBA Mapping Routine | ||
732 | * | ||
733 | * Given a virtual address (vba, arg1) sba_io_pdir_entry() | ||
734 | * loads the I/O PDIR entry pointed to by pdir_ptr (arg0). | ||
735 | * Each IO Pdir entry consists of 8 bytes as shown below | ||
736 | * (LSB == bit 0): | ||
737 | * | ||
738 | *  63                   40                                   11    7       0 | ||
739 | * +-+---------------------+----------------------------------+----+--------+ | ||
740 | * |V|          U          |            PPN[39:12]            | U  |   FF   | | ||
741 | * +-+---------------------+----------------------------------+----+--------+ | ||
742 | * | ||
743 | * V == Valid Bit | ||
744 | * U == Unused | ||
745 | * PPN == Physical Page Number | ||
746 | * | ||
747 | * The physical address fields are filled with the results of virt_to_phys() | ||
748 | * on the vba. | ||
749 | */ | ||
750 | |||
751 | #if 1 | ||
752 | #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \ | ||
753 | | 0x8000000000000000ULL) | ||
754 | #else | ||
755 | void SBA_INLINE | ||
756 | sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba) | ||
757 | { | ||
758 | *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL); | ||
759 | } | ||
760 | #endif | ||
761 | |||
762 | #ifdef ENABLE_MARK_CLEAN | ||
763 | /** | ||
764 | * Since DMA is i-cache coherent, any (complete) pages that were written via | ||
765 | * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to | ||
766 | * flush them when they get mapped into an executable vm-area. | ||
767 | */ | ||
768 | static void | ||
769 | mark_clean (void *addr, size_t size) | ||
770 | { | ||
771 | unsigned long pg_addr, end; | ||
772 | |||
773 | pg_addr = PAGE_ALIGN((unsigned long) addr); | ||
774 | end = (unsigned long) addr + size; | ||
775 | while (pg_addr + PAGE_SIZE <= end) { | ||
776 | struct page *page = virt_to_page((void *)pg_addr); | ||
777 | set_bit(PG_arch_1, &page->flags); | ||
778 | pg_addr += PAGE_SIZE; | ||
779 | } | ||
780 | } | ||
781 | #endif | ||
782 | |||
783 | /** | ||
784 | * sba_mark_invalid - invalidate one or more IO PDIR entries | ||
785 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
786 | * @iova: IO Virtual Address mapped earlier | ||
787 | * @byte_cnt: number of bytes this mapping covers. | ||
788 | * | ||
789 | * Marking the IO PDIR entry(ies) as Invalid and invalidate | ||
790 | * corresponding IO TLB entry. The PCOM (Purge Command Register) | ||
791 | * is to purge stale entries in the IO TLB when unmapping entries. | ||
792 | * | ||
793 | * The PCOM register supports purging of multiple pages, with a minium | ||
794 | * of 1 page and a maximum of 2GB. Hardware requires the address be | ||
795 | * aligned to the size of the range being purged. The size of the range | ||
796 | * must be a power of 2. The "Cool perf optimization" in the | ||
797 | * allocation routine helps keep that true. | ||
798 | */ | ||
799 | static SBA_INLINE void | ||
800 | sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | ||
801 | { | ||
802 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | ||
803 | |||
804 | int off = PDIR_INDEX(iovp); | ||
805 | |||
806 | /* Must be non-zero and rounded up */ | ||
807 | ASSERT(byte_cnt > 0); | ||
808 | ASSERT(0 == (byte_cnt & ~iovp_mask)); | ||
809 | |||
810 | #ifdef ASSERT_PDIR_SANITY | ||
811 | /* Assert first pdir entry is set */ | ||
812 | if (!(ioc->pdir_base[off] >> 60)) { | ||
813 | sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); | ||
814 | } | ||
815 | #endif | ||
816 | |||
817 | if (byte_cnt <= iovp_size) | ||
818 | { | ||
819 | ASSERT(off < ioc->pdir_size); | ||
820 | |||
821 | iovp |= iovp_shift; /* set "size" field for PCOM */ | ||
822 | |||
823 | #ifndef FULL_VALID_PDIR | ||
824 | /* | ||
825 | ** clear I/O PDIR entry "valid" bit | ||
826 | ** Do NOT clear the rest - save it for debugging. | ||
827 | ** We should only clear bits that have previously | ||
828 | ** been enabled. | ||
829 | */ | ||
830 | ioc->pdir_base[off] &= ~(0x80000000000000FFULL); | ||
831 | #else | ||
832 | /* | ||
833 | ** If we want to maintain the PDIR as valid, put in | ||
834 | ** the spill page so devices prefetching won't | ||
835 | ** cause a hard fail. | ||
836 | */ | ||
837 | ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); | ||
838 | #endif | ||
839 | } else { | ||
840 | u32 t = get_iovp_order(byte_cnt) + iovp_shift; | ||
841 | |||
842 | iovp |= t; | ||
843 | ASSERT(t <= 31); /* 2GB! Max value of "size" field */ | ||
844 | |||
845 | do { | ||
846 | /* verify this pdir entry is enabled */ | ||
847 | ASSERT(ioc->pdir_base[off] >> 63); | ||
848 | #ifndef FULL_VALID_PDIR | ||
849 | /* clear I/O Pdir entry "valid" bit first */ | ||
850 | ioc->pdir_base[off] &= ~(0x80000000000000FFULL); | ||
851 | #else | ||
852 | ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); | ||
853 | #endif | ||
854 | off++; | ||
855 | byte_cnt -= iovp_size; | ||
856 | } while (byte_cnt > 0); | ||
857 | } | ||
858 | |||
859 | WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM); | ||
860 | } | ||
861 | |||
862 | /** | ||
863 | * sba_map_single - map one buffer and return IOVA for DMA | ||
864 | * @dev: instance of PCI owned by the driver that's asking. | ||
865 | * @addr: driver buffer to map. | ||
866 | * @size: number of bytes to map in driver buffer. | ||
867 | * @dir: R/W or both. | ||
868 | * | ||
869 | * See Documentation/DMA-mapping.txt | ||
870 | */ | ||
871 | dma_addr_t | ||
872 | sba_map_single(struct device *dev, void *addr, size_t size, int dir) | ||
873 | { | ||
874 | struct ioc *ioc; | ||
875 | dma_addr_t iovp; | ||
876 | dma_addr_t offset; | ||
877 | u64 *pdir_start; | ||
878 | int pide; | ||
879 | #ifdef ASSERT_PDIR_SANITY | ||
880 | unsigned long flags; | ||
881 | #endif | ||
882 | #ifdef ALLOW_IOV_BYPASS | ||
883 | unsigned long pci_addr = virt_to_phys(addr); | ||
884 | #endif | ||
885 | |||
886 | #ifdef ALLOW_IOV_BYPASS | ||
887 | ASSERT(to_pci_dev(dev)->dma_mask); | ||
888 | /* | ||
889 | ** Check if the PCI device can DMA to ptr... if so, just return ptr | ||
890 | */ | ||
891 | if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) { | ||
892 | /* | ||
893 | ** Device is bit capable of DMA'ing to the buffer... | ||
894 | ** just return the PCI address of ptr | ||
895 | */ | ||
896 | DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", | ||
897 | to_pci_dev(dev)->dma_mask, pci_addr); | ||
898 | return pci_addr; | ||
899 | } | ||
900 | #endif | ||
901 | ioc = GET_IOC(dev); | ||
902 | ASSERT(ioc); | ||
903 | |||
904 | prefetch(ioc->res_hint); | ||
905 | |||
906 | ASSERT(size > 0); | ||
907 | ASSERT(size <= DMA_CHUNK_SIZE); | ||
908 | |||
909 | /* save offset bits */ | ||
910 | offset = ((dma_addr_t) (long) addr) & ~iovp_mask; | ||
911 | |||
912 | /* round up to nearest iovp_size */ | ||
913 | size = (size + offset + ~iovp_mask) & iovp_mask; | ||
914 | |||
915 | #ifdef ASSERT_PDIR_SANITY | ||
916 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
917 | if (sba_check_pdir(ioc,"Check before sba_map_single()")) | ||
918 | panic("Sanity check failed"); | ||
919 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
920 | #endif | ||
921 | |||
922 | pide = sba_alloc_range(ioc, size); | ||
923 | |||
924 | iovp = (dma_addr_t) pide << iovp_shift; | ||
925 | |||
926 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | ||
927 | __FUNCTION__, addr, (long) iovp | offset); | ||
928 | |||
929 | pdir_start = &(ioc->pdir_base[pide]); | ||
930 | |||
931 | while (size > 0) { | ||
932 | ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ | ||
933 | sba_io_pdir_entry(pdir_start, (unsigned long) addr); | ||
934 | |||
935 | DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); | ||
936 | |||
937 | addr += iovp_size; | ||
938 | size -= iovp_size; | ||
939 | pdir_start++; | ||
940 | } | ||
941 | /* force pdir update */ | ||
942 | wmb(); | ||
943 | |||
944 | /* form complete address */ | ||
945 | #ifdef ASSERT_PDIR_SANITY | ||
946 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
947 | sba_check_pdir(ioc,"Check after sba_map_single()"); | ||
948 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
949 | #endif | ||
950 | return SBA_IOVA(ioc, iovp, offset); | ||
951 | } | ||
952 | |||
953 | /** | ||
954 | * sba_unmap_single - unmap one IOVA and free resources | ||
955 | * @dev: instance of PCI owned by the driver that's asking. | ||
956 | * @iova: IOVA of driver buffer previously mapped. | ||
957 | * @size: number of bytes mapped in driver buffer. | ||
958 | * @dir: R/W or both. | ||
959 | * | ||
960 | * See Documentation/DMA-mapping.txt | ||
961 | */ | ||
962 | void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | ||
963 | { | ||
964 | struct ioc *ioc; | ||
965 | #if DELAYED_RESOURCE_CNT > 0 | ||
966 | struct sba_dma_pair *d; | ||
967 | #endif | ||
968 | unsigned long flags; | ||
969 | dma_addr_t offset; | ||
970 | |||
971 | ioc = GET_IOC(dev); | ||
972 | ASSERT(ioc); | ||
973 | |||
974 | #ifdef ALLOW_IOV_BYPASS | ||
975 | if (likely((iova & ioc->imask) != ioc->ibase)) { | ||
976 | /* | ||
977 | ** Address does not fall w/in IOVA, must be bypassing | ||
978 | */ | ||
979 | DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova); | ||
980 | |||
981 | #ifdef ENABLE_MARK_CLEAN | ||
982 | if (dir == DMA_FROM_DEVICE) { | ||
983 | mark_clean(phys_to_virt(iova), size); | ||
984 | } | ||
985 | #endif | ||
986 | return; | ||
987 | } | ||
988 | #endif | ||
989 | offset = iova & ~iovp_mask; | ||
990 | |||
991 | DBG_RUN("%s() iovp 0x%lx/%x\n", | ||
992 | __FUNCTION__, (long) iova, size); | ||
993 | |||
994 | iova ^= offset; /* clear offset bits */ | ||
995 | size += offset; | ||
996 | size = ROUNDUP(size, iovp_size); | ||
997 | |||
998 | |||
999 | #if DELAYED_RESOURCE_CNT > 0 | ||
1000 | spin_lock_irqsave(&ioc->saved_lock, flags); | ||
1001 | d = &(ioc->saved[ioc->saved_cnt]); | ||
1002 | d->iova = iova; | ||
1003 | d->size = size; | ||
1004 | if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) { | ||
1005 | int cnt = ioc->saved_cnt; | ||
1006 | spin_lock(&ioc->res_lock); | ||
1007 | while (cnt--) { | ||
1008 | sba_mark_invalid(ioc, d->iova, d->size); | ||
1009 | sba_free_range(ioc, d->iova, d->size); | ||
1010 | d--; | ||
1011 | } | ||
1012 | ioc->saved_cnt = 0; | ||
1013 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | ||
1014 | spin_unlock(&ioc->res_lock); | ||
1015 | } | ||
1016 | spin_unlock_irqrestore(&ioc->saved_lock, flags); | ||
1017 | #else /* DELAYED_RESOURCE_CNT == 0 */ | ||
1018 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1019 | sba_mark_invalid(ioc, iova, size); | ||
1020 | sba_free_range(ioc, iova, size); | ||
1021 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | ||
1022 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1023 | #endif /* DELAYED_RESOURCE_CNT == 0 */ | ||
1024 | #ifdef ENABLE_MARK_CLEAN | ||
1025 | if (dir == DMA_FROM_DEVICE) { | ||
1026 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | ||
1027 | int off = PDIR_INDEX(iovp); | ||
1028 | void *addr; | ||
1029 | |||
1030 | if (size <= iovp_size) { | ||
1031 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
1032 | ~0xE000000000000FFFULL); | ||
1033 | mark_clean(addr, size); | ||
1034 | } else { | ||
1035 | size_t byte_cnt = size; | ||
1036 | |||
1037 | do { | ||
1038 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
1039 | ~0xE000000000000FFFULL); | ||
1040 | mark_clean(addr, min(byte_cnt, iovp_size)); | ||
1041 | off++; | ||
1042 | byte_cnt -= iovp_size; | ||
1043 | |||
1044 | } while (byte_cnt > 0); | ||
1045 | } | ||
1046 | } | ||
1047 | #endif | ||
1048 | } | ||
1049 | |||
1050 | |||
1051 | /** | ||
1052 | * sba_alloc_coherent - allocate/map shared mem for DMA | ||
1053 | * @dev: instance of PCI owned by the driver that's asking. | ||
1054 | * @size: number of bytes mapped in driver buffer. | ||
1055 | * @dma_handle: IOVA of new buffer. | ||
1056 | * | ||
1057 | * See Documentation/DMA-mapping.txt | ||
1058 | */ | ||
1059 | void * | ||
1060 | sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags) | ||
1061 | { | ||
1062 | struct ioc *ioc; | ||
1063 | void *addr; | ||
1064 | |||
1065 | ioc = GET_IOC(dev); | ||
1066 | ASSERT(ioc); | ||
1067 | |||
1068 | #ifdef CONFIG_NUMA | ||
1069 | { | ||
1070 | struct page *page; | ||
1071 | page = alloc_pages_node(ioc->node == MAX_NUMNODES ? | ||
1072 | numa_node_id() : ioc->node, flags, | ||
1073 | get_order(size)); | ||
1074 | |||
1075 | if (unlikely(!page)) | ||
1076 | return NULL; | ||
1077 | |||
1078 | addr = page_address(page); | ||
1079 | } | ||
1080 | #else | ||
1081 | addr = (void *) __get_free_pages(flags, get_order(size)); | ||
1082 | #endif | ||
1083 | if (unlikely(!addr)) | ||
1084 | return NULL; | ||
1085 | |||
1086 | memset(addr, 0, size); | ||
1087 | *dma_handle = virt_to_phys(addr); | ||
1088 | |||
1089 | #ifdef ALLOW_IOV_BYPASS | ||
1090 | ASSERT(dev->coherent_dma_mask); | ||
1091 | /* | ||
1092 | ** Check if the PCI device can DMA to ptr... if so, just return ptr | ||
1093 | */ | ||
1094 | if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) { | ||
1095 | DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n", | ||
1096 | dev->coherent_dma_mask, *dma_handle); | ||
1097 | |||
1098 | return addr; | ||
1099 | } | ||
1100 | #endif | ||
1101 | |||
1102 | /* | ||
1103 | * If device can't bypass or bypass is disabled, pass the 32bit fake | ||
1104 | * device to map single to get an iova mapping. | ||
1105 | */ | ||
1106 | *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0); | ||
1107 | |||
1108 | return addr; | ||
1109 | } | ||
1110 | |||
1111 | |||
1112 | /** | ||
1113 | * sba_free_coherent - free/unmap shared mem for DMA | ||
1114 | * @dev: instance of PCI owned by the driver that's asking. | ||
1115 | * @size: number of bytes mapped in driver buffer. | ||
1116 | * @vaddr: virtual address IOVA of "consistent" buffer. | ||
1117 | * @dma_handle: IO virtual address of "consistent" buffer. | ||
1118 | * | ||
1119 | * See Documentation/DMA-mapping.txt | ||
1120 | */ | ||
1121 | void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
1122 | { | ||
1123 | sba_unmap_single(dev, dma_handle, size, 0); | ||
1124 | free_pages((unsigned long) vaddr, get_order(size)); | ||
1125 | } | ||
1126 | |||
1127 | |||
1128 | /* | ||
1129 | ** Since 0 is a valid pdir_base index value, can't use that | ||
1130 | ** to determine if a value is valid or not. Use a flag to indicate | ||
1131 | ** the SG list entry contains a valid pdir index. | ||
1132 | */ | ||
1133 | #define PIDE_FLAG 0x1UL | ||
1134 | |||
1135 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1136 | int dump_run_sg = 0; | ||
1137 | #endif | ||
1138 | |||
1139 | |||
1140 | /** | ||
1141 | * sba_fill_pdir - write allocated SG entries into IO PDIR | ||
1142 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
1143 | * @startsg: list of IOVA/size pairs | ||
1144 | * @nents: number of entries in startsg list | ||
1145 | * | ||
1146 | * Take preprocessed SG list and write corresponding entries | ||
1147 | * in the IO PDIR. | ||
1148 | */ | ||
1149 | |||
1150 | static SBA_INLINE int | ||
1151 | sba_fill_pdir( | ||
1152 | struct ioc *ioc, | ||
1153 | struct scatterlist *startsg, | ||
1154 | int nents) | ||
1155 | { | ||
1156 | struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ | ||
1157 | int n_mappings = 0; | ||
1158 | u64 *pdirp = NULL; | ||
1159 | unsigned long dma_offset = 0; | ||
1160 | |||
1161 | dma_sg--; | ||
1162 | while (nents-- > 0) { | ||
1163 | int cnt = startsg->dma_length; | ||
1164 | startsg->dma_length = 0; | ||
1165 | |||
1166 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1167 | if (dump_run_sg) | ||
1168 | printk(" %2d : %08lx/%05x %p\n", | ||
1169 | nents, startsg->dma_address, cnt, | ||
1170 | sba_sg_address(startsg)); | ||
1171 | #else | ||
1172 | DBG_RUN_SG(" %d : %08lx/%05x %p\n", | ||
1173 | nents, startsg->dma_address, cnt, | ||
1174 | sba_sg_address(startsg)); | ||
1175 | #endif | ||
1176 | /* | ||
1177 | ** Look for the start of a new DMA stream | ||
1178 | */ | ||
1179 | if (startsg->dma_address & PIDE_FLAG) { | ||
1180 | u32 pide = startsg->dma_address & ~PIDE_FLAG; | ||
1181 | dma_offset = (unsigned long) pide & ~iovp_mask; | ||
1182 | startsg->dma_address = 0; | ||
1183 | dma_sg++; | ||
1184 | dma_sg->dma_address = pide | ioc->ibase; | ||
1185 | pdirp = &(ioc->pdir_base[pide >> iovp_shift]); | ||
1186 | n_mappings++; | ||
1187 | } | ||
1188 | |||
1189 | /* | ||
1190 | ** Look for a VCONTIG chunk | ||
1191 | */ | ||
1192 | if (cnt) { | ||
1193 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | ||
1194 | ASSERT(pdirp); | ||
1195 | |||
1196 | /* Since multiple Vcontig blocks could make up | ||
1197 | ** one DMA stream, *add* cnt to dma_len. | ||
1198 | */ | ||
1199 | dma_sg->dma_length += cnt; | ||
1200 | cnt += dma_offset; | ||
1201 | dma_offset=0; /* only want offset on first chunk */ | ||
1202 | cnt = ROUNDUP(cnt, iovp_size); | ||
1203 | do { | ||
1204 | sba_io_pdir_entry(pdirp, vaddr); | ||
1205 | vaddr += iovp_size; | ||
1206 | cnt -= iovp_size; | ||
1207 | pdirp++; | ||
1208 | } while (cnt > 0); | ||
1209 | } | ||
1210 | startsg++; | ||
1211 | } | ||
1212 | /* force pdir update */ | ||
1213 | wmb(); | ||
1214 | |||
1215 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1216 | dump_run_sg = 0; | ||
1217 | #endif | ||
1218 | return(n_mappings); | ||
1219 | } | ||
1220 | |||
1221 | |||
1222 | /* | ||
1223 | ** Two address ranges are DMA contiguous *iff* "end of prev" and | ||
1224 | ** "start of next" are both on an IOV page boundary. | ||
1225 | ** | ||
1226 | ** (shift left is a quick trick to mask off upper bits) | ||
1227 | */ | ||
1228 | #define DMA_CONTIG(__X, __Y) \ | ||
1229 | (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL) | ||
1230 | |||
1231 | |||
1232 | /** | ||
1233 | * sba_coalesce_chunks - preprocess the SG list | ||
1234 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
1235 | * @startsg: list of IOVA/size pairs | ||
1236 | * @nents: number of entries in startsg list | ||
1237 | * | ||
1238 | * First pass is to walk the SG list and determine where the breaks are | ||
1239 | * in the DMA stream. Allocates PDIR entries but does not fill them. | ||
1240 | * Returns the number of DMA chunks. | ||
1241 | * | ||
1242 | * Doing the fill separate from the coalescing/allocation keeps the | ||
1243 | * code simpler. Future enhancement could make one pass through | ||
1244 | * the sglist do both. | ||
1245 | */ | ||
1246 | static SBA_INLINE int | ||
1247 | sba_coalesce_chunks( struct ioc *ioc, | ||
1248 | struct scatterlist *startsg, | ||
1249 | int nents) | ||
1250 | { | ||
1251 | struct scatterlist *vcontig_sg; /* VCONTIG chunk head */ | ||
1252 | unsigned long vcontig_len; /* len of VCONTIG chunk */ | ||
1253 | unsigned long vcontig_end; | ||
1254 | struct scatterlist *dma_sg; /* next DMA stream head */ | ||
1255 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | ||
1256 | int n_mappings = 0; | ||
1257 | |||
1258 | while (nents > 0) { | ||
1259 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | ||
1260 | |||
1261 | /* | ||
1262 | ** Prepare for first/next DMA stream | ||
1263 | */ | ||
1264 | dma_sg = vcontig_sg = startsg; | ||
1265 | dma_len = vcontig_len = vcontig_end = startsg->length; | ||
1266 | vcontig_end += vaddr; | ||
1267 | dma_offset = vaddr & ~iovp_mask; | ||
1268 | |||
1269 | /* PARANOID: clear entries */ | ||
1270 | startsg->dma_address = startsg->dma_length = 0; | ||
1271 | |||
1272 | /* | ||
1273 | ** This loop terminates one iteration "early" since | ||
1274 | ** it's always looking one "ahead". | ||
1275 | */ | ||
1276 | while (--nents > 0) { | ||
1277 | unsigned long vaddr; /* tmp */ | ||
1278 | |||
1279 | startsg++; | ||
1280 | |||
1281 | /* PARANOID */ | ||
1282 | startsg->dma_address = startsg->dma_length = 0; | ||
1283 | |||
1284 | /* catch brokenness in SCSI layer */ | ||
1285 | ASSERT(startsg->length <= DMA_CHUNK_SIZE); | ||
1286 | |||
1287 | /* | ||
1288 | ** First make sure current dma stream won't | ||
1289 | ** exceed DMA_CHUNK_SIZE if we coalesce the | ||
1290 | ** next entry. | ||
1291 | */ | ||
1292 | if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask) | ||
1293 | > DMA_CHUNK_SIZE) | ||
1294 | break; | ||
1295 | |||
1296 | /* | ||
1297 | ** Then look for virtually contiguous blocks. | ||
1298 | ** | ||
1299 | ** append the next transaction? | ||
1300 | */ | ||
1301 | vaddr = (unsigned long) sba_sg_address(startsg); | ||
1302 | if (vcontig_end == vaddr) | ||
1303 | { | ||
1304 | vcontig_len += startsg->length; | ||
1305 | vcontig_end += startsg->length; | ||
1306 | dma_len += startsg->length; | ||
1307 | continue; | ||
1308 | } | ||
1309 | |||
1310 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1311 | dump_run_sg = (vcontig_len > iovp_size); | ||
1312 | #endif | ||
1313 | |||
1314 | /* | ||
1315 | ** Not virtually contiguous. | ||
1316 | ** Terminate prev chunk. | ||
1317 | ** Start a new chunk. | ||
1318 | ** | ||
1319 | ** Once we start a new VCONTIG chunk, dma_offset | ||
1320 | ** can't change. And we need the offset from the first | ||
1321 | ** chunk - not the last one. Ergo, successive chunks | ||
1322 | ** must start on page boundaries and dovetail | ||
1323 | ** with their predecessor. | ||
1324 | */ | ||
1325 | vcontig_sg->dma_length = vcontig_len; | ||
1326 | |||
1327 | vcontig_sg = startsg; | ||
1328 | vcontig_len = startsg->length; | ||
1329 | |||
1330 | /* | ||
1331 | ** 3) do the entries end/start on page boundaries? | ||
1332 | ** Don't update vcontig_end until we've checked. | ||
1333 | */ | ||
1334 | if (DMA_CONTIG(vcontig_end, vaddr)) | ||
1335 | { | ||
1336 | vcontig_end = vcontig_len + vaddr; | ||
1337 | dma_len += vcontig_len; | ||
1338 | continue; | ||
1339 | } else { | ||
1340 | break; | ||
1341 | } | ||
1342 | } | ||
1343 | |||
1344 | /* | ||
1345 | ** End of DMA Stream | ||
1346 | ** Terminate last VCONTIG block. | ||
1347 | ** Allocate space for DMA stream. | ||
1348 | */ | ||
1349 | vcontig_sg->dma_length = vcontig_len; | ||
1350 | dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; | ||
1351 | ASSERT(dma_len <= DMA_CHUNK_SIZE); | ||
1352 | dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG | ||
1353 | | (sba_alloc_range(ioc, dma_len) << iovp_shift) | ||
1354 | | dma_offset); | ||
1355 | n_mappings++; | ||
1356 | } | ||
1357 | |||
1358 | return n_mappings; | ||
1359 | } | ||
1360 | |||
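A side note on the size check near the top of the inner loop in sba_coalesce_chunks() above: adding ~iovp_mask (which equals iovp_size - 1) and then masking with iovp_mask rounds the running byte count up to a whole number of IOV pages before it is compared against DMA_CHUNK_SIZE. A stand-alone sketch of that round-up, again assuming 4 KB IOV pages:

#include <stdio.h>

#define IOVP_SHIFT	12			/* assumed */
#define IOVP_SIZE	(1UL << IOVP_SHIFT)
#define IOVP_MASK	(~(IOVP_SIZE - 1))

/* Round a byte count up to a whole multiple of the IOV page size. */
static unsigned long iov_roundup(unsigned long bytes)
{
	return (bytes + ~IOVP_MASK) & IOVP_MASK;
}

int main(void)
{
	printf("%lu\n", iov_roundup(1));	/* 4096 */
	printf("%lu\n", iov_roundup(4096));	/* 4096 */
	printf("%lu\n", iov_roundup(4097));	/* 8192 */
	return 0;
}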
1361 | |||
1362 | /** | ||
1363 | * sba_map_sg - map Scatter/Gather list | ||
1364 | * @dev: instance of PCI owned by the driver that's asking. | ||
1365 | * @sglist: array of buffer/length pairs | ||
1366 | * @nents: number of entries in list | ||
1367 | * @dir: R/W or both. | ||
1368 | * | ||
1369 | * See Documentation/DMA-mapping.txt | ||
1370 | */ | ||
1371 | int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
1372 | { | ||
1373 | struct ioc *ioc; | ||
1374 | int coalesced, filled = 0; | ||
1375 | #ifdef ASSERT_PDIR_SANITY | ||
1376 | unsigned long flags; | ||
1377 | #endif | ||
1378 | #ifdef ALLOW_IOV_BYPASS_SG | ||
1379 | struct scatterlist *sg; | ||
1380 | #endif | ||
1381 | |||
1382 | DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); | ||
1383 | ioc = GET_IOC(dev); | ||
1384 | ASSERT(ioc); | ||
1385 | |||
1386 | #ifdef ALLOW_IOV_BYPASS_SG | ||
1387 | ASSERT(to_pci_dev(dev)->dma_mask); | ||
1388 | if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { | ||
1389 | for (sg = sglist ; filled < nents ; filled++, sg++){ | ||
1390 | sg->dma_length = sg->length; | ||
1391 | sg->dma_address = virt_to_phys(sba_sg_address(sg)); | ||
1392 | } | ||
1393 | return filled; | ||
1394 | } | ||
1395 | #endif | ||
1396 | /* Fast path single entry scatterlists. */ | ||
1397 | if (nents == 1) { | ||
1398 | sglist->dma_length = sglist->length; | ||
1399 | sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir); | ||
1400 | return 1; | ||
1401 | } | ||
1402 | |||
1403 | #ifdef ASSERT_PDIR_SANITY | ||
1404 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1405 | if (sba_check_pdir(ioc,"Check before sba_map_sg()")) | ||
1406 | { | ||
1407 | sba_dump_sg(ioc, sglist, nents); | ||
1408 | panic("Check before sba_map_sg()"); | ||
1409 | } | ||
1410 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1411 | #endif | ||
1412 | |||
1413 | prefetch(ioc->res_hint); | ||
1414 | |||
1415 | /* | ||
1416 | ** First coalesce the chunks and allocate I/O pdir space | ||
1417 | ** | ||
1418 | ** If this is one DMA stream, we can properly map using the | ||
1419 | ** correct virtual address associated with each DMA page. | ||
1420 | ** w/o this association, we wouldn't have coherent DMA! | ||
1421 | ** Access to the virtual address is what forces a two pass algorithm. | ||
1422 | */ | ||
1423 | coalesced = sba_coalesce_chunks(ioc, sglist, nents); | ||
1424 | |||
1425 | /* | ||
1426 | ** Program the I/O Pdir | ||
1427 | ** | ||
1428 | ** map the virtual addresses to the I/O Pdir | ||
1429 | ** o dma_address will contain the pdir index | ||
1430 | ** o dma_len will contain the number of bytes to map | ||
1431 | ** o address contains the virtual address. | ||
1432 | */ | ||
1433 | filled = sba_fill_pdir(ioc, sglist, nents); | ||
1434 | |||
1435 | #ifdef ASSERT_PDIR_SANITY | ||
1436 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1437 | if (sba_check_pdir(ioc,"Check after sba_map_sg()")) | ||
1438 | { | ||
1439 | sba_dump_sg(ioc, sglist, nents); | ||
1440 | panic("Check after sba_map_sg()\n"); | ||
1441 | } | ||
1442 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1443 | #endif | ||
1444 | |||
1445 | ASSERT(coalesced == filled); | ||
1446 | DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); | ||
1447 | |||
1448 | return filled; | ||
1449 | } | ||
1450 | |||
1451 | |||
1452 | /** | ||
1453 | * sba_unmap_sg - unmap Scatter/Gather list | ||
1454 | * @dev: instance of PCI owned by the driver that's asking. | ||
1455 | * @sglist: array of buffer/length pairs | ||
1456 | * @nents: number of entries in list | ||
1457 | * @dir: R/W or both. | ||
1458 | * | ||
1459 | * See Documentation/DMA-mapping.txt | ||
1460 | */ | ||
1461 | void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
1462 | { | ||
1463 | #ifdef ASSERT_PDIR_SANITY | ||
1464 | struct ioc *ioc; | ||
1465 | unsigned long flags; | ||
1466 | #endif | ||
1467 | |||
1468 | DBG_RUN_SG("%s() START %d entries, %p,%x\n", | ||
1469 | __FUNCTION__, nents, sba_sg_address(sglist), sglist->length); | ||
1470 | |||
1471 | #ifdef ASSERT_PDIR_SANITY | ||
1472 | ioc = GET_IOC(dev); | ||
1473 | ASSERT(ioc); | ||
1474 | |||
1475 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1476 | sba_check_pdir(ioc,"Check before sba_unmap_sg()"); | ||
1477 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1478 | #endif | ||
1479 | |||
1480 | while (nents && sglist->dma_length) { | ||
1481 | |||
1482 | sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); | ||
1483 | sglist++; | ||
1484 | nents--; | ||
1485 | } | ||
1486 | |||
1487 | DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); | ||
1488 | |||
1489 | #ifdef ASSERT_PDIR_SANITY | ||
1490 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1491 | sba_check_pdir(ioc,"Check after sba_unmap_sg()"); | ||
1492 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1493 | #endif | ||
1494 | |||
1495 | } | ||
1496 | |||
1497 | /************************************************************** | ||
1498 | * | ||
1499 | * Initialization and claim | ||
1500 | * | ||
1501 | ***************************************************************/ | ||
1502 | |||
1503 | static void __init | ||
1504 | ioc_iova_init(struct ioc *ioc) | ||
1505 | { | ||
1506 | int tcnfg; | ||
1507 | int agp_found = 0; | ||
1508 | struct pci_dev *device = NULL; | ||
1509 | #ifdef FULL_VALID_PDIR | ||
1510 | unsigned long index; | ||
1511 | #endif | ||
1512 | |||
1513 | /* | ||
1514 | ** Firmware programs the base and size of a "safe IOVA space" | ||
1515 | ** (one that doesn't overlap memory or LMMIO space) in the | ||
1516 | ** IBASE and IMASK registers. | ||
1517 | */ | ||
1518 | ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL; | ||
1519 | ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL; | ||
1520 | |||
1521 | ioc->iov_size = ~ioc->imask + 1; | ||
1522 | |||
1523 | DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", | ||
1524 | __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask, | ||
1525 | ioc->iov_size >> 20); | ||
1526 | |||
1527 | switch (iovp_size) { | ||
1528 | case 4*1024: tcnfg = 0; break; | ||
1529 | case 8*1024: tcnfg = 1; break; | ||
1530 | case 16*1024: tcnfg = 2; break; | ||
1531 | case 64*1024: tcnfg = 3; break; | ||
1532 | default: | ||
1533 | panic(PFX "Unsupported IOTLB page size %ldK", | ||
1534 | iovp_size >> 10); | ||
1535 | break; | ||
1536 | } | ||
1537 | WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); | ||
1538 | |||
1539 | ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE; | ||
1540 | ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, | ||
1541 | get_order(ioc->pdir_size)); | ||
1542 | if (!ioc->pdir_base) | ||
1543 | panic(PFX "Couldn't allocate I/O Page Table\n"); | ||
1544 | |||
1545 | memset(ioc->pdir_base, 0, ioc->pdir_size); | ||
1546 | |||
1547 | DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__, | ||
1548 | iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); | ||
1549 | |||
1550 | ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); | ||
1551 | WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); | ||
1552 | |||
1553 | /* | ||
1554 | ** If an AGP device is present, only use half of the IOV space | ||
1555 | ** for PCI DMA. Unfortunately we can't know ahead of time | ||
1556 | ** whether GART support will actually be used; for now we | ||
1557 | ** can just key on an AGP device found in the system. | ||
1558 | ** We program the next pdir index after we stop w/ a key for | ||
1559 | ** the GART code to handshake on. | ||
1560 | */ | ||
1561 | for_each_pci_dev(device) | ||
1562 | agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); | ||
1563 | |||
1564 | if (agp_found && reserve_sba_gart) { | ||
1565 | printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n", | ||
1566 | ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2); | ||
1567 | ioc->pdir_size /= 2; | ||
1568 | ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE; | ||
1569 | } | ||
1570 | #ifdef FULL_VALID_PDIR | ||
1571 | /* | ||
1572 | ** Check to see if the spill page has been allocated; we don't need more than | ||
1573 | ** one across multiple SBAs. | ||
1574 | */ | ||
1575 | if (!prefetch_spill_page) { | ||
1576 | char *spill_poison = "SBAIOMMU POISON"; | ||
1577 | int poison_size = 16; | ||
1578 | void *poison_addr, *addr; | ||
1579 | |||
1580 | addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size)); | ||
1581 | if (!addr) | ||
1582 | panic(PFX "Couldn't allocate PDIR spill page\n"); | ||
1583 | |||
1584 | poison_addr = addr; | ||
1585 | for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size) | ||
1586 | memcpy(poison_addr, spill_poison, poison_size); | ||
1587 | |||
1588 | prefetch_spill_page = virt_to_phys(addr); | ||
1589 | |||
1590 | DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page); | ||
1591 | } | ||
1592 | /* | ||
1593 | ** Set all the PDIR entries valid w/ the spill page as the target | ||
1594 | */ | ||
1595 | for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++) | ||
1596 | ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page); | ||
1597 | #endif | ||
1598 | |||
1599 | /* Clear I/O TLB of any possible entries */ | ||
1600 | WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM); | ||
1601 | READ_REG(ioc->ioc_hpa + IOC_PCOM); | ||
1602 | |||
1603 | /* Enable IOVA translation */ | ||
1604 | WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); | ||
1605 | READ_REG(ioc->ioc_hpa + IOC_IBASE); | ||
1606 | } | ||
1607 | |||
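To make the register arithmetic in ioc_iova_init() above concrete: iov_size is derived from IMASK as ~imask + 1, the PDIR needs one 8-byte entry per IOV page, and ioc_resource_init() below then sizes its allocation bitmap at one bit per PDIR entry. The numbers in this sketch are purely illustrative (a hypothetical 1 GB window with 4 KB IOV pages), not values read from real firmware:

#include <stdio.h>

#define PDIR_ENTRY_SIZE	8UL	/* one 64-bit entry per IOV page */

int main(void)
{
	unsigned long imask     = 0xFFFFFFFFC0000000UL;	/* assumed: 1 GB window */
	unsigned long iovp_size = 4096;			/* tcnfg = 0 */

	unsigned long iov_size  = ~imask + 1;
	unsigned long pdir_size = (iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	unsigned long res_size  = (pdir_size / PDIR_ENTRY_SIZE) >> 3;	/* bitmap bytes */

	printf("iov_size  = %lu MB\n", iov_size >> 20);		/* 1024 MB */
	printf("pdir_size = %lu KB\n", pdir_size >> 10);	/* 2048 KB */
	printf("res_size  = %lu KB\n", res_size >> 10);		/* 32 KB */
	return 0;
}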
1608 | static void __init | ||
1609 | ioc_resource_init(struct ioc *ioc) | ||
1610 | { | ||
1611 | spin_lock_init(&ioc->res_lock); | ||
1612 | #if DELAYED_RESOURCE_CNT > 0 | ||
1613 | spin_lock_init(&ioc->saved_lock); | ||
1614 | #endif | ||
1615 | |||
1616 | /* resource map size dictated by pdir_size */ | ||
1617 | ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ | ||
1618 | ioc->res_size >>= 3; /* convert bit count to byte count */ | ||
1619 | DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); | ||
1620 | |||
1621 | ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, | ||
1622 | get_order(ioc->res_size)); | ||
1623 | if (!ioc->res_map) | ||
1624 | panic(PFX "Couldn't allocate resource map\n"); | ||
1625 | |||
1626 | memset(ioc->res_map, 0, ioc->res_size); | ||
1627 | /* next available IOVP - circular search */ | ||
1628 | ioc->res_hint = (unsigned long *) ioc->res_map; | ||
1629 | |||
1630 | #ifdef ASSERT_PDIR_SANITY | ||
1631 | /* Mark first bit busy - ie no IOVA 0 */ | ||
1632 | ioc->res_map[0] = 0x1; | ||
1633 | ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE; | ||
1634 | #endif | ||
1635 | #ifdef FULL_VALID_PDIR | ||
1636 | /* Mark the last resource used so we don't prefetch beyond IOVA space */ | ||
1637 | ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */ | ||
1638 | ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF | ||
1639 | | prefetch_spill_page); | ||
1640 | #endif | ||
1641 | |||
1642 | DBG_INIT("%s() res_map %x %p\n", __FUNCTION__, | ||
1643 | ioc->res_size, (void *) ioc->res_map); | ||
1644 | } | ||
1645 | |||
1646 | static void __init | ||
1647 | ioc_sac_init(struct ioc *ioc) | ||
1648 | { | ||
1649 | struct pci_dev *sac = NULL; | ||
1650 | struct pci_controller *controller = NULL; | ||
1651 | |||
1652 | /* | ||
1653 | * pci_alloc_coherent() must return a DMA address which is | ||
1654 | * SAC (single address cycle) addressable, so allocate a | ||
1655 | * pseudo-device to enforce that. | ||
1656 | */ | ||
1657 | sac = kmalloc(sizeof(*sac), GFP_KERNEL); | ||
1658 | if (!sac) | ||
1659 | panic(PFX "Couldn't allocate struct pci_dev"); | ||
1660 | memset(sac, 0, sizeof(*sac)); | ||
1661 | |||
1662 | controller = kmalloc(sizeof(*controller), GFP_KERNEL); | ||
1663 | if (!controller) | ||
1664 | panic(PFX "Couldn't allocate struct pci_controller"); | ||
1665 | memset(controller, 0, sizeof(*controller)); | ||
1666 | |||
1667 | controller->iommu = ioc; | ||
1668 | sac->sysdata = controller; | ||
1669 | sac->dma_mask = 0xFFFFFFFFUL; | ||
1670 | #ifdef CONFIG_PCI | ||
1671 | sac->dev.bus = &pci_bus_type; | ||
1672 | #endif | ||
1673 | ioc->sac_only_dev = sac; | ||
1674 | } | ||
1675 | |||
1676 | static void __init | ||
1677 | ioc_zx1_init(struct ioc *ioc) | ||
1678 | { | ||
1679 | unsigned long rope_config; | ||
1680 | unsigned int i; | ||
1681 | |||
1682 | if (ioc->rev < 0x20) | ||
1683 | panic(PFX "IOC 2.0 or later required for IOMMU support\n"); | ||
1684 | |||
1685 | /* 38 bit memory controller + extra bit for range displaced by MMIO */ | ||
1686 | ioc->dma_mask = (0x1UL << 39) - 1; | ||
1687 | |||
1688 | /* | ||
1689 | ** Clear ROPE(N)_CONFIG AO bit. | ||
1690 | ** Disables "NT Ordering" (~= !"Relaxed Ordering") | ||
1691 | ** Overrides bit 1 in DMA Hint Sets. | ||
1692 | ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701. | ||
1693 | */ | ||
1694 | for (i=0; i<(8*8); i+=8) { | ||
1695 | rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i); | ||
1696 | rope_config &= ~IOC_ROPE_AO; | ||
1697 | WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i); | ||
1698 | } | ||
1699 | } | ||
1700 | |||
1701 | typedef void (initfunc)(struct ioc *); | ||
1702 | |||
1703 | struct ioc_iommu { | ||
1704 | u32 func_id; | ||
1705 | char *name; | ||
1706 | initfunc *init; | ||
1707 | }; | ||
1708 | |||
1709 | static struct ioc_iommu ioc_iommu_info[] __initdata = { | ||
1710 | { ZX1_IOC_ID, "zx1", ioc_zx1_init }, | ||
1711 | { ZX2_IOC_ID, "zx2", NULL }, | ||
1712 | { SX1000_IOC_ID, "sx1000", NULL }, | ||
1713 | }; | ||
1714 | |||
1715 | static struct ioc * __init | ||
1716 | ioc_init(u64 hpa, void *handle) | ||
1717 | { | ||
1718 | struct ioc *ioc; | ||
1719 | struct ioc_iommu *info; | ||
1720 | |||
1721 | ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); | ||
1722 | if (!ioc) | ||
1723 | return NULL; | ||
1724 | |||
1725 | memset(ioc, 0, sizeof(*ioc)); | ||
1726 | |||
1727 | ioc->next = ioc_list; | ||
1728 | ioc_list = ioc; | ||
1729 | |||
1730 | ioc->handle = handle; | ||
1731 | ioc->ioc_hpa = ioremap(hpa, 0x1000); | ||
1732 | |||
1733 | ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); | ||
1734 | ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL; | ||
1735 | ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */ | ||
1736 | |||
1737 | for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) { | ||
1738 | if (ioc->func_id == info->func_id) { | ||
1739 | ioc->name = info->name; | ||
1740 | if (info->init) | ||
1741 | (info->init)(ioc); | ||
1742 | } | ||
1743 | } | ||
1744 | |||
1745 | iovp_size = (1 << iovp_shift); | ||
1746 | iovp_mask = ~(iovp_size - 1); | ||
1747 | |||
1748 | DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__, | ||
1749 | PAGE_SIZE >> 10, iovp_size >> 10); | ||
1750 | |||
1751 | if (!ioc->name) { | ||
1752 | ioc->name = kmalloc(24, GFP_KERNEL); | ||
1753 | if (ioc->name) | ||
1754 | sprintf((char *) ioc->name, "Unknown (%04x:%04x)", | ||
1755 | ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF); | ||
1756 | else | ||
1757 | ioc->name = "Unknown"; | ||
1758 | } | ||
1759 | |||
1760 | ioc_iova_init(ioc); | ||
1761 | ioc_resource_init(ioc); | ||
1762 | ioc_sac_init(ioc); | ||
1763 | |||
1764 | if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask) | ||
1765 | ia64_max_iommu_merge_mask = ~iovp_mask; | ||
1766 | |||
1767 | printk(KERN_INFO PFX | ||
1768 | "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", | ||
1769 | ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, | ||
1770 | hpa, ioc->iov_size >> 20, ioc->ibase); | ||
1771 | |||
1772 | return ioc; | ||
1773 | } | ||
1774 | |||
1775 | |||
1776 | |||
1777 | /************************************************************************** | ||
1778 | ** | ||
1779 | ** SBA initialization code (HW and SW) | ||
1780 | ** | ||
1781 | ** o identify SBA chip itself | ||
1782 | ** o FIXME: initialize DMA hints for reasonable defaults | ||
1783 | ** | ||
1784 | **************************************************************************/ | ||
1785 | |||
1786 | #ifdef CONFIG_PROC_FS | ||
1787 | static void * | ||
1788 | ioc_start(struct seq_file *s, loff_t *pos) | ||
1789 | { | ||
1790 | struct ioc *ioc; | ||
1791 | loff_t n = *pos; | ||
1792 | |||
1793 | for (ioc = ioc_list; ioc; ioc = ioc->next) | ||
1794 | if (!n--) | ||
1795 | return ioc; | ||
1796 | |||
1797 | return NULL; | ||
1798 | } | ||
1799 | |||
1800 | static void * | ||
1801 | ioc_next(struct seq_file *s, void *v, loff_t *pos) | ||
1802 | { | ||
1803 | struct ioc *ioc = v; | ||
1804 | |||
1805 | ++*pos; | ||
1806 | return ioc->next; | ||
1807 | } | ||
1808 | |||
1809 | static void | ||
1810 | ioc_stop(struct seq_file *s, void *v) | ||
1811 | { | ||
1812 | } | ||
1813 | |||
1814 | static int | ||
1815 | ioc_show(struct seq_file *s, void *v) | ||
1816 | { | ||
1817 | struct ioc *ioc = v; | ||
1818 | unsigned long *res_ptr = (unsigned long *)ioc->res_map; | ||
1819 | int i, used = 0; | ||
1820 | |||
1821 | seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n", | ||
1822 | ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF)); | ||
1823 | #ifdef CONFIG_NUMA | ||
1824 | if (ioc->node != MAX_NUMNODES) | ||
1825 | seq_printf(s, "NUMA node : %d\n", ioc->node); | ||
1826 | #endif | ||
1827 | seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024)); | ||
1828 | seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024); | ||
1829 | |||
1830 | for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) | ||
1831 | used += hweight64(*res_ptr); | ||
1832 | |||
1833 | seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3); | ||
1834 | seq_printf(s, "PDIR used : %d entries\n", used); | ||
1835 | |||
1836 | #ifdef PDIR_SEARCH_TIMING | ||
1837 | { | ||
1838 | unsigned long i = 0, avg = 0, min, max; | ||
1839 | min = max = ioc->avg_search[0]; | ||
1840 | for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { | ||
1841 | avg += ioc->avg_search[i]; | ||
1842 | if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; | ||
1843 | if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; | ||
1844 | } | ||
1845 | avg /= SBA_SEARCH_SAMPLE; | ||
1846 | seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n", | ||
1847 | min, avg, max); | ||
1848 | } | ||
1849 | #endif | ||
1850 | #ifndef ALLOW_IOV_BYPASS | ||
1851 | seq_printf(s, "IOVA bypass disabled\n"); | ||
1852 | #endif | ||
1853 | return 0; | ||
1854 | } | ||
1855 | |||
1856 | static struct seq_operations ioc_seq_ops = { | ||
1857 | .start = ioc_start, | ||
1858 | .next = ioc_next, | ||
1859 | .stop = ioc_stop, | ||
1860 | .show = ioc_show | ||
1861 | }; | ||
1862 | |||
1863 | static int | ||
1864 | ioc_open(struct inode *inode, struct file *file) | ||
1865 | { | ||
1866 | return seq_open(file, &ioc_seq_ops); | ||
1867 | } | ||
1868 | |||
1869 | static struct file_operations ioc_fops = { | ||
1870 | .open = ioc_open, | ||
1871 | .read = seq_read, | ||
1872 | .llseek = seq_lseek, | ||
1873 | .release = seq_release | ||
1874 | }; | ||
1875 | |||
1876 | static void __init | ||
1877 | ioc_proc_init(void) | ||
1878 | { | ||
1879 | struct proc_dir_entry *dir, *entry; | ||
1880 | |||
1881 | dir = proc_mkdir("bus/mckinley", NULL); | ||
1882 | if (!dir) | ||
1883 | return; | ||
1884 | |||
1885 | entry = create_proc_entry(ioc_list->name, 0, dir); | ||
1886 | if (entry) | ||
1887 | entry->proc_fops = &ioc_fops; | ||
1888 | } | ||
1889 | #endif | ||
1890 | |||
1891 | static void | ||
1892 | sba_connect_bus(struct pci_bus *bus) | ||
1893 | { | ||
1894 | acpi_handle handle, parent; | ||
1895 | acpi_status status; | ||
1896 | struct ioc *ioc; | ||
1897 | |||
1898 | if (!PCI_CONTROLLER(bus)) | ||
1899 | panic(PFX "no sysdata on bus %d!\n", bus->number); | ||
1900 | |||
1901 | if (PCI_CONTROLLER(bus)->iommu) | ||
1902 | return; | ||
1903 | |||
1904 | handle = PCI_CONTROLLER(bus)->acpi_handle; | ||
1905 | if (!handle) | ||
1906 | return; | ||
1907 | |||
1908 | /* | ||
1909 | * The IOC scope encloses PCI root bridges in the ACPI | ||
1910 | * namespace, so work our way out until we find an IOC we | ||
1911 | * claimed previously. | ||
1912 | */ | ||
1913 | do { | ||
1914 | for (ioc = ioc_list; ioc; ioc = ioc->next) | ||
1915 | if (ioc->handle == handle) { | ||
1916 | PCI_CONTROLLER(bus)->iommu = ioc; | ||
1917 | return; | ||
1918 | } | ||
1919 | |||
1920 | status = acpi_get_parent(handle, &parent); | ||
1921 | handle = parent; | ||
1922 | } while (ACPI_SUCCESS(status)); | ||
1923 | |||
1924 | printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number); | ||
1925 | } | ||
1926 | |||
1927 | #ifdef CONFIG_NUMA | ||
1928 | static void __init | ||
1929 | sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) | ||
1930 | { | ||
1931 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
1932 | union acpi_object *obj; | ||
1933 | acpi_handle phandle; | ||
1934 | unsigned int node; | ||
1935 | |||
1936 | ioc->node = MAX_NUMNODES; | ||
1937 | |||
1938 | /* | ||
1939 | * Check for a _PXM on this node first. We don't typically see | ||
1940 | * one here, so we'll end up getting it from the parent. | ||
1941 | */ | ||
1942 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) { | ||
1943 | if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) | ||
1944 | return; | ||
1945 | |||
1946 | /* Reset the acpi buffer */ | ||
1947 | buffer.length = ACPI_ALLOCATE_BUFFER; | ||
1948 | buffer.pointer = NULL; | ||
1949 | |||
1950 | if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL, | ||
1951 | &buffer))) | ||
1952 | return; | ||
1953 | } | ||
1954 | |||
1955 | if (!buffer.length || !buffer.pointer) | ||
1956 | return; | ||
1957 | |||
1958 | obj = buffer.pointer; | ||
1959 | |||
1960 | if (obj->type != ACPI_TYPE_INTEGER || | ||
1961 | obj->integer.value >= MAX_PXM_DOMAINS) { | ||
1962 | acpi_os_free(buffer.pointer); | ||
1963 | return; | ||
1964 | } | ||
1965 | |||
1966 | node = pxm_to_nid_map[obj->integer.value]; | ||
1967 | acpi_os_free(buffer.pointer); | ||
1968 | |||
1969 | if (node >= MAX_NUMNODES || !node_online(node)) | ||
1970 | return; | ||
1971 | |||
1972 | ioc->node = node; | ||
1973 | return; | ||
1974 | } | ||
1975 | #else | ||
1976 | #define sba_map_ioc_to_node(ioc, handle) | ||
1977 | #endif | ||
1978 | |||
1979 | static int __init | ||
1980 | acpi_sba_ioc_add(struct acpi_device *device) | ||
1981 | { | ||
1982 | struct ioc *ioc; | ||
1983 | acpi_status status; | ||
1984 | u64 hpa, length; | ||
1985 | struct acpi_buffer buffer; | ||
1986 | struct acpi_device_info *dev_info; | ||
1987 | |||
1988 | status = hp_acpi_csr_space(device->handle, &hpa, &length); | ||
1989 | if (ACPI_FAILURE(status)) | ||
1990 | return 1; | ||
1991 | |||
1992 | buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; | ||
1993 | status = acpi_get_object_info(device->handle, &buffer); | ||
1994 | if (ACPI_FAILURE(status)) | ||
1995 | return 1; | ||
1996 | dev_info = buffer.pointer; | ||
1997 | |||
1998 | /* | ||
1999 | * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI | ||
2000 | * root bridges, and its CSR space includes the IOC function. | ||
2001 | */ | ||
2002 | if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) { | ||
2003 | hpa += ZX1_IOC_OFFSET; | ||
2004 | /* zx1 based systems default to kernel page size iommu pages */ | ||
2005 | if (!iovp_shift) | ||
2006 | iovp_shift = min(PAGE_SHIFT, 16); | ||
2007 | } | ||
2008 | ACPI_MEM_FREE(dev_info); | ||
2009 | |||
2010 | /* | ||
2011 | * default anything not caught above or specified on cmdline to 4k | ||
2012 | * iommu page size | ||
2013 | */ | ||
2014 | if (!iovp_shift) | ||
2015 | iovp_shift = 12; | ||
2016 | |||
2017 | ioc = ioc_init(hpa, device->handle); | ||
2018 | if (!ioc) | ||
2019 | return 1; | ||
2020 | |||
2021 | /* setup NUMA node association */ | ||
2022 | sba_map_ioc_to_node(ioc, device->handle); | ||
2023 | return 0; | ||
2024 | } | ||
2025 | |||
2026 | static struct acpi_driver acpi_sba_ioc_driver = { | ||
2027 | .name = "IOC IOMMU Driver", | ||
2028 | .ids = "HWP0001,HWP0004", | ||
2029 | .ops = { | ||
2030 | .add = acpi_sba_ioc_add, | ||
2031 | }, | ||
2032 | }; | ||
2033 | |||
2034 | static int __init | ||
2035 | sba_init(void) | ||
2036 | { | ||
2037 | acpi_bus_register_driver(&acpi_sba_ioc_driver); | ||
2038 | if (!ioc_list) | ||
2039 | return 0; | ||
2040 | |||
2041 | #ifdef CONFIG_PCI | ||
2042 | { | ||
2043 | struct pci_bus *b = NULL; | ||
2044 | while ((b = pci_find_next_bus(b)) != NULL) | ||
2045 | sba_connect_bus(b); | ||
2046 | } | ||
2047 | #endif | ||
2048 | |||
2049 | #ifdef CONFIG_PROC_FS | ||
2050 | ioc_proc_init(); | ||
2051 | #endif | ||
2052 | return 0; | ||
2053 | } | ||
2054 | |||
2055 | subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */ | ||
2056 | |||
2057 | extern void dig_setup(char**); | ||
2058 | /* | ||
2059 | * MAX_DMA_ADDRESS needs to be set up prior to paging_init to do any good, | ||
2060 | * so we use the platform_setup hook to fix it up. | ||
2061 | */ | ||
2062 | void __init | ||
2063 | sba_setup(char **cmdline_p) | ||
2064 | { | ||
2065 | MAX_DMA_ADDRESS = ~0UL; | ||
2066 | dig_setup(cmdline_p); | ||
2067 | } | ||
2068 | |||
2069 | static int __init | ||
2070 | nosbagart(char *str) | ||
2071 | { | ||
2072 | reserve_sba_gart = 0; | ||
2073 | return 1; | ||
2074 | } | ||
2075 | |||
2076 | int | ||
2077 | sba_dma_supported (struct device *dev, u64 mask) | ||
2078 | { | ||
2079 | /* make sure it's at least 32bit capable */ | ||
2080 | return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); | ||
2081 | } | ||
2082 | |||
2083 | int | ||
2084 | sba_dma_mapping_error (dma_addr_t dma_addr) | ||
2085 | { | ||
2086 | return 0; | ||
2087 | } | ||
2088 | |||
2089 | __setup("nosbagart", nosbagart); | ||
2090 | |||
2091 | static int __init | ||
2092 | sba_page_override(char *str) | ||
2093 | { | ||
2094 | unsigned long page_size; | ||
2095 | |||
2096 | page_size = memparse(str, &str); | ||
2097 | switch (page_size) { | ||
2098 | case 4096: | ||
2099 | case 8192: | ||
2100 | case 16384: | ||
2101 | case 65536: | ||
2102 | iovp_shift = ffs(page_size) - 1; | ||
2103 | break; | ||
2104 | default: | ||
2105 | printk("%s: unknown/unsupported iommu page size %ld\n", | ||
2106 | __FUNCTION__, page_size); | ||
2107 | } | ||
2108 | |||
2109 | return 1; | ||
2110 | } | ||
2111 | |||
2112 | __setup("sbapagesize=",sba_page_override); | ||
2113 | |||
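For reference, the two boot options wired up above go on the kernel command line, e.g. "nosbagart sbapagesize=64k" (values illustrative). memparse() accepts the usual k/M/G suffixes, and ffs(65536) - 1 then yields an iovp_shift of 16; a user-space sketch of that last step:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int page_size  = 65536;			/* as if "sbapagesize=64k" was given */
	int iovp_shift = ffs(page_size) - 1;	/* index of lowest set bit -> 16 */

	printf("iovp_shift = %d\n", iovp_shift);
	return 0;
}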
2114 | EXPORT_SYMBOL(sba_dma_mapping_error); | ||
2115 | EXPORT_SYMBOL(sba_map_single); | ||
2116 | EXPORT_SYMBOL(sba_unmap_single); | ||
2117 | EXPORT_SYMBOL(sba_map_sg); | ||
2118 | EXPORT_SYMBOL(sba_unmap_sg); | ||
2119 | EXPORT_SYMBOL(sba_dma_supported); | ||
2120 | EXPORT_SYMBOL(sba_alloc_coherent); | ||
2121 | EXPORT_SYMBOL(sba_free_coherent); | ||
diff --git a/arch/ia64/hp/sim/Kconfig b/arch/ia64/hp/sim/Kconfig new file mode 100644 index 000000000000..18ccb1266e18 --- /dev/null +++ b/arch/ia64/hp/sim/Kconfig | |||
@@ -0,0 +1,20 @@ | |||
1 | |||
2 | menu "HP Simulator drivers" | ||
3 | depends on IA64_HP_SIM || IA64_GENERIC | ||
4 | |||
5 | config HP_SIMETH | ||
6 | bool "Simulated Ethernet " | ||
7 | |||
8 | config HP_SIMSERIAL | ||
9 | bool "Simulated serial driver support" | ||
10 | |||
11 | config HP_SIMSERIAL_CONSOLE | ||
12 | bool "Console for HP simulator" | ||
13 | depends on HP_SIMSERIAL | ||
14 | |||
15 | config HP_SIMSCSI | ||
16 | tristate "Simulated SCSI disk" | ||
17 | depends on SCSI | ||
18 | |||
19 | endmenu | ||
20 | |||
diff --git a/arch/ia64/hp/sim/Makefile b/arch/ia64/hp/sim/Makefile new file mode 100644 index 000000000000..d10da47931d7 --- /dev/null +++ b/arch/ia64/hp/sim/Makefile | |||
@@ -0,0 +1,16 @@ | |||
1 | # | ||
2 | # ia64/platform/hp/sim/Makefile | ||
3 | # | ||
4 | # Copyright (C) 2002 Hewlett-Packard Co. | ||
5 | # David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | # Copyright (C) 1999 Silicon Graphics, Inc. | ||
7 | # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com) | ||
8 | # | ||
9 | |||
10 | obj-y := hpsim_irq.o hpsim_setup.o hpsim.o | ||
11 | obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o | ||
12 | |||
13 | obj-$(CONFIG_HP_SIMETH) += simeth.o | ||
14 | obj-$(CONFIG_HP_SIMSERIAL) += simserial.o | ||
15 | obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o | ||
16 | obj-$(CONFIG_HP_SIMSCSI) += simscsi.o | ||
diff --git a/arch/ia64/hp/sim/boot/Makefile b/arch/ia64/hp/sim/boot/Makefile new file mode 100644 index 000000000000..df6e9968c845 --- /dev/null +++ b/arch/ia64/hp/sim/boot/Makefile | |||
@@ -0,0 +1,37 @@ | |||
1 | # | ||
2 | # ia64/boot/Makefile | ||
3 | # | ||
4 | # This file is subject to the terms and conditions of the GNU General Public | ||
5 | # License. See the file "COPYING" in the main directory of this archive | ||
6 | # for more details. | ||
7 | # | ||
8 | # Copyright (C) 1998, 2003 by David Mosberger-Tang <davidm@hpl.hp.com> | ||
9 | # | ||
10 | |||
11 | targets-$(CONFIG_IA64_HP_SIM) += bootloader | ||
12 | targets := vmlinux.bin vmlinux.gz $(targets-y) | ||
13 | |||
14 | quiet_cmd_cptotop = LN $@ | ||
15 | cmd_cptotop = ln -f $< $@ | ||
16 | |||
17 | vmlinux.gz: $(obj)/vmlinux.gz $(addprefix $(obj)/,$(targets-y)) | ||
18 | $(call cmd,cptotop) | ||
19 | @echo ' Kernel: $@ is ready' | ||
20 | |||
21 | boot: bootloader | ||
22 | |||
23 | bootloader: $(obj)/bootloader | ||
24 | $(call cmd,cptotop) | ||
25 | |||
26 | $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE | ||
27 | $(call if_changed,gzip) | ||
28 | |||
29 | $(obj)/vmlinux.bin: vmlinux FORCE | ||
30 | $(call if_changed,objcopy) | ||
31 | |||
32 | |||
33 | LDFLAGS_bootloader = -static -T | ||
34 | |||
35 | $(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \ | ||
36 | lib/lib.a arch/ia64/lib/lib.a FORCE | ||
37 | $(call if_changed,ld) | ||
diff --git a/arch/ia64/hp/sim/boot/boot_head.S b/arch/ia64/hp/sim/boot/boot_head.S new file mode 100644 index 000000000000..9364199e5632 --- /dev/null +++ b/arch/ia64/hp/sim/boot/boot_head.S | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1998-2003 Hewlett-Packard Co | ||
3 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
4 | */ | ||
5 | |||
6 | #include <asm/asmmacro.h> | ||
7 | |||
8 | .bss | ||
9 | .align 16 | ||
10 | stack_mem: | ||
11 | .skip 16384 /* bootloader stack (16 KB) */ | ||
12 | |||
13 | .text | ||
14 | |||
15 | /* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */ | ||
16 | GLOBAL_ENTRY(printk) | ||
17 | break 0 | ||
18 | END(printk) | ||
19 | |||
20 | GLOBAL_ENTRY(_start) | ||
21 | .prologue | ||
22 | .save rp, r0 | ||
23 | .body | ||
24 | movl gp = __gp | ||
25 | movl sp = stack_mem | ||
26 | bsw.1 | ||
27 | br.call.sptk.many rp=start_bootloader | ||
28 | END(_start) | ||
29 | |||
30 | /* | ||
31 | * Set a break point on this function so that symbols are available to set breakpoints in | ||
32 | * the kernel being debugged. | ||
33 | */ | ||
34 | GLOBAL_ENTRY(debug_break) | ||
35 | br.ret.sptk.many b0 | ||
36 | END(debug_break) | ||
37 | |||
38 | GLOBAL_ENTRY(ssc) | ||
39 | .regstk 5,0,0,0 | ||
40 | mov r15=in4 | ||
41 | break 0x80001 | ||
42 | br.ret.sptk.many b0 | ||
43 | END(ssc) | ||
44 | |||
45 | GLOBAL_ENTRY(jmp_to_kernel) | ||
46 | .regstk 2,0,0,0 | ||
47 | mov r28=in0 | ||
48 | mov b7=in1 | ||
49 | br.sptk.few b7 | ||
50 | END(jmp_to_kernel) | ||
51 | |||
52 | |||
53 | GLOBAL_ENTRY(pal_emulator_static) | ||
54 | mov r8=-1 | ||
55 | mov r9=256 | ||
56 | ;; | ||
57 | cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */ | ||
58 | (p6) br.cond.sptk.few static | ||
59 | ;; | ||
60 | mov r9=512 | ||
61 | ;; | ||
62 | cmp.gtu p6,p7=r9,r28 | ||
63 | (p6) br.cond.sptk.few stacked | ||
64 | ;; | ||
65 | static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */ | ||
66 | (p7) br.cond.sptk.few 1f | ||
67 | ;; | ||
68 | mov r8=0 /* status = 0 */ | ||
69 | movl r9=0x100000000 /* tc.base */ | ||
70 | movl r10=0x0000000200000003 /* count[0], count[1] */ | ||
71 | movl r11=0x1000000000002000 /* stride[0], stride[1] */ | ||
72 | br.cond.sptk.few rp | ||
73 | 1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */ | ||
74 | (p7) br.cond.sptk.few 1f | ||
75 | mov r8=0 /* status = 0 */ | ||
76 | movl r9 =0x100000064 /* proc_ratio (1/100) */ | ||
77 | movl r10=0x100000100 /* bus_ratio<<32 (1/256) */ | ||
78 | movl r11=0x100000064 /* itc_ratio<<32 (1/100) */ | ||
79 | ;; | ||
80 | 1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */ | ||
81 | (p7) br.cond.sptk.few 1f | ||
82 | mov r8=0 /* status = 0 */ | ||
83 | mov r9=96 /* num phys stacked */ | ||
84 | mov r10=0 /* hints */ | ||
85 | mov r11=0 | ||
86 | br.cond.sptk.few rp | ||
87 | 1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */ | ||
88 | (p7) br.cond.sptk.few 1f | ||
89 | mov r9=ar.lc | ||
90 | movl r8=524288 /* flush 512k cache lines (16MB) */ | ||
91 | ;; | ||
92 | mov ar.lc=r8 | ||
93 | movl r8=0xe000000000000000 | ||
94 | ;; | ||
95 | .loop: fc r8 | ||
96 | add r8=32,r8 | ||
97 | br.cloop.sptk.few .loop | ||
98 | sync.i | ||
99 | ;; | ||
100 | srlz.i | ||
101 | ;; | ||
102 | mov ar.lc=r9 | ||
103 | mov r8=r0 | ||
104 | ;; | ||
105 | 1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */ | ||
106 | (p7) br.cond.sptk.few 1f | ||
107 | mov r8=0 /* status = 0 */ | ||
108 | movl r9 =0x08122f04 /* generic=4 width=47 retired=8 cycles=18 */ | ||
109 | mov r10=0 /* reserved */ | ||
110 | mov r11=0 /* reserved */ | ||
111 | mov r16=0xffff /* implemented PMC */ | ||
112 | mov r17=0x3ffff /* implemented PMD */ | ||
113 | add r18=8,r29 /* second index */ | ||
114 | ;; | ||
115 | st8 [r29]=r16,16 /* store implemented PMC */ | ||
116 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
117 | ;; | ||
118 | st8 [r29]=r0,16 /* clear remaining bits */ | ||
119 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
120 | ;; | ||
121 | st8 [r29]=r17,16 /* store implemented PMD */ | ||
122 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
123 | mov r16=0xf0 /* cycles count capable PMC */ | ||
124 | ;; | ||
125 | st8 [r29]=r0,16 /* clear remaining bits */ | ||
126 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
127 | mov r17=0xf0 /* retired bundles capable PMC */ | ||
128 | ;; | ||
129 | st8 [r29]=r16,16 /* store cycles capable */ | ||
130 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
131 | ;; | ||
132 | st8 [r29]=r0,16 /* clear remaining bits */ | ||
133 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
134 | ;; | ||
135 | st8 [r29]=r17,16 /* store retired bundle capable */ | ||
136 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
137 | ;; | ||
138 | st8 [r29]=r0,16 /* clear remaining bits */ | ||
139 | st8 [r18]=r0,16 /* clear remaining bits */ | ||
140 | ;; | ||
141 | 1: br.cond.sptk.few rp | ||
142 | stacked: | ||
143 | br.ret.sptk.few rp | ||
144 | END(pal_emulator_static) | ||
diff --git a/arch/ia64/hp/sim/boot/bootloader.c b/arch/ia64/hp/sim/boot/bootloader.c new file mode 100644 index 000000000000..51a7b7b4dd0e --- /dev/null +++ b/arch/ia64/hp/sim/boot/bootloader.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * arch/ia64/hp/sim/boot/bootloader.c | ||
3 | * | ||
4 | * Loads an ELF kernel. | ||
5 | * | ||
6 | * Copyright (C) 1998-2003 Hewlett-Packard Co | ||
7 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
8 | * Stephane Eranian <eranian@hpl.hp.com> | ||
9 | * | ||
10 | * 01/07/99 S.Eranian modified to pass command line arguments to kernel | ||
11 | */ | ||
12 | struct task_struct; /* forward declaration for elf.h */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/elf.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | |||
19 | #include <asm/elf.h> | ||
20 | #include <asm/intrinsics.h> | ||
21 | #include <asm/pal.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/sal.h> | ||
24 | #include <asm/system.h> | ||
25 | |||
26 | #include "ssc.h" | ||
27 | |||
28 | struct disk_req { | ||
29 | unsigned long addr; | ||
30 | unsigned len; | ||
31 | }; | ||
32 | |||
33 | struct disk_stat { | ||
34 | int fd; | ||
35 | unsigned count; | ||
36 | }; | ||
37 | |||
38 | extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry); | ||
39 | extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen); | ||
40 | extern void debug_break (void); | ||
41 | |||
42 | static void | ||
43 | cons_write (const char *buf) | ||
44 | { | ||
45 | unsigned long ch; | ||
46 | |||
47 | while ((ch = *buf++) != '\0') { | ||
48 | ssc(ch, 0, 0, 0, SSC_PUTCHAR); | ||
49 | if (ch == '\n') | ||
50 | ssc('\r', 0, 0, 0, SSC_PUTCHAR); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | #define MAX_ARGS 32 | ||
55 | |||
56 | void | ||
57 | start_bootloader (void) | ||
58 | { | ||
59 | static char mem[4096]; | ||
60 | static char buffer[1024]; | ||
61 | unsigned long off; | ||
62 | int fd, i; | ||
63 | struct disk_req req; | ||
64 | struct disk_stat stat; | ||
65 | struct elfhdr *elf; | ||
66 | struct elf_phdr *elf_phdr; /* program header */ | ||
67 | unsigned long e_entry, e_phoff, e_phnum; | ||
68 | register struct ia64_boot_param *bp; | ||
69 | char *kpath, *args; | ||
70 | long arglen = 0; | ||
71 | |||
72 | ssc(0, 0, 0, 0, SSC_CONSOLE_INIT); | ||
73 | |||
74 | /* | ||
75 | * S.Eranian: extract the commandline argument from the simulator | ||
76 | * | ||
77 | * The expected format is as follows: | ||
78 | * | ||
79 | * kernelname args... | ||
80 | * | ||
81 | * Both are optional but you can't have the second one without the first. | ||
82 | */ | ||
83 | arglen = ssc((long) buffer, 0, 0, 0, SSC_GET_ARGS); | ||
84 | |||
85 | kpath = "vmlinux"; | ||
86 | args = buffer; | ||
87 | if (arglen > 0) { | ||
88 | kpath = buffer; | ||
89 | while (*args != ' ' && *args != '\0') | ||
90 | ++args, --arglen; | ||
91 | if (*args == ' ') | ||
92 | *args++ = '\0', --arglen; | ||
93 | } | ||
94 | |||
95 | if (arglen <= 0) { | ||
96 | args = ""; | ||
97 | arglen = 1; | ||
98 | } | ||
99 | |||
100 | fd = ssc((long) kpath, 1, 0, 0, SSC_OPEN); | ||
101 | |||
102 | if (fd < 0) { | ||
103 | cons_write(kpath); | ||
104 | cons_write(": file not found, reboot now\n"); | ||
105 | for(;;); | ||
106 | } | ||
107 | stat.fd = fd; | ||
108 | off = 0; | ||
109 | |||
110 | req.len = sizeof(mem); | ||
111 | req.addr = (long) mem; | ||
112 | ssc(fd, 1, (long) &req, off, SSC_READ); | ||
113 | ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); | ||
114 | |||
115 | elf = (struct elfhdr *) mem; | ||
116 | if (elf->e_ident[0] != 0x7f || strncmp(elf->e_ident + 1, "ELF", 3) != 0) { | ||
117 | cons_write("not an ELF file\n"); | ||
118 | return; | ||
119 | } | ||
120 | if (elf->e_type != ET_EXEC) { | ||
121 | cons_write("not an ELF executable\n"); | ||
122 | return; | ||
123 | } | ||
124 | if (!elf_check_arch(elf)) { | ||
125 | cons_write("kernel not for this processor\n"); | ||
126 | return; | ||
127 | } | ||
128 | |||
129 | e_entry = elf->e_entry; | ||
130 | e_phnum = elf->e_phnum; | ||
131 | e_phoff = elf->e_phoff; | ||
132 | |||
133 | cons_write("loading "); | ||
134 | cons_write(kpath); | ||
135 | cons_write("...\n"); | ||
136 | |||
137 | for (i = 0; i < e_phnum; ++i) { | ||
138 | req.len = sizeof(*elf_phdr); | ||
139 | req.addr = (long) mem; | ||
140 | ssc(fd, 1, (long) &req, e_phoff, SSC_READ); | ||
141 | ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); | ||
142 | if (stat.count != sizeof(*elf_phdr)) { | ||
143 | cons_write("failed to read phdr\n"); | ||
144 | return; | ||
145 | } | ||
146 | e_phoff += sizeof(*elf_phdr); | ||
147 | |||
148 | elf_phdr = (struct elf_phdr *) mem; | ||
149 | |||
150 | if (elf_phdr->p_type != PT_LOAD) | ||
151 | continue; | ||
152 | |||
153 | req.len = elf_phdr->p_filesz; | ||
154 | req.addr = __pa(elf_phdr->p_paddr); | ||
155 | ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ); | ||
156 | ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); | ||
157 | memset((char *)__pa(elf_phdr->p_paddr) + elf_phdr->p_filesz, 0, | ||
158 | elf_phdr->p_memsz - elf_phdr->p_filesz); | ||
159 | } | ||
160 | ssc(fd, 0, 0, 0, SSC_CLOSE); | ||
161 | |||
162 | cons_write("starting kernel...\n"); | ||
163 | |||
164 | /* fake an I/O base address: */ | ||
165 | ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL); | ||
166 | |||
167 | bp = sys_fw_init(args, arglen); | ||
168 | |||
169 | ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS); | ||
170 | |||
171 | debug_break(); | ||
172 | jmp_to_kernel((unsigned long) bp, e_entry); | ||
173 | |||
174 | cons_write("kernel returned!\n"); | ||
175 | ssc(-1, 0, 0, 0, SSC_EXIT); | ||
176 | } | ||
diff --git a/arch/ia64/hp/sim/boot/bootloader.lds b/arch/ia64/hp/sim/boot/bootloader.lds new file mode 100644 index 000000000000..69ae58531033 --- /dev/null +++ b/arch/ia64/hp/sim/boot/bootloader.lds | |||
@@ -0,0 +1,65 @@ | |||
1 | OUTPUT_FORMAT("elf64-ia64-little") | ||
2 | OUTPUT_ARCH(ia64) | ||
3 | ENTRY(_start) | ||
4 | SECTIONS | ||
5 | { | ||
6 | /* Read-only sections, merged into text segment: */ | ||
7 | . = 0x100000; | ||
8 | |||
9 | _text = .; | ||
10 | .text : { *(__ivt_section) *(.text) } | ||
11 | _etext = .; | ||
12 | |||
13 | /* Global data */ | ||
14 | _data = .; | ||
15 | .rodata : { *(.rodata) *(.rodata.*) } | ||
16 | .data : { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } | ||
17 | __gp = ALIGN (8) + 0x200000; | ||
18 | .got : { *(.got.plt) *(.got) } | ||
19 | /* We want the small data sections together, so single-instruction offsets | ||
20 | can access them all, and initialized data all before uninitialized, so | ||
21 | we can shorten the on-disk segment size. */ | ||
22 | .sdata : { *(.sdata) } | ||
23 | _edata = .; | ||
24 | |||
25 | _bss = .; | ||
26 | .sbss : { *(.sbss) *(.scommon) } | ||
27 | .bss : { *(.bss) *(COMMON) } | ||
28 | . = ALIGN(64 / 8); | ||
29 | _end = . ; | ||
30 | |||
31 | /* Stabs debugging sections. */ | ||
32 | .stab 0 : { *(.stab) } | ||
33 | .stabstr 0 : { *(.stabstr) } | ||
34 | .stab.excl 0 : { *(.stab.excl) } | ||
35 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
36 | .stab.index 0 : { *(.stab.index) } | ||
37 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
38 | .comment 0 : { *(.comment) } | ||
39 | /* DWARF debug sections. | ||
40 | Symbols in the DWARF debugging sections are relative to the beginning | ||
41 | of the section so we begin them at 0. */ | ||
42 | /* DWARF 1 */ | ||
43 | .debug 0 : { *(.debug) } | ||
44 | .line 0 : { *(.line) } | ||
45 | /* GNU DWARF 1 extensions */ | ||
46 | .debug_srcinfo 0 : { *(.debug_srcinfo) } | ||
47 | .debug_sfnames 0 : { *(.debug_sfnames) } | ||
48 | /* DWARF 1.1 and DWARF 2 */ | ||
49 | .debug_aranges 0 : { *(.debug_aranges) } | ||
50 | .debug_pubnames 0 : { *(.debug_pubnames) } | ||
51 | /* DWARF 2 */ | ||
52 | .debug_info 0 : { *(.debug_info) } | ||
53 | .debug_abbrev 0 : { *(.debug_abbrev) } | ||
54 | .debug_line 0 : { *(.debug_line) } | ||
55 | .debug_frame 0 : { *(.debug_frame) } | ||
56 | .debug_str 0 : { *(.debug_str) } | ||
57 | .debug_loc 0 : { *(.debug_loc) } | ||
58 | .debug_macinfo 0 : { *(.debug_macinfo) } | ||
59 | /* SGI/MIPS DWARF 2 extensions */ | ||
60 | .debug_weaknames 0 : { *(.debug_weaknames) } | ||
61 | .debug_funcnames 0 : { *(.debug_funcnames) } | ||
62 | .debug_typenames 0 : { *(.debug_typenames) } | ||
63 | .debug_varnames 0 : { *(.debug_varnames) } | ||
64 | /* These must appear regardless of . */ | ||
65 | } | ||
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c new file mode 100644 index 000000000000..5c46928e3dc6 --- /dev/null +++ b/arch/ia64/hp/sim/boot/fw-emu.c | |||
@@ -0,0 +1,398 @@ | |||
1 | /* | ||
2 | * PAL & SAL emulation. | ||
3 | * | ||
4 | * Copyright (C) 1998-2001 Hewlett-Packard Co | ||
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | */ | ||
7 | #include <linux/config.h> | ||
8 | |||
9 | #ifdef CONFIG_PCI | ||
10 | # include <linux/pci.h> | ||
11 | #endif | ||
12 | |||
13 | #include <linux/efi.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/pal.h> | ||
16 | #include <asm/sal.h> | ||
17 | |||
18 | #include "ssc.h" | ||
19 | |||
20 | #define MB (1024*1024UL) | ||
21 | |||
22 | #define SIMPLE_MEMMAP 1 | ||
23 | |||
24 | #if SIMPLE_MEMMAP | ||
25 | # define NUM_MEM_DESCS 4 | ||
26 | #else | ||
27 | # define NUM_MEM_DESCS 16 | ||
28 | #endif | ||
29 | |||
30 | static char fw_mem[( sizeof(struct ia64_boot_param) | ||
31 | + sizeof(efi_system_table_t) | ||
32 | + sizeof(efi_runtime_services_t) | ||
33 | + 1*sizeof(efi_config_table_t) | ||
34 | + sizeof(struct ia64_sal_systab) | ||
35 | + sizeof(struct ia64_sal_desc_entry_point) | ||
36 | + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t)) | ||
37 | + 1024)] __attribute__ ((aligned (8))); | ||
38 | |||
39 | #define SECS_PER_HOUR (60 * 60) | ||
40 | #define SECS_PER_DAY (SECS_PER_HOUR * 24) | ||
41 | |||
42 | /* Compute the broken-down time representation of T (seconds since the | ||
43 | Unix epoch, UTC) and store year, month, day, hour, minute, second | ||
44 | into *TP (an efi_time_t). | ||
45 | Return nonzero if successful. */ | ||
46 | int | ||
47 | offtime (unsigned long t, efi_time_t *tp) | ||
48 | { | ||
49 | const unsigned short int __mon_yday[2][13] = | ||
50 | { | ||
51 | /* Normal years. */ | ||
52 | { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, | ||
53 | /* Leap years. */ | ||
54 | { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } | ||
55 | }; | ||
56 | long int days, rem, y; | ||
57 | const unsigned short int *ip; | ||
58 | |||
59 | days = t / SECS_PER_DAY; | ||
60 | rem = t % SECS_PER_DAY; | ||
61 | while (rem < 0) { | ||
62 | rem += SECS_PER_DAY; | ||
63 | --days; | ||
64 | } | ||
65 | while (rem >= SECS_PER_DAY) { | ||
66 | rem -= SECS_PER_DAY; | ||
67 | ++days; | ||
68 | } | ||
69 | tp->hour = rem / SECS_PER_HOUR; | ||
70 | rem %= SECS_PER_HOUR; | ||
71 | tp->minute = rem / 60; | ||
72 | tp->second = rem % 60; | ||
73 | /* January 1, 1970 was a Thursday. */ | ||
74 | y = 1970; | ||
75 | |||
76 | # define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) | ||
77 | # define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) | ||
78 | # define __isleap(year) \ | ||
79 | ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) | ||
80 | |||
81 | while (days < 0 || days >= (__isleap (y) ? 366 : 365)) { | ||
82 | /* Guess a corrected year, assuming 365 days per year. */ | ||
83 | long int yg = y + days / 365 - (days % 365 < 0); | ||
84 | |||
85 | /* Adjust DAYS and Y to match the guessed year. */ | ||
86 | days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1) | ||
87 | - LEAPS_THRU_END_OF (y - 1)); | ||
88 | y = yg; | ||
89 | } | ||
90 | tp->year = y; | ||
91 | ip = __mon_yday[__isleap(y)]; | ||
92 | for (y = 11; days < (long int) ip[y]; --y) | ||
93 | continue; | ||
94 | days -= ip[y]; | ||
95 | tp->month = y + 1; | ||
96 | tp->day = days + 1; | ||
97 | return 1; | ||
98 | } | ||
99 | |||
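As a quick sanity check of offtime() above (not part of the firmware emulator itself): the same epoch value can be cross-checked in user space with gmtime(). For example, t = 1000000000 corresponds to 2001-09-09 01:46:40 UTC:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t t = 1000000000;		/* example input for offtime() */
	struct tm *tm = gmtime(&t);

	/* Expect 2001-09-09 01:46:40 UTC, matching what offtime() stores. */
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
	       tm->tm_hour, tm->tm_min, tm->tm_sec);
	return 0;
}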
100 | extern void pal_emulator_static (void); | ||
101 | |||
102 | /* Macros to emulate SAL calls using legacy IN and OUT accesses to 0xCF8/0xCFC. */ | ||
103 | |||
104 | #define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3) | ||
105 | |||
106 | #define REG_OFFSET(addr) (0x00000000000000FF & (addr)) | ||
107 | #define DEVICE_FUNCTION(addr) (0x000000000000FF00 & (addr)) | ||
108 | #define BUS_NUMBER(addr) (0x0000000000FF0000 & (addr)) | ||
109 | |||
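To illustrate the configuration-address layout these macros assume (register in bits 0-7, devfn in bits 8-15, bus number in bits 16-23), here is a stand-alone sketch of the type-1 command that would be written to port 0xCF8; the bus/device/function/register values are made up for the example:

#include <stdio.h>

#define BUILD_CMD(addr)		((0x80000000 | (addr)) & ~3)
#define REG_OFFSET(addr)	(0x00000000000000FFUL & (addr))

int main(void)
{
	unsigned long devfn = (3 << 3) | 0;			 /* device 3, function 0 */
	unsigned long addr  = (0UL << 16) | (devfn << 8) | 0x04; /* bus 0, register 0x04 */

	printf("CF8 command = 0x%08lx\n", BUILD_CMD(addr));	/* 0x80001804 */
	printf("CFC byte lane = %lu\n", REG_OFFSET(addr) & 3);	/* 0 */
	return 0;
}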
110 | static efi_status_t | ||
111 | fw_efi_get_time (efi_time_t *tm, efi_time_cap_t *tc) | ||
112 | { | ||
113 | #if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) | ||
114 | struct { | ||
115 | int tv_sec; /* must be 32bits to work */ | ||
116 | int tv_usec; | ||
117 | } tv32bits; | ||
118 | |||
119 | ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD); | ||
120 | |||
121 | memset(tm, 0, sizeof(*tm)); | ||
122 | offtime(tv32bits.tv_sec, tm); | ||
123 | |||
124 | if (tc) | ||
125 | memset(tc, 0, sizeof(*tc)); | ||
126 | #else | ||
127 | # error Not implemented yet... | ||
128 | #endif | ||
129 | return EFI_SUCCESS; | ||
130 | } | ||
131 | |||
132 | static void | ||
133 | efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data) | ||
134 | { | ||
135 | #if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) | ||
136 | ssc(status, 0, 0, 0, SSC_EXIT); | ||
137 | #else | ||
138 | # error Not implemented yet... | ||
139 | #endif | ||
140 | } | ||
141 | |||
142 | static efi_status_t | ||
143 | efi_unimplemented (void) | ||
144 | { | ||
145 | return EFI_UNSUPPORTED; | ||
146 | } | ||
147 | |||
148 | static struct sal_ret_values | ||
149 | sal_emulator (long index, unsigned long in1, unsigned long in2, | ||
150 | unsigned long in3, unsigned long in4, unsigned long in5, | ||
151 | unsigned long in6, unsigned long in7) | ||
152 | { | ||
153 | long r9 = 0; | ||
154 | long r10 = 0; | ||
155 | long r11 = 0; | ||
156 | long status; | ||
157 | |||
158 | /* | ||
159 | * Don't do a "switch" here since that gives us code that | ||
160 | * isn't self-relocatable. | ||
161 | */ | ||
162 | status = 0; | ||
163 | if (index == SAL_FREQ_BASE) { | ||
164 | switch (in1) { | ||
165 | case SAL_FREQ_BASE_PLATFORM: | ||
166 | r9 = 200000000; | ||
167 | break; | ||
168 | |||
169 | case SAL_FREQ_BASE_INTERVAL_TIMER: | ||
170 | /* | ||
171 | * Is this supposed to be the cr.itc frequency | ||
172 | * or something platform specific? The SAL | ||
173 | * doc ain't exactly clear on this... | ||
174 | */ | ||
175 | r9 = 700000000; | ||
176 | break; | ||
177 | |||
178 | case SAL_FREQ_BASE_REALTIME_CLOCK: | ||
179 | r9 = 1; | ||
180 | break; | ||
181 | |||
182 | default: | ||
183 | status = -1; | ||
184 | break; | ||
185 | } | ||
186 | } else if (index == SAL_SET_VECTORS) { | ||
187 | ; | ||
188 | } else if (index == SAL_GET_STATE_INFO) { | ||
189 | ; | ||
190 | } else if (index == SAL_GET_STATE_INFO_SIZE) { | ||
191 | ; | ||
192 | } else if (index == SAL_CLEAR_STATE_INFO) { | ||
193 | ; | ||
194 | } else if (index == SAL_MC_RENDEZ) { | ||
195 | ; | ||
196 | } else if (index == SAL_MC_SET_PARAMS) { | ||
197 | ; | ||
198 | } else if (index == SAL_CACHE_FLUSH) { | ||
199 | ; | ||
200 | } else if (index == SAL_CACHE_INIT) { | ||
201 | ; | ||
202 | #ifdef CONFIG_PCI | ||
203 | } else if (index == SAL_PCI_CONFIG_READ) { | ||
204 | /* | ||
205 | * in1 contains the PCI configuration address and in2 | ||
206 | * the size of the read. The value that is read is | ||
207 | * returned via the general register r9. | ||
208 | */ | ||
209 | outl(BUILD_CMD(in1), 0xCF8); | ||
210 | if (in2 == 1) /* Reading byte */ | ||
211 | r9 = inb(0xCFC + ((REG_OFFSET(in1) & 3))); | ||
212 | else if (in2 == 2) /* Reading word */ | ||
213 | r9 = inw(0xCFC + ((REG_OFFSET(in1) & 2))); | ||
214 | else /* Reading dword */ | ||
215 | r9 = inl(0xCFC); | ||
216 | status = PCIBIOS_SUCCESSFUL; | ||
217 | } else if (index == SAL_PCI_CONFIG_WRITE) { | ||
218 | /* | ||
219 | * in1 contains the PCI configuration address, in2 the | ||
220 | * size of the write, and in3 the actual value to be | ||
221 | * written out. | ||
222 | */ | ||
223 | outl(BUILD_CMD(in1), 0xCF8); | ||
224 | if (in2 == 1) /* Writing byte */ | ||
225 | outb(in3, 0xCFC + ((REG_OFFSET(in1) & 3))); | ||
226 | else if (in2 == 2) /* Writing word */ | ||
227 | outw(in3, 0xCFC + ((REG_OFFSET(in1) & 2))); | ||
228 | else /* Writing dword */ | ||
229 | outl(in3, 0xCFC); | ||
230 | status = PCIBIOS_SUCCESSFUL; | ||
231 | #endif /* CONFIG_PCI */ | ||
232 | } else if (index == SAL_UPDATE_PAL) { | ||
233 | ; | ||
234 | } else { | ||
235 | status = -1; | ||
236 | } | ||
237 | return ((struct sal_ret_values) {status, r9, r10, r11}); | ||
238 | } | ||
239 | |||
240 | |||
241 | /* | ||
242 | * This is here to work around a bug in egcs-1.1.1b that causes the | ||
244 | * compiler to crash (seems like a bug in the new alias analysis code). | ||
244 | */ | ||
245 | void * | ||
246 | id (long addr) | ||
247 | { | ||
248 | return (void *) addr; | ||
249 | } | ||
250 | |||
251 | struct ia64_boot_param * | ||
252 | sys_fw_init (const char *args, int arglen) | ||
253 | { | ||
254 | efi_system_table_t *efi_systab; | ||
255 | efi_runtime_services_t *efi_runtime; | ||
256 | efi_config_table_t *efi_tables; | ||
257 | struct ia64_sal_systab *sal_systab; | ||
258 | efi_memory_desc_t *efi_memmap, *md; | ||
259 | unsigned long *pal_desc, *sal_desc; | ||
260 | struct ia64_sal_desc_entry_point *sal_ed; | ||
261 | struct ia64_boot_param *bp; | ||
262 | unsigned char checksum = 0; | ||
263 | char *cp, *cmd_line; | ||
264 | int i = 0; | ||
265 | # define MAKE_MD(typ, attr, start, end) \ | ||
266 | do { \ | ||
267 | md = efi_memmap + i++; \ | ||
268 | md->type = typ; \ | ||
269 | md->pad = 0; \ | ||
270 | md->phys_addr = start; \ | ||
271 | md->virt_addr = 0; \ | ||
272 | md->num_pages = (end - start) >> 12; \ | ||
273 | md->attribute = attr; \ | ||
274 | } while (0) | ||
275 | |||
276 | memset(fw_mem, 0, sizeof(fw_mem)); | ||
277 | |||
278 | pal_desc = (unsigned long *) &pal_emulator_static; | ||
279 | sal_desc = (unsigned long *) &sal_emulator; | ||
280 | |||
281 | cp = fw_mem; | ||
282 | efi_systab = (void *) cp; cp += sizeof(*efi_systab); | ||
283 | efi_runtime = (void *) cp; cp += sizeof(*efi_runtime); | ||
284 | efi_tables = (void *) cp; cp += sizeof(*efi_tables); | ||
285 | sal_systab = (void *) cp; cp += sizeof(*sal_systab); | ||
286 | sal_ed = (void *) cp; cp += sizeof(*sal_ed); | ||
287 | efi_memmap = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap); | ||
288 | bp = (void *) cp; cp += sizeof(*bp); | ||
289 | cmd_line = (void *) cp; | ||
290 | |||
291 | if (args) { | ||
292 | if (arglen >= 1024) | ||
293 | arglen = 1023; | ||
294 | memcpy(cmd_line, args, arglen); | ||
295 | } else { | ||
296 | arglen = 0; | ||
297 | } | ||
298 | cmd_line[arglen] = '\0'; | ||
299 | |||
300 | memset(efi_systab, 0, sizeof(*efi_systab)); | ||
301 | efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE; | ||
302 | efi_systab->hdr.revision = EFI_SYSTEM_TABLE_REVISION; | ||
303 | efi_systab->hdr.headersize = sizeof(efi_systab->hdr); | ||
304 | efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0"); | ||
305 | efi_systab->fw_revision = 1; | ||
306 | efi_systab->runtime = (void *) __pa(efi_runtime); | ||
307 | efi_systab->nr_tables = 1; | ||
308 | efi_systab->tables = __pa(efi_tables); | ||
309 | |||
310 | efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE; | ||
311 | efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION; | ||
312 | efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr); | ||
313 | efi_runtime->get_time = __pa(&fw_efi_get_time); | ||
314 | efi_runtime->set_time = __pa(&efi_unimplemented); | ||
315 | efi_runtime->get_wakeup_time = __pa(&efi_unimplemented); | ||
316 | efi_runtime->set_wakeup_time = __pa(&efi_unimplemented); | ||
317 | efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented); | ||
318 | efi_runtime->get_variable = __pa(&efi_unimplemented); | ||
319 | efi_runtime->get_next_variable = __pa(&efi_unimplemented); | ||
320 | efi_runtime->set_variable = __pa(&efi_unimplemented); | ||
321 | efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented); | ||
322 | efi_runtime->reset_system = __pa(&efi_reset_system); | ||
323 | |||
324 | efi_tables->guid = SAL_SYSTEM_TABLE_GUID; | ||
325 | efi_tables->table = __pa(sal_systab); | ||
326 | |||
327 | /* fill in the SAL system table: */ | ||
328 | memcpy(sal_systab->signature, "SST_", 4); | ||
329 | sal_systab->size = sizeof(*sal_systab); | ||
330 | sal_systab->sal_rev_minor = 1; | ||
331 | sal_systab->sal_rev_major = 0; | ||
332 | sal_systab->entry_count = 1; | ||
333 | |||
334 | #ifdef CONFIG_IA64_GENERIC | ||
335 | strcpy(sal_systab->oem_id, "Generic"); | ||
336 | strcpy(sal_systab->product_id, "IA-64 system"); | ||
337 | #endif | ||
338 | |||
339 | #ifdef CONFIG_IA64_HP_SIM | ||
340 | strcpy(sal_systab->oem_id, "Hewlett-Packard"); | ||
341 | strcpy(sal_systab->product_id, "HP-simulator"); | ||
342 | #endif | ||
343 | |||
344 | #ifdef CONFIG_IA64_SDV | ||
345 | strcpy(sal_systab->oem_id, "Intel"); | ||
346 | strcpy(sal_systab->product_id, "SDV"); | ||
347 | #endif | ||
348 | |||
349 | /* fill in an entry point: */ | ||
350 | sal_ed->type = SAL_DESC_ENTRY_POINT; | ||
351 | sal_ed->pal_proc = __pa(pal_desc[0]); | ||
352 | sal_ed->sal_proc = __pa(sal_desc[0]); | ||
353 | sal_ed->gp = __pa(sal_desc[1]); | ||
354 | |||
355 | for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp) | ||
356 | checksum += *cp; | ||
357 | |||
358 | sal_systab->checksum = -checksum; | ||
359 | |||
360 | #if SIMPLE_MEMMAP | ||
361 | /* simulate free memory at physical address zero */ | ||
362 | MAKE_MD(EFI_BOOT_SERVICES_DATA, EFI_MEMORY_WB, 0*MB, 1*MB); | ||
363 | MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB, 1*MB, 2*MB); | ||
364 | MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 2*MB, 130*MB); | ||
365 | MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 4096*MB, 4128*MB); | ||
366 | #else | ||
367 | MAKE_MD( 4, 0x9, 0x0000000000000000, 0x0000000000001000); | ||
368 | MAKE_MD( 7, 0x9, 0x0000000000001000, 0x000000000008a000); | ||
369 | MAKE_MD( 4, 0x9, 0x000000000008a000, 0x00000000000a0000); | ||
370 | MAKE_MD( 5, 0x8000000000000009, 0x00000000000c0000, 0x0000000000100000); | ||
371 | MAKE_MD( 7, 0x9, 0x0000000000100000, 0x0000000004400000); | ||
372 | MAKE_MD( 2, 0x9, 0x0000000004400000, 0x0000000004be5000); | ||
373 | MAKE_MD( 7, 0x9, 0x0000000004be5000, 0x000000007f77e000); | ||
374 | MAKE_MD( 6, 0x8000000000000009, 0x000000007f77e000, 0x000000007fb94000); | ||
375 | MAKE_MD( 6, 0x8000000000000009, 0x000000007fb94000, 0x000000007fb95000); | ||
376 | MAKE_MD( 6, 0x8000000000000009, 0x000000007fb95000, 0x000000007fc00000); | ||
377 | MAKE_MD(13, 0x8000000000000009, 0x000000007fc00000, 0x000000007fc3a000); | ||
378 | MAKE_MD( 7, 0x9, 0x000000007fc3a000, 0x000000007fea0000); | ||
379 | MAKE_MD( 5, 0x8000000000000009, 0x000000007fea0000, 0x000000007fea8000); | ||
380 | MAKE_MD( 7, 0x9, 0x000000007fea8000, 0x000000007feab000); | ||
381 | MAKE_MD( 5, 0x8000000000000009, 0x000000007feab000, 0x000000007ffff000); | ||
382 | MAKE_MD( 7, 0x9, 0x00000000ff400000, 0x0000000104000000); | ||
383 | #endif | ||
384 | |||
385 | bp->efi_systab = __pa(&fw_mem); | ||
386 | bp->efi_memmap = __pa(efi_memmap); | ||
387 | bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t); | ||
388 | bp->efi_memdesc_size = sizeof(efi_memory_desc_t); | ||
389 | bp->efi_memdesc_version = 1; | ||
390 | bp->command_line = __pa(cmd_line); | ||
391 | bp->console_info.num_cols = 80; | ||
392 | bp->console_info.num_rows = 25; | ||
393 | bp->console_info.orig_x = 0; | ||
394 | bp->console_info.orig_y = 24; | ||
395 | bp->fpswa = 0; | ||
396 | |||
397 | return bp; | ||
398 | } | ||
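The checksum loop in sys_fw_init() above picks sal_systab->checksum so that the byte sum of the SAL system table region, including the stored checksum, comes out to zero modulo 256. A minimal verification sketch, assuming base and len cover the same span the loop walks (sal_systab up to efi_memmap); sal_systab_checksum_ok() is illustrative and not part of the firmware emulator:

static int
sal_systab_checksum_ok (const unsigned char *base, unsigned long len)
{
	unsigned char sum = 0;		/* 8-bit sum, wraps modulo 256 */
	unsigned long i;

	for (i = 0; i < len; i++)
		sum += base[i];
	/* the stored checksum was chosen as -sum, so the total must be zero */
	return sum == 0;
}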
diff --git a/arch/ia64/hp/sim/boot/ssc.h b/arch/ia64/hp/sim/boot/ssc.h new file mode 100644 index 000000000000..3b94c03e43a9 --- /dev/null +++ b/arch/ia64/hp/sim/boot/ssc.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1998-2003 Hewlett-Packard Co | ||
3 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
4 | * Stephane Eranian <eranian@hpl.hp.com> | ||
5 | */ | ||
6 | #ifndef ssc_h | ||
7 | #define ssc_h | ||
8 | |||
9 | /* Simulator system calls: */ | ||
10 | |||
11 | #define SSC_CONSOLE_INIT 20 | ||
12 | #define SSC_GETCHAR 21 | ||
13 | #define SSC_PUTCHAR 31 | ||
14 | #define SSC_OPEN 50 | ||
15 | #define SSC_CLOSE 51 | ||
16 | #define SSC_READ 52 | ||
17 | #define SSC_WRITE 53 | ||
18 | #define SSC_GET_COMPLETION 54 | ||
19 | #define SSC_WAIT_COMPLETION 55 | ||
20 | #define SSC_CONNECT_INTERRUPT 58 | ||
21 | #define SSC_GENERATE_INTERRUPT 59 | ||
22 | #define SSC_SET_PERIODIC_INTERRUPT 60 | ||
23 | #define SSC_GET_RTC 65 | ||
24 | #define SSC_EXIT 66 | ||
25 | #define SSC_LOAD_SYMBOLS 69 | ||
26 | #define SSC_GET_TOD 74 | ||
27 | |||
28 | #define SSC_GET_ARGS 75 | ||
29 | |||
30 | /* | ||
31 | * Simulator system call. | ||
32 | */ | ||
33 | extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr); | ||
34 | |||
35 | #endif /* ssc_h */ | ||
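These SSC numbers are reached through the ssc() stub declared above; the convention used throughout this patch passes the payload in the first argument and the call number last. A hedged sketch of a boot-time console helper built on that convention (ssc_puts() is illustrative, not part of the boot code):

/* Emit a NUL-terminated string through the simulator console. */
static void
ssc_puts (const char *s)
{
	while (*s)
		ssc(*s++, 0, 0, 0, SSC_PUTCHAR);
}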
diff --git a/arch/ia64/hp/sim/hpsim.S b/arch/ia64/hp/sim/hpsim.S new file mode 100644 index 000000000000..ff16e8a857d1 --- /dev/null +++ b/arch/ia64/hp/sim/hpsim.S | |||
@@ -0,0 +1,10 @@ | |||
1 | #include <asm/asmmacro.h> | ||
2 | |||
3 | /* | ||
4 | * Simulator system call. | ||
5 | */ | ||
6 | GLOBAL_ENTRY(ia64_ssc) | ||
7 | mov r15=r36 | ||
8 | break 0x80001 | ||
9 | br.ret.sptk.many rp | ||
10 | END(ia64_ssc) | ||
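The stub moves the SSC number (the fifth argument, which arrives in r36) into r15 and issues break 0x80001, which the simulator intercepts; the first four arguments stay in their incoming registers. From C the stub is reached through the ia64_ssc() prototype in hpsim_ssc.h. A hedged usage sketch (hpsim_exit() and the zero status argument are assumptions, not part of this patch):

/* Ask the simulator to terminate the simulated machine. */
static void
hpsim_exit (void)
{
	ia64_ssc(0, 0, 0, 0, SSC_EXIT);	/* arg0 assumed to be the exit status */
}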
diff --git a/arch/ia64/hp/sim/hpsim_console.c b/arch/ia64/hp/sim/hpsim_console.c new file mode 100644 index 000000000000..5deff21e5877 --- /dev/null +++ b/arch/ia64/hp/sim/hpsim_console.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Platform dependent support for HP simulator. | ||
3 | * | ||
4 | * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co | ||
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com> | ||
7 | */ | ||
8 | #include <linux/config.h> | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/param.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/tty.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/console.h> | ||
18 | |||
19 | #include <asm/delay.h> | ||
20 | #include <asm/irq.h> | ||
21 | #include <asm/pal.h> | ||
22 | #include <asm/machvec.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/sal.h> | ||
25 | |||
26 | #include "hpsim_ssc.h" | ||
27 | |||
28 | static int simcons_init (struct console *, char *); | ||
29 | static void simcons_write (struct console *, const char *, unsigned); | ||
30 | static struct tty_driver *simcons_console_device (struct console *, int *); | ||
31 | |||
32 | struct console hpsim_cons = { | ||
33 | .name = "simcons", | ||
34 | .write = simcons_write, | ||
35 | .device = simcons_console_device, | ||
36 | .setup = simcons_init, | ||
37 | .flags = CON_PRINTBUFFER, | ||
38 | .index = -1, | ||
39 | }; | ||
40 | |||
41 | static int | ||
42 | simcons_init (struct console *cons, char *options) | ||
43 | { | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static void | ||
48 | simcons_write (struct console *cons, const char *buf, unsigned count) | ||
49 | { | ||
50 | unsigned long ch; | ||
51 | |||
52 | while (count-- > 0) { | ||
53 | ch = *buf++; | ||
54 | ia64_ssc(ch, 0, 0, 0, SSC_PUTCHAR); | ||
55 | if (ch == '\n') | ||
56 | ia64_ssc('\r', 0, 0, 0, SSC_PUTCHAR); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | static struct tty_driver *simcons_console_device (struct console *c, int *index) | ||
61 | { | ||
62 | extern struct tty_driver *hp_simserial_driver; | ||
63 | *index = c->index; | ||
64 | return hp_simserial_driver; | ||
65 | } | ||
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c new file mode 100644 index 000000000000..c0d25a2a3e9c --- /dev/null +++ b/arch/ia64/hp/sim/hpsim_irq.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Platform dependent support for HP simulator. | ||
3 | * | ||
4 | * Copyright (C) 1998-2001 Hewlett-Packard Co | ||
5 | * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/irq.h> | ||
12 | |||
13 | static unsigned int | ||
14 | hpsim_irq_startup (unsigned int irq) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | static void | ||
20 | hpsim_irq_noop (unsigned int irq) | ||
21 | { | ||
22 | } | ||
23 | |||
24 | static void | ||
25 | hpsim_set_affinity_noop (unsigned int a, cpumask_t b) | ||
26 | { | ||
27 | } | ||
28 | |||
29 | static struct hw_interrupt_type irq_type_hp_sim = { | ||
30 | .typename = "hpsim", | ||
31 | .startup = hpsim_irq_startup, | ||
32 | .shutdown = hpsim_irq_noop, | ||
33 | .enable = hpsim_irq_noop, | ||
34 | .disable = hpsim_irq_noop, | ||
35 | .ack = hpsim_irq_noop, | ||
36 | .end = hpsim_irq_noop, | ||
37 | .set_affinity = hpsim_set_affinity_noop, | ||
38 | }; | ||
39 | |||
40 | void __init | ||
41 | hpsim_irq_init (void) | ||
42 | { | ||
43 | irq_desc_t *idesc; | ||
44 | int i; | ||
45 | |||
46 | for (i = 0; i < NR_IRQS; ++i) { | ||
47 | idesc = irq_descp(i); | ||
48 | if (idesc->handler == &no_irq_type) | ||
49 | idesc->handler = &irq_type_hp_sim; | ||
50 | } | ||
51 | } | ||
diff --git a/arch/ia64/hp/sim/hpsim_machvec.c b/arch/ia64/hp/sim/hpsim_machvec.c new file mode 100644 index 000000000000..c21419359185 --- /dev/null +++ b/arch/ia64/hp/sim/hpsim_machvec.c | |||
@@ -0,0 +1,3 @@ | |||
1 | #define MACHVEC_PLATFORM_NAME hpsim | ||
2 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_hpsim.h> | ||
3 | #include <asm/machvec_init.h> | ||
diff --git a/arch/ia64/hp/sim/hpsim_setup.c b/arch/ia64/hp/sim/hpsim_setup.c new file mode 100644 index 000000000000..694fc86bfbd5 --- /dev/null +++ b/arch/ia64/hp/sim/hpsim_setup.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Platform dependent support for HP simulator. | ||
3 | * | ||
4 | * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co | ||
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com> | ||
7 | */ | ||
8 | #include <linux/config.h> | ||
9 | #include <linux/console.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kdev_t.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/major.h> | ||
14 | #include <linux/param.h> | ||
15 | #include <linux/root_dev.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | #include <asm/delay.h> | ||
20 | #include <asm/irq.h> | ||
21 | #include <asm/pal.h> | ||
22 | #include <asm/machvec.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/sal.h> | ||
25 | |||
26 | #include "hpsim_ssc.h" | ||
27 | |||
28 | void | ||
29 | ia64_ssc_connect_irq (long intr, long irq) | ||
30 | { | ||
31 | ia64_ssc(intr, irq, 0, 0, SSC_CONNECT_INTERRUPT); | ||
32 | } | ||
33 | |||
34 | void | ||
35 | ia64_ctl_trace (long on) | ||
36 | { | ||
37 | ia64_ssc(on, 0, 0, 0, SSC_CTL_TRACE); | ||
38 | } | ||
39 | |||
40 | void __init | ||
41 | hpsim_setup (char **cmdline_p) | ||
42 | { | ||
43 | ROOT_DEV = Root_SDA1; /* default to first SCSI drive */ | ||
44 | |||
45 | #ifdef CONFIG_HP_SIMSERIAL_CONSOLE | ||
46 | { | ||
47 | extern struct console hpsim_cons; | ||
48 | if (ia64_platform_is("hpsim")) | ||
49 | register_console(&hpsim_cons); | ||
50 | } | ||
51 | #endif | ||
52 | } | ||
diff --git a/arch/ia64/hp/sim/hpsim_ssc.h b/arch/ia64/hp/sim/hpsim_ssc.h new file mode 100644 index 000000000000..bfa3906274b3 --- /dev/null +++ b/arch/ia64/hp/sim/hpsim_ssc.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Platform dependent support for HP simulator. | ||
3 | * | ||
4 | * Copyright (C) 1998, 1999 Hewlett-Packard Co | ||
5 | * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com> | ||
7 | */ | ||
8 | #ifndef _IA64_PLATFORM_HPSIM_SSC_H | ||
9 | #define _IA64_PLATFORM_HPSIM_SSC_H | ||
10 | |||
11 | /* Simulator system calls: */ | ||
12 | |||
13 | #define SSC_CONSOLE_INIT 20 | ||
14 | #define SSC_GETCHAR 21 | ||
15 | #define SSC_PUTCHAR 31 | ||
16 | #define SSC_CONNECT_INTERRUPT 58 | ||
17 | #define SSC_GENERATE_INTERRUPT 59 | ||
18 | #define SSC_SET_PERIODIC_INTERRUPT 60 | ||
19 | #define SSC_GET_RTC 65 | ||
20 | #define SSC_EXIT 66 | ||
21 | #define SSC_LOAD_SYMBOLS 69 | ||
22 | #define SSC_GET_TOD 74 | ||
23 | #define SSC_CTL_TRACE 76 | ||
24 | |||
25 | #define SSC_NETDEV_PROBE 100 | ||
26 | #define SSC_NETDEV_SEND 101 | ||
27 | #define SSC_NETDEV_RECV 102 | ||
28 | #define SSC_NETDEV_ATTACH 103 | ||
29 | #define SSC_NETDEV_DETACH 104 | ||
30 | |||
31 | /* | ||
32 | * Simulator system call. | ||
33 | */ | ||
34 | extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr); | ||
35 | |||
36 | #endif /* _IA64_PLATFORM_HPSIM_SSC_H */ | ||
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c new file mode 100644 index 000000000000..ae84a1018a89 --- /dev/null +++ b/arch/ia64/hp/sim/simeth.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * Simulated Ethernet Driver | ||
3 | * | ||
4 | * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co | ||
5 | * Stephane Eranian <eranian@hpl.hp.com> | ||
6 | */ | ||
7 | #include <linux/config.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/in.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/netdevice.h> | ||
17 | #include <linux/etherdevice.h> | ||
18 | #include <linux/inetdevice.h> | ||
19 | #include <linux/if_ether.h> | ||
20 | #include <linux/if_arp.h> | ||
21 | #include <linux/skbuff.h> | ||
22 | #include <linux/notifier.h> | ||
23 | #include <linux/bitops.h> | ||
24 | #include <asm/system.h> | ||
25 | #include <asm/irq.h> | ||
26 | |||
27 | #define SIMETH_RECV_MAX 10 | ||
28 | |||
29 | /* | ||
30 | * Maximum possible received frame for Ethernet. | ||
31 | * We preallocate an sk_buff of that size to avoid a costly | ||
32 | * memcpy from a temporary buffer into the sk_buff. We do basically | ||
33 | * what's done in other drivers, like eepro with a ring. | ||
34 | * The difference is, of course, that we don't have real DMA !!! | ||
35 | */ | ||
36 | #define SIMETH_FRAME_SIZE ETH_FRAME_LEN | ||
37 | |||
38 | |||
39 | #define SSC_NETDEV_PROBE 100 | ||
40 | #define SSC_NETDEV_SEND 101 | ||
41 | #define SSC_NETDEV_RECV 102 | ||
42 | #define SSC_NETDEV_ATTACH 103 | ||
43 | #define SSC_NETDEV_DETACH 104 | ||
44 | |||
45 | #define NETWORK_INTR 8 | ||
46 | |||
47 | struct simeth_local { | ||
48 | struct net_device_stats stats; | ||
49 | int simfd; /* descriptor in the simulator */ | ||
50 | }; | ||
51 | |||
52 | static int simeth_probe1(void); | ||
53 | static int simeth_open(struct net_device *dev); | ||
54 | static int simeth_close(struct net_device *dev); | ||
55 | static int simeth_tx(struct sk_buff *skb, struct net_device *dev); | ||
56 | static int simeth_rx(struct net_device *dev); | ||
57 | static struct net_device_stats *simeth_get_stats(struct net_device *dev); | ||
58 | static irqreturn_t simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs); | ||
59 | static void set_multicast_list(struct net_device *dev); | ||
60 | static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr); | ||
61 | |||
62 | static char *simeth_version="0.3"; | ||
63 | |||
64 | /* | ||
65 | * This variable is used to establish a mapping between the Linux/ia64 kernel | ||
66 | * and the host linux kernel. | ||
67 | * | ||
68 | * As of today, we support only one card, even though most of the code | ||
69 | * is ready for many more. The mapping is then: | ||
70 | * linux/ia64 -> linux/x86 | ||
71 | * eth0 -> eth1 | ||
72 | * | ||
73 | * In the future, with some string operations, we could easily support up | ||
74 | * to 10 cards (0-9). | ||
75 | * | ||
76 | * The default mapping can be changed on the kernel command line by | ||
77 | * specifying simeth=ethX (or whatever string you want). | ||
78 | */ | ||
79 | static char *simeth_device="eth0"; /* default host interface to use */ | ||
80 | |||
81 | |||
82 | |||
83 | static volatile unsigned int card_count; /* how many cards "found" so far */ | ||
84 | static int simeth_debug; /* set to 1 to get debug information */ | ||
85 | |||
86 | /* | ||
87 | * Used to catch IFF_UP & IFF_DOWN events | ||
88 | */ | ||
89 | static struct notifier_block simeth_dev_notifier = { | ||
90 | simeth_device_event, | ||
91 | 0 | ||
92 | }; | ||
93 | |||
94 | |||
95 | /* | ||
96 | * Function used when using a kernel command line option. | ||
97 | * | ||
98 | * Format: simeth=interface_name (like eth0) | ||
99 | */ | ||
100 | static int __init | ||
101 | simeth_setup(char *str) | ||
102 | { | ||
103 | simeth_device = str; | ||
104 | return 1; | ||
105 | } | ||
106 | |||
107 | __setup("simeth=", simeth_setup); | ||
108 | |||
109 | /* | ||
110 | * Function used to probe for simeth devices when not installed | ||
111 | * as a loadable module | ||
112 | */ | ||
113 | |||
114 | int __init | ||
115 | simeth_probe (void) | ||
116 | { | ||
117 | int r; | ||
118 | |||
119 | printk(KERN_INFO "simeth: v%s\n", simeth_version); | ||
120 | |||
121 | r = simeth_probe1(); | ||
122 | |||
123 | if (r == 0) register_netdevice_notifier(&simeth_dev_notifier); | ||
124 | |||
125 | return r; | ||
126 | } | ||
127 | |||
128 | extern long ia64_ssc (long, long, long, long, int); | ||
129 | extern void ia64_ssc_connect_irq (long intr, long irq); | ||
130 | |||
131 | static inline int | ||
132 | netdev_probe(char *name, unsigned char *ether) | ||
133 | { | ||
134 | return ia64_ssc(__pa(name), __pa(ether), 0,0, SSC_NETDEV_PROBE); | ||
135 | } | ||
136 | |||
137 | |||
138 | static inline int | ||
139 | netdev_connect(int irq) | ||
140 | { | ||
141 | /* XXX Fix me | ||
142 | * this does not support multiple cards | ||
143 | * also no return value | ||
144 | */ | ||
145 | ia64_ssc_connect_irq(NETWORK_INTR, irq); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static inline int | ||
150 | netdev_attach(int fd, int irq, unsigned int ipaddr) | ||
151 | { | ||
152 | /* this puts the host interface in the right mode (start interrupting) */ | ||
153 | return ia64_ssc(fd, ipaddr, 0,0, SSC_NETDEV_ATTACH); | ||
154 | } | ||
155 | |||
156 | |||
157 | static inline int | ||
158 | netdev_detach(int fd) | ||
159 | { | ||
160 | /* | ||
161 | * inactivate the host interface (don't interrupt anymore) */ | ||
162 | return ia64_ssc(fd, 0,0,0, SSC_NETDEV_DETACH); | ||
163 | } | ||
164 | |||
165 | static inline int | ||
166 | netdev_send(int fd, unsigned char *buf, unsigned int len) | ||
167 | { | ||
168 | return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_SEND); | ||
169 | } | ||
170 | |||
171 | static inline int | ||
172 | netdev_read(int fd, unsigned char *buf, unsigned int len) | ||
173 | { | ||
174 | return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_RECV); | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Function shared with module code, so cannot be in init section | ||
179 | * | ||
180 | * So far this function "detects" only one card (test_&_set) but could | ||
181 | * be extended easily. | ||
182 | * | ||
183 | * Return: | ||
184 | * - -ENODEV if no device is found | ||
185 | * - -ENOMEM if we run out of memory | ||
186 | * - 0 otherwise | ||
187 | */ | ||
188 | static int | ||
189 | simeth_probe1(void) | ||
190 | { | ||
191 | unsigned char mac_addr[ETH_ALEN]; | ||
192 | struct simeth_local *local; | ||
193 | struct net_device *dev; | ||
194 | int fd, i, err; | ||
195 | |||
196 | /* | ||
197 | * XXX Fix me | ||
198 | * let's support just one card for now | ||
199 | */ | ||
200 | if (test_and_set_bit(0, &card_count)) | ||
201 | return -ENODEV; | ||
202 | |||
203 | /* | ||
204 | * check with the simulator for the device | ||
205 | */ | ||
206 | fd = netdev_probe(simeth_device, mac_addr); | ||
207 | if (fd == -1) | ||
208 | return -ENODEV; | ||
209 | |||
210 | dev = alloc_etherdev(sizeof(struct simeth_local)); | ||
211 | if (!dev) | ||
212 | return -ENOMEM; | ||
213 | |||
214 | memcpy(dev->dev_addr, mac_addr, sizeof(mac_addr)); | ||
215 | |||
216 | local = dev->priv; | ||
217 | local->simfd = fd; /* keep track of underlying file descriptor */ | ||
218 | |||
219 | dev->open = simeth_open; | ||
220 | dev->stop = simeth_close; | ||
221 | dev->hard_start_xmit = simeth_tx; | ||
222 | dev->get_stats = simeth_get_stats; | ||
223 | 	dev->set_multicast_list = set_multicast_list; /* not yet used */ | ||
224 | |||
225 | err = register_netdev(dev); | ||
226 | if (err) { | ||
227 | free_netdev(dev); | ||
228 | return err; | ||
229 | } | ||
230 | |||
231 | dev->irq = assign_irq_vector(AUTO_ASSIGN); | ||
232 | |||
233 | /* | ||
234 | 	 * attach the interrupt in the simulator; this does not enable interrupts | ||
235 | 	 * until a netdev_attach() is called | ||
236 | */ | ||
237 | netdev_connect(dev->irq); | ||
238 | |||
239 | printk(KERN_INFO "%s: hosteth=%s simfd=%d, HwAddr", | ||
240 | dev->name, simeth_device, local->simfd); | ||
241 | for(i = 0; i < ETH_ALEN; i++) { | ||
242 | printk(" %2.2x", dev->dev_addr[i]); | ||
243 | } | ||
244 | printk(", IRQ %d\n", dev->irq); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * actually binds the device to an interrupt vector | ||
251 | */ | ||
252 | static int | ||
253 | simeth_open(struct net_device *dev) | ||
254 | { | ||
255 | if (request_irq(dev->irq, simeth_interrupt, 0, "simeth", dev)) { | ||
256 | printk(KERN_WARNING "simeth: unable to get IRQ %d.\n", dev->irq); | ||
257 | return -EAGAIN; | ||
258 | } | ||
259 | |||
260 | netif_start_queue(dev); | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /* copied from lapbether.c */ | ||
266 | static __inline__ int dev_is_ethdev(struct net_device *dev) | ||
267 | { | ||
268 | return ( dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5)); | ||
269 | } | ||
270 | |||
271 | |||
272 | /* | ||
273 | * Handler for IFF_UP or IFF_DOWN | ||
274 | * | ||
275 | * The reason for this is that we don't want to be interrupted when the | ||
276 | * interface is down. There is no way to disconnect in the simulator. Instead | ||
277 | * we use this function to shut down packet processing in the frame filter | ||
278 | * in the simulator, so that no interrupts are generated. | ||
279 | * | ||
280 | * | ||
281 | * That's also the place where we pass the IP address of this device to the | ||
282 | * simulator so that we can start filtering packets for it. | ||
283 | * | ||
284 | * There may be a better way of doing this, but I don't know which yet. | ||
285 | */ | ||
286 | static int | ||
287 | simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) | ||
288 | { | ||
289 | struct net_device *dev = ptr; | ||
290 | struct simeth_local *local; | ||
291 | struct in_device *in_dev; | ||
292 | struct in_ifaddr **ifap = NULL; | ||
293 | struct in_ifaddr *ifa = NULL; | ||
294 | int r; | ||
295 | |||
296 | |||
297 | if ( ! dev ) { | ||
298 | printk(KERN_WARNING "simeth_device_event dev=0\n"); | ||
299 | return NOTIFY_DONE; | ||
300 | } | ||
301 | |||
302 | if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE; | ||
303 | |||
304 | /* | ||
305 | * Check whether or not it's for an ethernet device | ||
306 | * | ||
307 | * XXX Fixme: This works only as long as we support one | ||
308 | * type of ethernet device. | ||
309 | */ | ||
310 | if ( !dev_is_ethdev(dev) ) return NOTIFY_DONE; | ||
311 | |||
312 | if ((in_dev=dev->ip_ptr) != NULL) { | ||
313 | for (ifap=&in_dev->ifa_list; (ifa=*ifap) != NULL; ifap=&ifa->ifa_next) | ||
314 | if (strcmp(dev->name, ifa->ifa_label) == 0) break; | ||
315 | } | ||
316 | if ( ifa == NULL ) { | ||
317 | 		printk(KERN_ERR "simeth_device_event: can't find device %s's ifa\n", dev->name); | ||
318 | return NOTIFY_DONE; | ||
319 | } | ||
320 | |||
321 | printk(KERN_INFO "simeth_device_event: %s ipaddr=0x%x\n", | ||
322 | dev->name, htonl(ifa->ifa_local)); | ||
323 | |||
324 | /* | ||
325 | * XXX Fix me | ||
326 | * if the device was up, and we're simply reconfiguring it, not sure | ||
327 | * we get DOWN then UP. | ||
328 | */ | ||
329 | |||
330 | local = dev->priv; | ||
331 | /* now do it for real */ | ||
332 | r = event == NETDEV_UP ? | ||
333 | netdev_attach(local->simfd, dev->irq, htonl(ifa->ifa_local)): | ||
334 | netdev_detach(local->simfd); | ||
335 | |||
336 | printk(KERN_INFO "simeth: netdev_attach/detach: event=%s ->%d\n", | ||
337 | event == NETDEV_UP ? "attach":"detach", r); | ||
338 | |||
339 | return NOTIFY_DONE; | ||
340 | } | ||
341 | |||
342 | static int | ||
343 | simeth_close(struct net_device *dev) | ||
344 | { | ||
345 | netif_stop_queue(dev); | ||
346 | |||
347 | free_irq(dev->irq, dev); | ||
348 | |||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Only used for debug | ||
354 | */ | ||
355 | static void | ||
356 | frame_print(unsigned char *from, unsigned char *frame, int len) | ||
357 | { | ||
358 | int i; | ||
359 | |||
360 | printk("%s: (%d) %02x", from, len, frame[0] & 0xff); | ||
361 | for(i=1; i < 6; i++ ) { | ||
362 | printk(":%02x", frame[i] &0xff); | ||
363 | } | ||
364 | printk(" %2x", frame[6] &0xff); | ||
365 | for(i=7; i < 12; i++ ) { | ||
366 | printk(":%02x", frame[i] &0xff); | ||
367 | } | ||
368 | printk(" [%02x%02x]\n", frame[12], frame[13]); | ||
369 | |||
370 | for(i=14; i < len; i++ ) { | ||
371 | printk("%02x ", frame[i] &0xff); | ||
372 | if ( (i%10)==0) printk("\n"); | ||
373 | } | ||
374 | printk("\n"); | ||
375 | } | ||
376 | |||
377 | |||
378 | /* | ||
379 | * Function used to transmit a frame; the very last one on the path before | ||
380 | * going to the simulator. | ||
381 | */ | ||
382 | static int | ||
383 | simeth_tx(struct sk_buff *skb, struct net_device *dev) | ||
384 | { | ||
385 | struct simeth_local *local = dev->priv; | ||
386 | |||
387 | #if 0 | ||
388 | /* ensure we have at least ETH_ZLEN bytes (min frame size) */ | ||
389 | unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; | ||
390 | 	/* Where do the extra padding bytes come from in the skbuff? */ | ||
391 | #else | ||
392 | /* the real driver in the host system is going to take care of that | ||
393 | * or maybe it's the NIC itself. | ||
394 | */ | ||
395 | unsigned int length = skb->len; | ||
396 | #endif | ||
397 | |||
398 | local->stats.tx_bytes += skb->len; | ||
399 | local->stats.tx_packets++; | ||
400 | |||
401 | |||
402 | if (simeth_debug > 5) frame_print("simeth_tx", skb->data, length); | ||
403 | |||
404 | netdev_send(local->simfd, skb->data, length); | ||
405 | |||
406 | /* | ||
407 | * we are synchronous on write, so we don't simulate a | ||
408 | 	 * transmit complete interrupt; thus there is nothing further to arm | ||
409 | */ | ||
410 | |||
411 | dev_kfree_skb(skb); | ||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | static inline struct sk_buff * | ||
416 | make_new_skb(struct net_device *dev) | ||
417 | { | ||
418 | struct sk_buff *nskb; | ||
419 | |||
420 | /* | ||
421 | * The +2 is used to make sure that the IP header is nicely | ||
422 | 	 * aligned on a 4-byte boundary (14 + 2 = 16) | ||
423 | */ | ||
424 | nskb = dev_alloc_skb(SIMETH_FRAME_SIZE + 2); | ||
425 | if ( nskb == NULL ) { | ||
426 | printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name); | ||
427 | return NULL; | ||
428 | } | ||
429 | nskb->dev = dev; | ||
430 | |||
431 | skb_reserve(nskb, 2); /* Align IP on 16 byte boundaries */ | ||
432 | |||
433 | skb_put(nskb,SIMETH_FRAME_SIZE); | ||
434 | |||
435 | return nskb; | ||
436 | } | ||
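Worked example of the alignment arithmetic in the comment above: the Ethernet header is 14 bytes, so without the reserve the IP header would start at offset 14; reserving 2 bytes pushes it to 2 + 14 = 16, which is 4-byte (indeed 16-byte) aligned. A small standalone check, not part of the driver:

/* Returns non-zero when the IP header offset is 4-byte aligned. */
static inline int
simeth_rx_ip_aligned (void)
{
	const int reserve  = 2;		/* skb_reserve(nskb, 2) above */
	const int eth_hlen = 14;	/* Ethernet header length */

	return ((reserve + eth_hlen) & 3) == 0;	/* (2 + 14) % 4 == 0 */
}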
437 | |||
438 | /* | ||
439 | * called from interrupt handler to process a received frame | ||
440 | */ | ||
441 | static int | ||
442 | simeth_rx(struct net_device *dev) | ||
443 | { | ||
444 | struct simeth_local *local; | ||
445 | struct sk_buff *skb; | ||
446 | int len; | ||
447 | int rcv_count = SIMETH_RECV_MAX; | ||
448 | |||
449 | local = dev->priv; | ||
450 | /* | ||
451 | * the loop concept has been borrowed from other drivers | ||
452 | 	 * looks to me like it's a throttling thing to avoid pushing too many | ||
453 | 	 * packets at one time into the stack, making sure we can process them | ||
454 | 	 * upstream and make forward progress overall. | ||
455 | */ | ||
456 | do { | ||
457 | if ( (skb=make_new_skb(dev)) == NULL ) { | ||
458 | printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name); | ||
459 | local->stats.rx_dropped++; | ||
460 | return 0; | ||
461 | } | ||
462 | /* | ||
463 | * Read only one frame at a time | ||
464 | */ | ||
465 | len = netdev_read(local->simfd, skb->data, SIMETH_FRAME_SIZE); | ||
466 | if ( len == 0 ) { | ||
467 | if ( simeth_debug > 0 ) printk(KERN_WARNING "%s: count=%d netdev_read=0\n", | ||
468 | dev->name, SIMETH_RECV_MAX-rcv_count); | ||
469 | break; | ||
470 | } | ||
471 | #if 0 | ||
472 | /* | ||
473 | * XXX Fix me | ||
474 | * Should really do a csum+copy here | ||
475 | */ | ||
476 | memcpy(skb->data, frame, len); | ||
477 | #endif | ||
478 | skb->protocol = eth_type_trans(skb, dev); | ||
479 | |||
480 | if ( simeth_debug > 6 ) frame_print("simeth_rx", skb->data, len); | ||
481 | |||
482 | /* | ||
483 | * push the packet up & trigger software interrupt | ||
484 | */ | ||
485 | netif_rx(skb); | ||
486 | |||
487 | local->stats.rx_packets++; | ||
488 | local->stats.rx_bytes += len; | ||
489 | |||
490 | } while ( --rcv_count ); | ||
491 | |||
492 | return len; /* 0 = nothing left to read, otherwise, we can try again */ | ||
493 | } | ||
494 | |||
495 | /* | ||
496 | * Interrupt handler (Yes, we can do it too !!!) | ||
497 | */ | ||
498 | static irqreturn_t | ||
499 | simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs) | ||
500 | { | ||
501 | struct net_device *dev = dev_id; | ||
502 | |||
503 | if ( dev == NULL ) { | ||
504 | printk(KERN_WARNING "simeth: irq %d for unknown device\n", irq); | ||
505 | return IRQ_NONE; | ||
506 | } | ||
507 | |||
508 | /* | ||
509 | * very simple loop because we get interrupts only when receiving | ||
510 | */ | ||
511 | while (simeth_rx(dev)); | ||
512 | return IRQ_HANDLED; | ||
513 | } | ||
514 | |||
515 | static struct net_device_stats * | ||
516 | simeth_get_stats(struct net_device *dev) | ||
517 | { | ||
518 | struct simeth_local *local = dev->priv; | ||
519 | |||
520 | return &local->stats; | ||
521 | } | ||
522 | |||
523 | /* fake multicast ability */ | ||
524 | static void | ||
525 | set_multicast_list(struct net_device *dev) | ||
526 | { | ||
527 | printk(KERN_WARNING "%s: set_multicast_list called\n", dev->name); | ||
528 | } | ||
529 | |||
530 | __initcall(simeth_probe); | ||
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c new file mode 100644 index 000000000000..56405dbfd739 --- /dev/null +++ b/arch/ia64/hp/sim/simscsi.c | |||
@@ -0,0 +1,404 @@ | |||
1 | /* | ||
2 | * Simulated SCSI driver. | ||
3 | * | ||
4 | * Copyright (C) 1999, 2001-2003 Hewlett-Packard Co | ||
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | * Stephane Eranian <eranian@hpl.hp.com> | ||
7 | * | ||
8 | * 02/01/15 David Mosberger Updated for v2.5.1 | ||
9 | * 99/12/18 David Mosberger Added support for READ10/WRITE10 needed by linux v2.3.33 | ||
10 | */ | ||
11 | #include <linux/blkdev.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/timer.h> | ||
16 | #include <asm/irq.h> | ||
17 | |||
18 | #include <scsi/scsi.h> | ||
19 | #include <scsi/scsi_cmnd.h> | ||
20 | #include <scsi/scsi_device.h> | ||
21 | #include <scsi/scsi_host.h> | ||
22 | |||
23 | #define DEBUG_SIMSCSI 0 | ||
24 | |||
25 | #define SIMSCSI_REQ_QUEUE_LEN 64 | ||
26 | #define DEFAULT_SIMSCSI_ROOT "/var/ski-disks/sd" | ||
27 | |||
28 | /* Simulator system calls: */ | ||
29 | |||
30 | #define SSC_OPEN 50 | ||
31 | #define SSC_CLOSE 51 | ||
32 | #define SSC_READ 52 | ||
33 | #define SSC_WRITE 53 | ||
34 | #define SSC_GET_COMPLETION 54 | ||
35 | #define SSC_WAIT_COMPLETION 55 | ||
36 | |||
37 | #define SSC_WRITE_ACCESS 2 | ||
38 | #define SSC_READ_ACCESS 1 | ||
39 | |||
40 | #if DEBUG_SIMSCSI | ||
41 | int simscsi_debug; | ||
42 | # define DBG simscsi_debug | ||
43 | #else | ||
44 | # define DBG 0 | ||
45 | #endif | ||
46 | |||
47 | static struct Scsi_Host *host; | ||
48 | |||
49 | static void simscsi_interrupt (unsigned long val); | ||
50 | static DECLARE_TASKLET(simscsi_tasklet, simscsi_interrupt, 0); | ||
51 | |||
52 | struct disk_req { | ||
53 | unsigned long addr; | ||
54 | unsigned len; | ||
55 | }; | ||
56 | |||
57 | struct disk_stat { | ||
58 | int fd; | ||
59 | unsigned count; | ||
60 | }; | ||
61 | |||
62 | extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr); | ||
63 | |||
64 | static int desc[16] = { | ||
65 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 | ||
66 | }; | ||
67 | |||
68 | static struct queue_entry { | ||
69 | struct scsi_cmnd *sc; | ||
70 | } queue[SIMSCSI_REQ_QUEUE_LEN]; | ||
71 | |||
72 | static int rd, wr; | ||
73 | static atomic_t num_reqs = ATOMIC_INIT(0); | ||
74 | |||
75 | /* base name for default disks */ | ||
76 | static char *simscsi_root = DEFAULT_SIMSCSI_ROOT; | ||
77 | |||
78 | #define MAX_ROOT_LEN 128 | ||
79 | |||
80 | /* | ||
81 | * used to setup a new base for disk images | ||
82 | * to use /foo/bar/disk[a-z] as disk images | ||
83 | * you have to specify simscsi=/foo/bar/disk on the command line | ||
84 | */ | ||
85 | static int __init | ||
86 | simscsi_setup (char *s) | ||
87 | { | ||
88 | /* XXX Fix me we may need to strcpy() ? */ | ||
89 | if (strlen(s) > MAX_ROOT_LEN) { | ||
90 | printk(KERN_ERR "simscsi_setup: prefix too long---using default %s\n", | ||
91 | simscsi_root); | ||
92 | 	} else | ||
93 | 		simscsi_root = s; | ||
94 | return 1; | ||
95 | } | ||
96 | |||
97 | __setup("simscsi=", simscsi_setup); | ||
98 | |||
99 | static void | ||
100 | simscsi_interrupt (unsigned long val) | ||
101 | { | ||
102 | struct scsi_cmnd *sc; | ||
103 | |||
104 | while ((sc = queue[rd].sc) != 0) { | ||
105 | atomic_dec(&num_reqs); | ||
106 | queue[rd].sc = 0; | ||
107 | if (DBG) | ||
108 | printk("simscsi_interrupt: done with %ld\n", sc->serial_number); | ||
109 | (*sc->scsi_done)(sc); | ||
110 | rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static int | ||
115 | simscsi_biosparam (struct scsi_device *sdev, struct block_device *n, | ||
116 | sector_t capacity, int ip[]) | ||
117 | { | ||
118 | ip[0] = 64; /* heads */ | ||
119 | ip[1] = 32; /* sectors */ | ||
120 | ip[2] = capacity >> 11; /* cylinders */ | ||
121 | return 0; | ||
122 | } | ||
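The geometry above gives 64 heads x 32 sectors = 2048 sectors per cylinder, which is why the cylinder count is capacity >> 11 (2048 = 2^11). A standalone restatement of that arithmetic (simscsi_cylinders() is illustrative, not part of the driver):

static inline unsigned long
simscsi_cylinders (unsigned long capacity_sectors)
{
	return capacity_sectors / (64 * 32);	/* identical to capacity >> 11 */
}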
123 | |||
124 | static void | ||
125 | simscsi_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset, unsigned long len) | ||
126 | { | ||
127 | struct disk_stat stat; | ||
128 | struct disk_req req; | ||
129 | |||
130 | req.addr = __pa(sc->request_buffer); | ||
131 | req.len = len; /* # of bytes to transfer */ | ||
132 | |||
133 | if (sc->request_bufflen < req.len) | ||
134 | return; | ||
135 | |||
136 | stat.fd = desc[sc->device->id]; | ||
137 | if (DBG) | ||
138 | printk("simscsi_%s @ %lx (off %lx)\n", | ||
139 | mode == SSC_READ ? "read":"write", req.addr, offset); | ||
140 | ia64_ssc(stat.fd, 1, __pa(&req), offset, mode); | ||
141 | ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION); | ||
142 | |||
143 | if (stat.count == req.len) { | ||
144 | sc->result = GOOD; | ||
145 | } else { | ||
146 | sc->result = DID_ERROR << 16; | ||
147 | } | ||
148 | } | ||
149 | |||
150 | static void | ||
151 | simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset) | ||
152 | { | ||
153 | int list_len = sc->use_sg; | ||
154 | struct scatterlist *sl = (struct scatterlist *)sc->buffer; | ||
155 | struct disk_stat stat; | ||
156 | struct disk_req req; | ||
157 | |||
158 | stat.fd = desc[sc->device->id]; | ||
159 | |||
160 | while (list_len) { | ||
161 | req.addr = __pa(page_address(sl->page) + sl->offset); | ||
162 | req.len = sl->length; | ||
163 | if (DBG) | ||
164 | printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n", | ||
165 | mode == SSC_READ ? "read":"write", req.addr, offset, | ||
166 | list_len, sl->length); | ||
167 | ia64_ssc(stat.fd, 1, __pa(&req), offset, mode); | ||
168 | ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION); | ||
169 | |||
170 | /* should not happen in our case */ | ||
171 | if (stat.count != req.len) { | ||
172 | sc->result = DID_ERROR << 16; | ||
173 | return; | ||
174 | } | ||
175 | offset += sl->length; | ||
176 | sl++; | ||
177 | list_len--; | ||
178 | } | ||
179 | sc->result = GOOD; | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * function handling both READ_6/WRITE_6 (non-scatter/gather mode) | ||
184 | * commands. | ||
185 | * Added 02/26/99 S.Eranian | ||
186 | */ | ||
187 | static void | ||
188 | simscsi_readwrite6 (struct scsi_cmnd *sc, int mode) | ||
189 | { | ||
190 | unsigned long offset; | ||
191 | |||
192 | offset = (((sc->cmnd[1] & 0x1f) << 16) | (sc->cmnd[2] << 8) | sc->cmnd[3])*512; | ||
193 | if (sc->use_sg > 0) | ||
194 | simscsi_sg_readwrite(sc, mode, offset); | ||
195 | else | ||
196 | simscsi_readwrite(sc, mode, offset, sc->cmnd[4]*512); | ||
197 | } | ||
198 | |||
199 | static size_t | ||
200 | simscsi_get_disk_size (int fd) | ||
201 | { | ||
202 | struct disk_stat stat; | ||
203 | size_t bit, sectors = 0; | ||
204 | struct disk_req req; | ||
205 | char buf[512]; | ||
206 | |||
207 | /* | ||
208 | * This is a bit kludgey: the simulator doesn't provide a direct way of determining | ||
209 | * the disk size, so we do a binary search, assuming a maximum disk size of 4GB. | ||
210 | */ | ||
211 | for (bit = (4UL << 30)/512; bit != 0; bit >>= 1) { | ||
212 | req.addr = __pa(&buf); | ||
213 | req.len = sizeof(buf); | ||
214 | ia64_ssc(fd, 1, __pa(&req), ((sectors | bit) - 1)*512, SSC_READ); | ||
215 | stat.fd = fd; | ||
216 | ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION); | ||
217 | if (stat.count == sizeof(buf)) | ||
218 | sectors |= bit; | ||
219 | } | ||
220 | return sectors - 1; /* return last valid sector number */ | ||
221 | } | ||
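The probe above is a bit-by-bit binary search: starting from the largest candidate bit (4 GB worth of 512-byte sectors), each bit is kept only if a read at the sector it implies succeeds, so after the loop sectors holds the number of readable sectors. The same idea in isolation, as a hedged sketch with an arbitrary predicate (probe_size() and readable() are stand-ins, not part of the driver):

/*
 * Find the largest n below 2^max_bits for which readable(n - 1) succeeds,
 * assuming readable() is monotonic: true for every existing sector and
 * false at and beyond the end of the disk.
 */
static unsigned long
probe_size (unsigned long max_bits, int (*readable)(unsigned long sector))
{
	unsigned long n = 0, bit;

	for (bit = 1UL << (max_bits - 1); bit != 0; bit >>= 1)
		if (readable((n | bit) - 1))
			n |= bit;
	return n;
}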
222 | |||
223 | static void | ||
224 | simscsi_readwrite10 (struct scsi_cmnd *sc, int mode) | ||
225 | { | ||
226 | unsigned long offset; | ||
227 | |||
228 | offset = ( (sc->cmnd[2] << 24) | (sc->cmnd[3] << 16) | ||
229 | | (sc->cmnd[4] << 8) | (sc->cmnd[5] << 0))*512; | ||
230 | if (sc->use_sg > 0) | ||
231 | simscsi_sg_readwrite(sc, mode, offset); | ||
232 | else | ||
233 | simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512); | ||
234 | } | ||
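READ_10/WRITE_10 carry a 32-bit big-endian logical block address in CDB bytes 2-5 and a 16-bit transfer length in bytes 7-8; the code above turns both into byte counts by multiplying with the 512-byte sector size. A standalone decode sketch (struct rw10 and decode_rw10() are illustrative, not from the driver):

struct rw10 {
	unsigned long byte_offset;	/* LBA * 512 */
	unsigned long byte_len;		/* transfer length * 512 */
};

static struct rw10
decode_rw10 (const unsigned char *cdb)
{
	struct rw10 r;

	r.byte_offset = (((unsigned long) cdb[2] << 24) | (cdb[3] << 16)
			 | (cdb[4] << 8) | cdb[5]) * 512;
	r.byte_len    = ((cdb[7] << 8) | cdb[8]) * 512;
	return r;
}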
235 | |||
236 | static int | ||
237 | simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | ||
238 | { | ||
239 | unsigned int target_id = sc->device->id; | ||
240 | char fname[MAX_ROOT_LEN+16]; | ||
241 | size_t disk_size; | ||
242 | char *buf; | ||
243 | #if DEBUG_SIMSCSI | ||
244 | register long sp asm ("sp"); | ||
245 | |||
246 | if (DBG) | ||
247 | printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%lu,sp=%lx,done=%p\n", | ||
248 | target_id, sc->cmnd[0], sc->serial_number, sp, done); | ||
249 | #endif | ||
250 | |||
251 | sc->result = DID_BAD_TARGET << 16; | ||
252 | sc->scsi_done = done; | ||
253 | if (target_id <= 15 && sc->device->lun == 0) { | ||
254 | switch (sc->cmnd[0]) { | ||
255 | case INQUIRY: | ||
256 | if (sc->request_bufflen < 35) { | ||
257 | break; | ||
258 | } | ||
259 | sprintf (fname, "%s%c", simscsi_root, 'a' + target_id); | ||
260 | desc[target_id] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS, | ||
261 | 0, 0, SSC_OPEN); | ||
262 | if (desc[target_id] < 0) { | ||
263 | /* disk doesn't exist... */ | ||
264 | break; | ||
265 | } | ||
266 | buf = sc->request_buffer; | ||
267 | buf[0] = 0; /* magnetic disk */ | ||
268 | buf[1] = 0; /* not a removable medium */ | ||
269 | buf[2] = 2; /* SCSI-2 compliant device */ | ||
270 | buf[3] = 2; /* SCSI-2 response data format */ | ||
271 | buf[4] = 31; /* additional length (bytes) */ | ||
272 | buf[5] = 0; /* reserved */ | ||
273 | buf[6] = 0; /* reserved */ | ||
274 | buf[7] = 0; /* various flags */ | ||
275 | 			memcpy(buf + 8, "HP      SIMULATED DISK  0.00", 28); /* 8-byte vendor, 16-byte product, 4-byte rev */ | ||
276 | sc->result = GOOD; | ||
277 | break; | ||
278 | |||
279 | case TEST_UNIT_READY: | ||
280 | sc->result = GOOD; | ||
281 | break; | ||
282 | |||
283 | case READ_6: | ||
284 | if (desc[target_id] < 0 ) | ||
285 | break; | ||
286 | simscsi_readwrite6(sc, SSC_READ); | ||
287 | break; | ||
288 | |||
289 | case READ_10: | ||
290 | if (desc[target_id] < 0 ) | ||
291 | break; | ||
292 | simscsi_readwrite10(sc, SSC_READ); | ||
293 | break; | ||
294 | |||
295 | case WRITE_6: | ||
296 | if (desc[target_id] < 0) | ||
297 | break; | ||
298 | simscsi_readwrite6(sc, SSC_WRITE); | ||
299 | break; | ||
300 | |||
301 | case WRITE_10: | ||
302 | if (desc[target_id] < 0) | ||
303 | break; | ||
304 | simscsi_readwrite10(sc, SSC_WRITE); | ||
305 | break; | ||
306 | |||
307 | |||
308 | case READ_CAPACITY: | ||
309 | if (desc[target_id] < 0 || sc->request_bufflen < 8) { | ||
310 | break; | ||
311 | } | ||
312 | buf = sc->request_buffer; | ||
313 | |||
314 | disk_size = simscsi_get_disk_size(desc[target_id]); | ||
315 | |||
316 | 			/* report the capacity probed above (in 512-byte sectors): */ | ||
317 | buf[0] = (disk_size >> 24) & 0xff; | ||
318 | buf[1] = (disk_size >> 16) & 0xff; | ||
319 | buf[2] = (disk_size >> 8) & 0xff; | ||
320 | buf[3] = (disk_size >> 0) & 0xff; | ||
321 | /* set block size of 512 bytes: */ | ||
322 | buf[4] = 0; | ||
323 | buf[5] = 0; | ||
324 | buf[6] = 2; | ||
325 | buf[7] = 0; | ||
326 | sc->result = GOOD; | ||
327 | break; | ||
328 | |||
329 | case MODE_SENSE: | ||
330 | case MODE_SENSE_10: | ||
331 | /* sd.c uses this to determine whether disk does write-caching. */ | ||
332 | memset(sc->request_buffer, 0, 128); | ||
333 | sc->result = GOOD; | ||
334 | break; | ||
335 | |||
336 | case START_STOP: | ||
337 | printk(KERN_ERR "START_STOP\n"); | ||
338 | break; | ||
339 | |||
340 | default: | ||
341 | panic("simscsi: unknown SCSI command %u\n", sc->cmnd[0]); | ||
342 | } | ||
343 | } | ||
344 | 	if (sc->result == (DID_BAD_TARGET << 16)) { | ||
345 | sc->result |= DRIVER_SENSE << 24; | ||
346 | sc->sense_buffer[0] = 0x70; | ||
347 | sc->sense_buffer[2] = 0x00; | ||
348 | } | ||
349 | if (atomic_read(&num_reqs) >= SIMSCSI_REQ_QUEUE_LEN) { | ||
350 | panic("Attempt to queue command while command is pending!!"); | ||
351 | } | ||
352 | atomic_inc(&num_reqs); | ||
353 | queue[wr].sc = sc; | ||
354 | wr = (wr + 1) % SIMSCSI_REQ_QUEUE_LEN; | ||
355 | |||
356 | tasklet_schedule(&simscsi_tasklet); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | static int | ||
361 | simscsi_host_reset (struct scsi_cmnd *sc) | ||
362 | { | ||
363 | printk(KERN_ERR "simscsi_host_reset: not implemented\n"); | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static struct scsi_host_template driver_template = { | ||
368 | .name = "simulated SCSI host adapter", | ||
369 | .proc_name = "simscsi", | ||
370 | .queuecommand = simscsi_queuecommand, | ||
371 | .eh_host_reset_handler = simscsi_host_reset, | ||
372 | .bios_param = simscsi_biosparam, | ||
373 | .can_queue = SIMSCSI_REQ_QUEUE_LEN, | ||
374 | .this_id = -1, | ||
375 | .sg_tablesize = SG_ALL, | ||
376 | .max_sectors = 1024, | ||
377 | .cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN, | ||
378 | .use_clustering = DISABLE_CLUSTERING, | ||
379 | }; | ||
380 | |||
381 | static int __init | ||
382 | simscsi_init(void) | ||
383 | { | ||
384 | int error; | ||
385 | |||
386 | host = scsi_host_alloc(&driver_template, 0); | ||
387 | if (!host) | ||
388 | return -ENOMEM; | ||
389 | |||
390 | error = scsi_add_host(host, NULL); | ||
391 | if (!error) | ||
392 | scsi_scan_host(host); | ||
393 | return error; | ||
394 | } | ||
395 | |||
396 | static void __exit | ||
397 | simscsi_exit(void) | ||
398 | { | ||
399 | scsi_remove_host(host); | ||
400 | scsi_host_put(host); | ||
401 | } | ||
402 | |||
403 | module_init(simscsi_init); | ||
404 | module_exit(simscsi_exit); | ||
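simscsi_queuecommand() only records the command and bumps the producer index wr; completion happens later when the tasklet walks the same array from the consumer index rd, with num_reqs guarding against overflow. A minimal standalone sketch of that single-producer/single-consumer ring (the types, names and the empty-slot test are illustrative, not from the driver):

#define RING_LEN 64

struct ring {
	void *slot[RING_LEN];
	int rd, wr;			/* consumer / producer indices */
};

/* Producer side: returns 0 on success, -1 when the ring is full. */
static int
ring_put (struct ring *r, void *item)
{
	if (r->slot[r->wr] != NULL)
		return -1;		/* consumer has not caught up yet */
	r->slot[r->wr] = item;
	r->wr = (r->wr + 1) % RING_LEN;
	return 0;
}

/* Consumer side: returns NULL when the ring is empty. */
static void *
ring_get (struct ring *r)
{
	void *item = r->slot[r->rd];

	if (item != NULL) {
		r->slot[r->rd] = NULL;
		r->rd = (r->rd + 1) % RING_LEN;
	}
	return item;
}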
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c new file mode 100644 index 000000000000..786e70718ce4 --- /dev/null +++ b/arch/ia64/hp/sim/simserial.c | |||
@@ -0,0 +1,1032 @@ | |||
1 | /* | ||
2 | * Simulated Serial Driver (fake serial) | ||
3 | * | ||
4 | * This driver is mostly used for bringup purposes and will go away. | ||
5 | * It has a strong dependency on the system console. All outputs | ||
6 | * are rerouted to the same facility as the one used by printk, which in our | ||
7 | * case means the sys_sim.c console (it goes via the simulator). The code hereafter | ||
8 | * is completely leveraged from the serial.c driver. | ||
9 | * | ||
10 | * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co | ||
11 | * Stephane Eranian <eranian@hpl.hp.com> | ||
12 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
13 | * | ||
14 | * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). | ||
15 | * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. | ||
16 | * 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/tty.h> | ||
24 | #include <linux/tty_flip.h> | ||
25 | #include <linux/major.h> | ||
26 | #include <linux/fcntl.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/console.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/serial.h> | ||
32 | #include <linux/serialP.h> | ||
33 | |||
34 | #include <asm/irq.h> | ||
35 | #include <asm/hw_irq.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | |||
38 | #ifdef CONFIG_KDB | ||
39 | # include <linux/kdb.h> | ||
40 | #endif | ||
41 | |||
42 | #undef SIMSERIAL_DEBUG /* define this to get some debug information */ | ||
43 | |||
44 | #define KEYBOARD_INTR 3 /* must match with simulator! */ | ||
45 | |||
46 | #define NR_PORTS 1 /* only one port for now */ | ||
47 | #define SERIAL_INLINE 1 | ||
48 | |||
49 | #ifdef SERIAL_INLINE | ||
50 | #define _INLINE_ inline | ||
51 | #endif | ||
52 | |||
53 | #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) | ||
54 | |||
55 | #define SSC_GETCHAR 21 | ||
56 | |||
57 | extern long ia64_ssc (long, long, long, long, int); | ||
58 | extern void ia64_ssc_connect_irq (long intr, long irq); | ||
59 | |||
60 | static char *serial_name = "SimSerial driver"; | ||
61 | static char *serial_version = "0.6"; | ||
62 | |||
63 | /* | ||
64 | * This has been extracted from asm/serial.h. We need one eventually but | ||
65 | * I don't know exactly what we're going to put in it so just fake one | ||
66 | * for now. | ||
67 | */ | ||
68 | #define BASE_BAUD ( 1843200 / 16 ) | ||
69 | |||
70 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) | ||
71 | |||
72 | /* | ||
73 | * Most of the values here are meaningless to this particular driver. | ||
74 | * However, some values must be preserved for the code (leveraged from serial.c) | ||
75 | * to work correctly: | ||
76 | * port must not be 0 | ||
77 | * type must not be UNKNOWN | ||
78 | * So I picked arbitrary (guess from where?) values instead | ||
79 | */ | ||
80 | static struct serial_state rs_table[NR_PORTS]={ | ||
81 | /* UART CLK PORT IRQ FLAGS */ | ||
82 | { 0, BASE_BAUD, 0x3F8, 0, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */ | ||
83 | }; | ||
84 | |||
85 | /* | ||
86 | * Just for the fun of it ! | ||
87 | */ | ||
88 | static struct serial_uart_config uart_config[] = { | ||
89 | { "unknown", 1, 0 }, | ||
90 | { "8250", 1, 0 }, | ||
91 | { "16450", 1, 0 }, | ||
92 | { "16550", 1, 0 }, | ||
93 | { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO }, | ||
94 | { "cirrus", 1, 0 }, | ||
95 | { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH }, | ||
96 | { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | | ||
97 | UART_STARTECH }, | ||
98 | { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO}, | ||
99 | { 0, 0} | ||
100 | }; | ||
101 | |||
102 | struct tty_driver *hp_simserial_driver; | ||
103 | |||
104 | static struct async_struct *IRQ_ports[NR_IRQS]; | ||
105 | |||
106 | static struct console *console; | ||
107 | |||
108 | static unsigned char *tmp_buf; | ||
109 | static DECLARE_MUTEX(tmp_buf_sem); | ||
110 | |||
111 | extern struct console *console_drivers; /* from kernel/printk.c */ | ||
112 | |||
113 | /* | ||
114 | * ------------------------------------------------------------ | ||
115 | * rs_stop() and rs_start() | ||
116 | * | ||
117 | * These routines are called before setting or resetting tty->stopped. | ||
118 | * They enable or disable transmitter interrupts, as necessary. | ||
119 | * ------------------------------------------------------------ | ||
120 | */ | ||
121 | static void rs_stop(struct tty_struct *tty) | ||
122 | { | ||
123 | #ifdef SIMSERIAL_DEBUG | ||
124 | printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", | ||
125 | tty->stopped, tty->hw_stopped, tty->flow_stopped); | ||
126 | #endif | ||
127 | |||
128 | } | ||
129 | |||
130 | static void rs_start(struct tty_struct *tty) | ||
131 | { | ||
132 | #ifdef SIMSERIAL_DEBUG | ||
133 | printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", | ||
134 | tty->stopped, tty->hw_stopped, tty->flow_stopped); | ||
135 | #endif | ||
136 | } | ||
137 | |||
138 | static void receive_chars(struct tty_struct *tty, struct pt_regs *regs) | ||
139 | { | ||
140 | unsigned char ch; | ||
141 | static unsigned char seen_esc = 0; | ||
142 | |||
143 | while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { | ||
144 | if ( ch == 27 && seen_esc == 0 ) { | ||
145 | seen_esc = 1; | ||
146 | continue; | ||
147 | } else { | ||
148 | if ( seen_esc==1 && ch == 'O' ) { | ||
149 | seen_esc = 2; | ||
150 | continue; | ||
151 | } else if ( seen_esc == 2 ) { | ||
152 | if ( ch == 'P' ) show_state(); /* F1 key */ | ||
153 | #ifdef CONFIG_KDB | ||
154 | if ( ch == 'S' ) | ||
155 | kdb(KDB_REASON_KEYBOARD, 0, (kdb_eframe_t) regs); | ||
156 | #endif | ||
157 | |||
158 | seen_esc = 0; | ||
159 | continue; | ||
160 | } | ||
161 | } | ||
162 | seen_esc = 0; | ||
163 | if (tty->flip.count >= TTY_FLIPBUF_SIZE) break; | ||
164 | |||
165 | *tty->flip.char_buf_ptr = ch; | ||
166 | |||
167 | *tty->flip.flag_buf_ptr = 0; | ||
168 | |||
169 | tty->flip.flag_buf_ptr++; | ||
170 | tty->flip.char_buf_ptr++; | ||
171 | tty->flip.count++; | ||
172 | } | ||
173 | tty_flip_buffer_push(tty); | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * This is the serial driver's interrupt routine for a single port | ||
178 | */ | ||
179 | static irqreturn_t rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs) | ||
180 | { | ||
181 | struct async_struct * info; | ||
182 | |||
183 | /* | ||
184 | * I don't know exactly why they don't use the dev_id opaque data | ||
185 | * pointer instead of this extra lookup table | ||
186 | */ | ||
187 | info = IRQ_ports[irq]; | ||
188 | if (!info || !info->tty) { | ||
189 | printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info); | ||
190 | return IRQ_NONE; | ||
191 | } | ||
192 | /* | ||
193 | * pretty simple in our case, because we only get interrupts | ||
194 | * on inbound traffic | ||
195 | */ | ||
196 | receive_chars(info->tty, regs); | ||
197 | return IRQ_HANDLED; | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * ------------------------------------------------------------------- | ||
202 | * Here ends the serial interrupt routines. | ||
203 | * ------------------------------------------------------------------- | ||
204 | */ | ||
205 | |||
206 | #if 0 | ||
207 | /* | ||
208 | * not really used in our situation so keep them commented out for now | ||
209 | */ | ||
210 | static DECLARE_TASK_QUEUE(tq_serial); /* used to be at the top of the file */ | ||
211 | static void do_serial_bh(void) | ||
212 | { | ||
213 | run_task_queue(&tq_serial); | ||
214 | printk(KERN_ERR "do_serial_bh: called\n"); | ||
215 | } | ||
216 | #endif | ||
217 | |||
218 | static void do_softint(void *private_) | ||
219 | { | ||
220 | printk(KERN_ERR "simserial: do_softint called\n"); | ||
221 | } | ||
222 | |||
223 | static void rs_put_char(struct tty_struct *tty, unsigned char ch) | ||
224 | { | ||
225 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
226 | unsigned long flags; | ||
227 | |||
228 | if (!tty || !info->xmit.buf) return; | ||
229 | |||
230 | local_irq_save(flags); | ||
231 | if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { | ||
232 | local_irq_restore(flags); | ||
233 | return; | ||
234 | } | ||
235 | info->xmit.buf[info->xmit.head] = ch; | ||
236 | info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); | ||
237 | local_irq_restore(flags); | ||
238 | } | ||
239 | |||
240 | static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) | ||
241 | { | ||
242 | int count; | ||
243 | unsigned long flags; | ||
244 | |||
245 | |||
246 | local_irq_save(flags); | ||
247 | |||
248 | if (info->x_char) { | ||
249 | char c = info->x_char; | ||
250 | |||
251 | console->write(console, &c, 1); | ||
252 | |||
253 | info->state->icount.tx++; | ||
254 | info->x_char = 0; | ||
255 | |||
256 | goto out; | ||
257 | } | ||
258 | |||
259 | if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) { | ||
260 | #ifdef SIMSERIAL_DEBUG | ||
261 | printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", | ||
262 | info->xmit.head, info->xmit.tail, info->tty->stopped); | ||
263 | #endif | ||
264 | goto out; | ||
265 | } | ||
266 | /* | ||
267 | 	 * We removed the loop and do it in two chunks instead.  We need | ||
268 | 	 * two operations at most because it's a ring buffer: | ||
269 | 	 * | ||
270 | 	 * first from the tail to the end of the buffer if possible, | ||
271 | 	 * then from the beginning of the buffer as necessary. | ||
272 | */ | ||
273 | |||
274 | count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), | ||
275 | SERIAL_XMIT_SIZE - info->xmit.tail); | ||
276 | console->write(console, info->xmit.buf+info->xmit.tail, count); | ||
277 | |||
278 | info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1); | ||
279 | |||
280 | /* | ||
281 | * We have more at the beginning of the buffer | ||
282 | */ | ||
283 | count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); | ||
284 | if (count) { | ||
285 | console->write(console, info->xmit.buf, count); | ||
286 | info->xmit.tail += count; | ||
287 | } | ||
288 | out: | ||
289 | local_irq_restore(flags); | ||
290 | } | ||
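With SERIAL_XMIT_SIZE a power of two, the CIRC_* helpers used above (from <linux/circ_buf.h>) reduce to masked index arithmetic, which is why the copy-out needs at most two console->write() calls. A worked standalone sketch of that arithmetic (SIZE and the function names are assumptions, not part of the driver):

#define SIZE 1024			/* must be a power of two */

/* bytes currently queued in the ring */
static int
circ_cnt (int head, int tail)
{
	return (head - tail) & (SIZE - 1);
}

/* bytes that can be drained in one go, i.e. without wrapping */
static int
circ_cnt_to_end (int head, int tail)
{
	int end = SIZE - tail;		/* room before the wrap point */
	int n = (head + end) & (SIZE - 1);

	return n < end ? n : end;	/* first chunk: tail .. end of buffer */
}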
291 | |||
292 | static void rs_flush_chars(struct tty_struct *tty) | ||
293 | { | ||
294 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
295 | |||
296 | if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped || | ||
297 | !info->xmit.buf) | ||
298 | return; | ||
299 | |||
300 | transmit_chars(info, NULL); | ||
301 | } | ||
302 | |||
303 | |||
304 | static int rs_write(struct tty_struct * tty, | ||
305 | const unsigned char *buf, int count) | ||
306 | { | ||
307 | int c, ret = 0; | ||
308 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
309 | unsigned long flags; | ||
310 | |||
311 | if (!tty || !info->xmit.buf || !tmp_buf) return 0; | ||
312 | |||
313 | local_irq_save(flags); | ||
314 | while (1) { | ||
315 | c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); | ||
316 | if (count < c) | ||
317 | c = count; | ||
318 | if (c <= 0) { | ||
319 | break; | ||
320 | } | ||
321 | memcpy(info->xmit.buf + info->xmit.head, buf, c); | ||
322 | info->xmit.head = ((info->xmit.head + c) & | ||
323 | (SERIAL_XMIT_SIZE-1)); | ||
324 | buf += c; | ||
325 | count -= c; | ||
326 | ret += c; | ||
327 | } | ||
328 | local_irq_restore(flags); | ||
329 | /* | ||
330 | * In this driver we transmit directly from here instead of waiting for an interrupt. | ||
331 | */ | ||
332 | if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) | ||
333 | && !tty->stopped && !tty->hw_stopped) { | ||
334 | transmit_chars(info, NULL); | ||
335 | } | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | static int rs_write_room(struct tty_struct *tty) | ||
340 | { | ||
341 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
342 | |||
343 | return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); | ||
344 | } | ||
345 | |||
346 | static int rs_chars_in_buffer(struct tty_struct *tty) | ||
347 | { | ||
348 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
349 | |||
350 | return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); | ||
351 | } | ||
352 | |||
353 | static void rs_flush_buffer(struct tty_struct *tty) | ||
354 | { | ||
355 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
356 | unsigned long flags; | ||
357 | |||
358 | local_irq_save(flags); | ||
359 | info->xmit.head = info->xmit.tail = 0; | ||
360 | local_irq_restore(flags); | ||
361 | |||
362 | wake_up_interruptible(&tty->write_wait); | ||
363 | |||
364 | if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && | ||
365 | tty->ldisc.write_wakeup) | ||
366 | (tty->ldisc.write_wakeup)(tty); | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * This function is used to send a high-priority XON/XOFF character to | ||
371 | * the device | ||
372 | */ | ||
373 | static void rs_send_xchar(struct tty_struct *tty, char ch) | ||
374 | { | ||
375 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
376 | |||
377 | info->x_char = ch; | ||
378 | if (ch) { | ||
379 | /* | ||
380 | * We could call console->write() directly, but go through | ||
381 | * transmit_chars() for now. | ||
382 | */ | ||
383 | transmit_chars(info, NULL); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * ------------------------------------------------------------ | ||
389 | * rs_throttle() | ||
390 | * | ||
391 | * This routine is called by the upper-layer tty layer to signal that | ||
392 | * incoming characters should be throttled. | ||
393 | * ------------------------------------------------------------ | ||
394 | */ | ||
395 | static void rs_throttle(struct tty_struct * tty) | ||
396 | { | ||
397 | if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty)); | ||
398 | |||
399 | printk(KERN_INFO "simrs_throttle called\n"); | ||
400 | } | ||
401 | |||
402 | static void rs_unthrottle(struct tty_struct * tty) | ||
403 | { | ||
404 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
405 | |||
406 | if (I_IXOFF(tty)) { | ||
407 | if (info->x_char) | ||
408 | info->x_char = 0; | ||
409 | else | ||
410 | rs_send_xchar(tty, START_CHAR(tty)); | ||
411 | } | ||
412 | printk(KERN_INFO "simrs_unthrottle called\n"); | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * rs_break() --- routine which turns the break handling on or off | ||
417 | */ | ||
418 | static void rs_break(struct tty_struct *tty, int break_state) | ||
419 | { | ||
420 | } | ||
421 | |||
422 | static int rs_ioctl(struct tty_struct *tty, struct file * file, | ||
423 | unsigned int cmd, unsigned long arg) | ||
424 | { | ||
425 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && | ||
426 | (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && | ||
427 | (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { | ||
428 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
429 | return -EIO; | ||
430 | } | ||
431 | |||
432 | switch (cmd) { | ||
433 | case TIOCMGET: | ||
434 | printk(KERN_INFO "rs_ioctl: TIOCMGET called\n"); | ||
435 | return -EINVAL; | ||
436 | case TIOCMBIS: | ||
437 | case TIOCMBIC: | ||
438 | case TIOCMSET: | ||
439 | printk(KERN_INFO "rs_ioctl: TIOCMBIS/BIC/SET called\n"); | ||
440 | return -EINVAL; | ||
441 | case TIOCGSERIAL: | ||
442 | printk(KERN_INFO "simrs_ioctl TIOCGSERIAL called\n"); | ||
443 | return 0; | ||
444 | case TIOCSSERIAL: | ||
445 | printk(KERN_INFO "simrs_ioctl TIOCSSERIAL called\n"); | ||
446 | return 0; | ||
447 | case TIOCSERCONFIG: | ||
448 | printk(KERN_INFO "rs_ioctl: TIOCSERCONFIG called\n"); | ||
449 | return -EINVAL; | ||
450 | |||
451 | case TIOCSERGETLSR: /* Get line status register */ | ||
452 | printk(KERN_INFO "rs_ioctl: TIOCSERGETLSR called\n"); | ||
453 | return -EINVAL; | ||
454 | |||
455 | case TIOCSERGSTRUCT: | ||
456 | printk(KERN_INFO "rs_ioctl: TIOCSERGSTRUCT called\n"); | ||
457 | #if 0 | ||
458 | if (copy_to_user((struct async_struct *) arg, | ||
459 | info, sizeof(struct async_struct))) | ||
460 | return -EFAULT; | ||
461 | #endif | ||
462 | return 0; | ||
463 | |||
464 | /* | ||
465 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | ||
466 | * - mask passed in arg for lines of interest | ||
467 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) | ||
468 | * Caller should use TIOCGICOUNT to see which one it was | ||
469 | */ | ||
470 | case TIOCMIWAIT: | ||
471 | printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n"); | ||
472 | return 0; | ||
473 | /* | ||
474 | * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) | ||
475 | * Return: write counters to the user passed counter struct | ||
476 | * NB: both 1->0 and 0->1 transitions are counted except for | ||
477 | * RI where only 0->1 is counted. | ||
478 | */ | ||
479 | case TIOCGICOUNT: | ||
480 | printk(KERN_INFO "rs_ioctl: TIOCGICOUNT called\n"); | ||
481 | return 0; | ||
482 | |||
483 | case TIOCSERGWILD: | ||
484 | case TIOCSERSWILD: | ||
485 | /* "setserial -W" is called in Debian boot */ | ||
486 | printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n"); | ||
487 | return 0; | ||
488 | |||
489 | default: | ||
490 | return -ENOIOCTLCMD; | ||
491 | } | ||
492 | return 0; | ||
493 | } | ||
494 | |||
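| /* Input-mode flags that matter to this driver; rs_set_termios() ignores changes to any other c_iflag bits. */ | ||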
495 | #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) | ||
496 | |||
497 | static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios) | ||
498 | { | ||
499 | unsigned int cflag = tty->termios->c_cflag; | ||
500 | |||
501 | if ( (cflag == old_termios->c_cflag) | ||
502 | && ( RELEVANT_IFLAG(tty->termios->c_iflag) | ||
503 | == RELEVANT_IFLAG(old_termios->c_iflag))) | ||
504 | return; | ||
505 | |||
506 | |||
507 | /* Handle turning off CRTSCTS */ | ||
508 | if ((old_termios->c_cflag & CRTSCTS) && | ||
509 | !(tty->termios->c_cflag & CRTSCTS)) { | ||
510 | tty->hw_stopped = 0; | ||
511 | rs_start(tty); | ||
512 | } | ||
513 | } | ||
514 | /* | ||
515 | * This routine will shut down a serial port; interrupts are disabled, and | ||
516 | * DTR is dropped if the hangup on close termio flag is on. | ||
517 | */ | ||
518 | static void shutdown(struct async_struct * info) | ||
519 | { | ||
520 | unsigned long flags; | ||
521 | struct serial_state *state; | ||
522 | int retval; | ||
523 | |||
524 | if (!(info->flags & ASYNC_INITIALIZED)) return; | ||
525 | |||
526 | state = info->state; | ||
527 | |||
528 | #ifdef SIMSERIAL_DEBUG | ||
529 | printk("Shutting down serial port %d (irq %d)....", info->line, | ||
530 | state->irq); | ||
531 | #endif | ||
532 | |||
533 | local_irq_save(flags); | ||
534 | { | ||
535 | /* | ||
536 | * First unlink the serial port from the IRQ chain... | ||
537 | */ | ||
538 | if (info->next_port) | ||
539 | info->next_port->prev_port = info->prev_port; | ||
540 | if (info->prev_port) | ||
541 | info->prev_port->next_port = info->next_port; | ||
542 | else | ||
543 | IRQ_ports[state->irq] = info->next_port; | ||
544 | |||
545 | /* | ||
546 | * Free the IRQ, if necessary | ||
547 | */ | ||
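| /* If no ports remain on this IRQ, the line is freed outright; if exactly one remains, the IRQ is re-requested with the single-port handler. */ | ||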
548 | if (state->irq && (!IRQ_ports[state->irq] || | ||
549 | !IRQ_ports[state->irq]->next_port)) { | ||
550 | if (IRQ_ports[state->irq]) { | ||
551 | free_irq(state->irq, NULL); | ||
552 | retval = request_irq(state->irq, rs_interrupt_single, | ||
553 | IRQ_T(info), "serial", NULL); | ||
554 | |||
555 | if (retval) | ||
556 | printk(KERN_ERR "serial shutdown: request_irq: error %d" | ||
557 | " Couldn't reacquire IRQ.\n", retval); | ||
558 | } else | ||
559 | free_irq(state->irq, NULL); | ||
560 | } | ||
561 | |||
562 | if (info->xmit.buf) { | ||
563 | free_page((unsigned long) info->xmit.buf); | ||
564 | info->xmit.buf = 0; | ||
565 | } | ||
566 | |||
567 | if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); | ||
568 | |||
569 | info->flags &= ~ASYNC_INITIALIZED; | ||
570 | } | ||
571 | local_irq_restore(flags); | ||
572 | } | ||
573 | |||
574 | /* | ||
575 | * ------------------------------------------------------------ | ||
576 | * rs_close() | ||
577 | * | ||
578 | * This routine is called when the serial port gets closed. First, we | ||
579 | * wait for the last remaining data to be sent. Then, we unlink its | ||
580 | * async structure from the interrupt chain if necessary, and we free | ||
581 | * that IRQ if nothing is left in the chain. | ||
582 | * ------------------------------------------------------------ | ||
583 | */ | ||
584 | static void rs_close(struct tty_struct *tty, struct file * filp) | ||
585 | { | ||
586 | struct async_struct * info = (struct async_struct *)tty->driver_data; | ||
587 | struct serial_state *state; | ||
588 | unsigned long flags; | ||
589 | |||
590 | if (!info) return; | ||
591 | |||
592 | state = info->state; | ||
593 | |||
594 | local_irq_save(flags); | ||
595 | if (tty_hung_up_p(filp)) { | ||
596 | #ifdef SIMSERIAL_DEBUG | ||
597 | printk("rs_close: hung_up\n"); | ||
598 | #endif | ||
599 | local_irq_restore(flags); | ||
600 | return; | ||
601 | } | ||
602 | #ifdef SIMSERIAL_DEBUG | ||
603 | printk("rs_close ttys%d, count = %d\n", info->line, state->count); | ||
604 | #endif | ||
605 | if ((tty->count == 1) && (state->count != 1)) { | ||
606 | /* | ||
607 | * Uh, oh. tty->count is 1, which means that the tty | ||
608 | * structure will be freed. state->count should always | ||
609 | * be one in these conditions. If it's greater than | ||
610 | * one, we've got real problems, since it means the | ||
611 | * serial port won't be shut down. | ||
612 | */ | ||
613 | printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, " | ||
614 | "state->count is %d\n", state->count); | ||
615 | state->count = 1; | ||
616 | } | ||
617 | if (--state->count < 0) { | ||
618 | printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n", | ||
619 | info->line, state->count); | ||
620 | state->count = 0; | ||
621 | } | ||
622 | if (state->count) { | ||
623 | local_irq_restore(flags); | ||
624 | return; | ||
625 | } | ||
626 | info->flags |= ASYNC_CLOSING; | ||
627 | local_irq_restore(flags); | ||
628 | |||
629 | /* | ||
630 | * Now we wait for the transmit buffer to clear and notify | ||
631 | * the line discipline to process only XON/XOFF characters. | ||
632 | */ | ||
633 | shutdown(info); | ||
634 | if (tty->driver->flush_buffer) tty->driver->flush_buffer(tty); | ||
635 | if (tty->ldisc.flush_buffer) tty->ldisc.flush_buffer(tty); | ||
636 | info->event = 0; | ||
637 | info->tty = 0; | ||
638 | if (info->blocked_open) { | ||
639 | if (info->close_delay) { | ||
640 | current->state = TASK_INTERRUPTIBLE; | ||
641 | schedule_timeout(info->close_delay); | ||
642 | } | ||
643 | wake_up_interruptible(&info->open_wait); | ||
644 | } | ||
645 | info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
646 | wake_up_interruptible(&info->close_wait); | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * rs_wait_until_sent() --- wait until the transmitter is empty | ||
651 | */ | ||
652 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | ||
653 | { | ||
654 | } | ||
655 | |||
656 | |||
657 | /* | ||
658 | * rs_hangup() --- called by tty_hangup() when a hangup is signaled. | ||
659 | */ | ||
660 | static void rs_hangup(struct tty_struct *tty) | ||
661 | { | ||
662 | struct async_struct * info = (struct async_struct *)tty->driver_data; | ||
663 | struct serial_state *state; | ||
664 | |||
665 | #ifdef SIMSERIAL_DEBUG | ||
666 | printk("rs_hangup: called\n"); | ||
667 | #endif | ||
668 | |||
669 | state = info->state; | ||
670 | |||
671 | rs_flush_buffer(tty); | ||
672 | if (info->flags & ASYNC_CLOSING) | ||
673 | return; | ||
674 | shutdown(info); | ||
675 | |||
676 | info->event = 0; | ||
677 | state->count = 0; | ||
678 | info->flags &= ~ASYNC_NORMAL_ACTIVE; | ||
679 | info->tty = 0; | ||
680 | wake_up_interruptible(&info->open_wait); | ||
681 | } | ||
682 | |||
683 | |||
684 | static int get_async_struct(int line, struct async_struct **ret_info) | ||
685 | { | ||
686 | struct async_struct *info; | ||
687 | struct serial_state *sstate; | ||
688 | |||
689 | sstate = rs_table + line; | ||
690 | sstate->count++; | ||
691 | if (sstate->info) { | ||
692 | *ret_info = sstate->info; | ||
693 | return 0; | ||
694 | } | ||
695 | info = kmalloc(sizeof(struct async_struct), GFP_KERNEL); | ||
696 | if (!info) { | ||
697 | sstate->count--; | ||
698 | return -ENOMEM; | ||
699 | } | ||
700 | memset(info, 0, sizeof(struct async_struct)); | ||
701 | init_waitqueue_head(&info->open_wait); | ||
702 | init_waitqueue_head(&info->close_wait); | ||
703 | init_waitqueue_head(&info->delta_msr_wait); | ||
704 | info->magic = SERIAL_MAGIC; | ||
705 | info->port = sstate->port; | ||
706 | info->flags = sstate->flags; | ||
707 | info->xmit_fifo_size = sstate->xmit_fifo_size; | ||
708 | info->line = line; | ||
709 | INIT_WORK(&info->work, do_softint, info); | ||
710 | info->state = sstate; | ||
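| /* Re-check sstate->info: another open() may have installed an async_struct while we were allocating; if so, use that one and free ours. */ | ||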
711 | if (sstate->info) { | ||
712 | kfree(info); | ||
713 | *ret_info = sstate->info; | ||
714 | return 0; | ||
715 | } | ||
716 | *ret_info = sstate->info = info; | ||
717 | return 0; | ||
718 | } | ||
719 | |||
720 | static int | ||
721 | startup(struct async_struct *info) | ||
722 | { | ||
723 | unsigned long flags; | ||
724 | int retval = 0; | ||
725 | irqreturn_t (*handler)(int, void *, struct pt_regs *); | ||
726 | struct serial_state *state = info->state; | ||
727 | unsigned long page; | ||
728 | |||
729 | page = get_zeroed_page(GFP_KERNEL); | ||
730 | if (!page) | ||
731 | return -ENOMEM; | ||
732 | |||
733 | local_irq_save(flags); | ||
734 | |||
735 | if (info->flags & ASYNC_INITIALIZED) { | ||
736 | free_page(page); | ||
737 | goto errout; | ||
738 | } | ||
739 | |||
740 | if (!state->port || !state->type) { | ||
741 | if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); | ||
742 | free_page(page); | ||
743 | goto errout; | ||
744 | } | ||
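| /* If a transmit buffer already exists (left over from an earlier startup), keep it and release the page we just allocated. */ | ||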
745 | if (info->xmit.buf) | ||
746 | free_page(page); | ||
747 | else | ||
748 | info->xmit.buf = (unsigned char *) page; | ||
749 | |||
750 | #ifdef SIMSERIAL_DEBUG | ||
751 | printk("startup: ttys%d (irq %d)...", info->line, state->irq); | ||
752 | #endif | ||
753 | |||
754 | /* | ||
755 | * Allocate the IRQ if necessary | ||
756 | */ | ||
757 | if (state->irq && (!IRQ_ports[state->irq] || | ||
758 | !IRQ_ports[state->irq]->next_port)) { | ||
759 | if (IRQ_ports[state->irq]) { | ||
760 | retval = -EBUSY; | ||
761 | goto errout; | ||
762 | } else | ||
763 | handler = rs_interrupt_single; | ||
764 | |||
765 | retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL); | ||
766 | if (retval) { | ||
767 | if (capable(CAP_SYS_ADMIN)) { | ||
768 | if (info->tty) | ||
769 | set_bit(TTY_IO_ERROR, | ||
770 | &info->tty->flags); | ||
771 | retval = 0; | ||
772 | } | ||
773 | goto errout; | ||
774 | } | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * Insert serial port into IRQ chain. | ||
779 | */ | ||
780 | info->prev_port = 0; | ||
781 | info->next_port = IRQ_ports[state->irq]; | ||
782 | if (info->next_port) | ||
783 | info->next_port->prev_port = info; | ||
784 | IRQ_ports[state->irq] = info; | ||
785 | |||
786 | if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); | ||
787 | |||
788 | info->xmit.head = info->xmit.tail = 0; | ||
789 | |||
790 | #if 0 | ||
791 | /* | ||
792 | * Set up serial timers... | ||
793 | */ | ||
794 | timer_table[RS_TIMER].expires = jiffies + 2*HZ/100; | ||
795 | timer_active |= 1 << RS_TIMER; | ||
796 | #endif | ||
797 | |||
798 | /* | ||
799 | * Set up the tty->alt_speed kludge | ||
800 | */ | ||
801 | if (info->tty) { | ||
802 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) | ||
803 | info->tty->alt_speed = 57600; | ||
804 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) | ||
805 | info->tty->alt_speed = 115200; | ||
806 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) | ||
807 | info->tty->alt_speed = 230400; | ||
808 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) | ||
809 | info->tty->alt_speed = 460800; | ||
810 | } | ||
811 | |||
812 | info->flags |= ASYNC_INITIALIZED; | ||
813 | local_irq_restore(flags); | ||
814 | return 0; | ||
815 | |||
816 | errout: | ||
817 | local_irq_restore(flags); | ||
818 | return retval; | ||
819 | } | ||
820 | |||
821 | |||
822 | /* | ||
823 | * This routine is called whenever a serial port is opened. It | ||
824 | * enables interrupts for a serial port, linking its async structure into | ||
825 | * the IRQ chain. It also performs the serial-specific | ||
826 | * initialization for the tty structure. | ||
827 | */ | ||
828 | static int rs_open(struct tty_struct *tty, struct file * filp) | ||
829 | { | ||
830 | struct async_struct *info; | ||
831 | int retval, line; | ||
832 | unsigned long page; | ||
833 | |||
834 | line = tty->index; | ||
835 | if ((line < 0) || (line >= NR_PORTS)) | ||
836 | return -ENODEV; | ||
837 | retval = get_async_struct(line, &info); | ||
838 | if (retval) | ||
839 | return retval; | ||
840 | tty->driver_data = info; | ||
841 | info->tty = tty; | ||
842 | |||
843 | #ifdef SIMSERIAL_DEBUG | ||
844 | printk("rs_open %s, count = %d\n", tty->name, info->state->count); | ||
845 | #endif | ||
846 | info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; | ||
847 | |||
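| /* tmp_buf is allocated once, on first open; the second check below handles a racing open() that may have set it while we were allocating. */ | ||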
848 | if (!tmp_buf) { | ||
849 | page = get_zeroed_page(GFP_KERNEL); | ||
850 | if (!page) | ||
851 | return -ENOMEM; | ||
852 | if (tmp_buf) | ||
853 | free_page(page); | ||
854 | else | ||
855 | tmp_buf = (unsigned char *) page; | ||
856 | } | ||
857 | |||
858 | /* | ||
859 | * If the port is in the middle of closing, bail out now | ||
860 | */ | ||
861 | if (tty_hung_up_p(filp) || | ||
862 | (info->flags & ASYNC_CLOSING)) { | ||
863 | if (info->flags & ASYNC_CLOSING) | ||
864 | interruptible_sleep_on(&info->close_wait); | ||
865 | #ifdef SERIAL_DO_RESTART | ||
866 | return ((info->flags & ASYNC_HUP_NOTIFY) ? | ||
867 | -EAGAIN : -ERESTARTSYS); | ||
868 | #else | ||
869 | return -EAGAIN; | ||
870 | #endif | ||
871 | } | ||
872 | |||
873 | /* | ||
874 | * Start up serial port | ||
875 | */ | ||
876 | retval = startup(info); | ||
877 | if (retval) { | ||
878 | return retval; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * Figure out which console to use (there should be one already). | ||
883 | */ | ||
884 | console = console_drivers; | ||
885 | while (console) { | ||
886 | if ((console->flags & CON_ENABLED) && console->write) break; | ||
887 | console = console->next; | ||
888 | } | ||
889 | |||
890 | #ifdef SIMSERIAL_DEBUG | ||
891 | printk("rs_open ttys%d successful\n", info->line); | ||
892 | #endif | ||
893 | return 0; | ||
894 | } | ||
895 | |||
896 | /* | ||
897 | * /proc fs routines.... | ||
898 | */ | ||
899 | |||
900 | static inline int line_info(char *buf, struct serial_state *state) | ||
901 | { | ||
902 | return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n", | ||
903 | state->line, uart_config[state->type].name, | ||
904 | state->port, state->irq); | ||
905 | } | ||
906 | |||
907 | static int rs_read_proc(char *page, char **start, off_t off, int count, | ||
908 | int *eof, void *data) | ||
909 | { | ||
910 | int i, len = 0, l; | ||
911 | off_t begin = 0; | ||
912 | |||
913 | len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version); | ||
914 | for (i = 0; i < NR_PORTS && len < 4000; i++) { | ||
915 | l = line_info(page + len, &rs_table[i]); | ||
916 | len += l; | ||
917 | if (len+begin > off+count) | ||
918 | goto done; | ||
919 | if (len+begin < off) { | ||
920 | begin += len; | ||
921 | len = 0; | ||
922 | } | ||
923 | } | ||
924 | *eof = 1; | ||
925 | done: | ||
926 | if (off >= len+begin) | ||
927 | return 0; | ||
928 | *start = page + (begin-off); | ||
929 | return ((count < begin+len-off) ? count : begin+len-off); | ||
930 | } | ||
931 | |||
932 | /* | ||
933 | * --------------------------------------------------------------------- | ||
934 | * rs_init() and friends | ||
935 | * | ||
936 | * rs_init() is called at boot-time to initialize the serial driver. | ||
937 | * --------------------------------------------------------------------- | ||
938 | */ | ||
939 | |||
940 | /* | ||
941 | * This routine prints out the appropriate serial driver version | ||
942 | * number, and identifies which options were configured into this | ||
943 | * driver. | ||
944 | */ | ||
945 | static inline void show_serial_version(void) | ||
946 | { | ||
947 | printk(KERN_INFO "%s version %s with", serial_name, serial_version); | ||
948 | printk(KERN_INFO " no serial options enabled\n"); | ||
949 | } | ||
950 | |||
951 | static struct tty_operations hp_ops = { | ||
952 | .open = rs_open, | ||
953 | .close = rs_close, | ||
954 | .write = rs_write, | ||
955 | .put_char = rs_put_char, | ||
956 | .flush_chars = rs_flush_chars, | ||
957 | .write_room = rs_write_room, | ||
958 | .chars_in_buffer = rs_chars_in_buffer, | ||
959 | .flush_buffer = rs_flush_buffer, | ||
960 | .ioctl = rs_ioctl, | ||
961 | .throttle = rs_throttle, | ||
962 | .unthrottle = rs_unthrottle, | ||
963 | .send_xchar = rs_send_xchar, | ||
964 | .set_termios = rs_set_termios, | ||
965 | .stop = rs_stop, | ||
966 | .start = rs_start, | ||
967 | .hangup = rs_hangup, | ||
968 | .break_ctl = rs_break, | ||
969 | .wait_until_sent = rs_wait_until_sent, | ||
970 | .read_proc = rs_read_proc, | ||
971 | }; | ||
972 | |||
973 | /* | ||
974 | * The serial driver boot-time initialization code! | ||
975 | */ | ||
976 | static int __init | ||
977 | simrs_init (void) | ||
978 | { | ||
979 | int i; | ||
980 | struct serial_state *state; | ||
981 | |||
982 | if (!ia64_platform_is("hpsim")) | ||
983 | return -ENODEV; | ||
984 | |||
985 | hp_simserial_driver = alloc_tty_driver(1); | ||
986 | if (!hp_simserial_driver) | ||
987 | return -ENOMEM; | ||
988 | |||
989 | show_serial_version(); | ||
990 | |||
991 | /* Initialize the tty_driver structure */ | ||
992 | |||
993 | hp_simserial_driver->owner = THIS_MODULE; | ||
994 | hp_simserial_driver->driver_name = "simserial"; | ||
995 | hp_simserial_driver->name = "ttyS"; | ||
996 | hp_simserial_driver->major = TTY_MAJOR; | ||
997 | hp_simserial_driver->minor_start = 64; | ||
998 | hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL; | ||
999 | hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL; | ||
1000 | hp_simserial_driver->init_termios = tty_std_termios; | ||
1001 | hp_simserial_driver->init_termios.c_cflag = | ||
1002 | B9600 | CS8 | CREAD | HUPCL | CLOCAL; | ||
1003 | hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW; | ||
1004 | tty_set_operations(hp_simserial_driver, &hp_ops); | ||
1005 | |||
1006 | /* | ||
1007 | * Let's have a little bit of fun! | ||
1008 | */ | ||
1009 | for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) { | ||
1010 | |||
1011 | if (state->type == PORT_UNKNOWN) continue; | ||
1012 | |||
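| /* No IRQ preassigned: grab a free vector and ask the simulator, via an SSC call, to deliver keyboard/console input on it. */ | ||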
1013 | if (!state->irq) { | ||
1014 | state->irq = assign_irq_vector(AUTO_ASSIGN); | ||
1015 | ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); | ||
1016 | } | ||
1017 | |||
1018 | printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n", | ||
1019 | state->line, | ||
1020 | state->port, state->irq, | ||
1021 | uart_config[state->type].name); | ||
1022 | } | ||
1023 | |||
1024 | if (tty_register_driver(hp_simserial_driver)) | ||
1025 | panic("Couldn't register simserial driver\n"); | ||
1026 | |||
1027 | return 0; | ||
1028 | } | ||
1029 | |||
1030 | #ifndef MODULE | ||
1031 | __initcall(simrs_init); | ||
1032 | #endif | ||
diff --git a/arch/ia64/hp/zx1/Makefile b/arch/ia64/hp/zx1/Makefile new file mode 100644 index 000000000000..61e878729d1e --- /dev/null +++ b/arch/ia64/hp/zx1/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # ia64/hp/zx1/Makefile | ||
3 | # | ||
4 | # Copyright (C) 2002 Hewlett Packard | ||
5 | # Copyright (C) Alex Williamson (alex_williamson@hp.com) | ||
6 | # | ||
7 | |||
8 | obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o hpzx1_swiotlb_machvec.o | ||
diff --git a/arch/ia64/hp/zx1/hpzx1_machvec.c b/arch/ia64/hp/zx1/hpzx1_machvec.c new file mode 100644 index 000000000000..32518b0f923e --- /dev/null +++ b/arch/ia64/hp/zx1/hpzx1_machvec.c | |||
@@ -0,0 +1,3 @@ | |||
1 | #define MACHVEC_PLATFORM_NAME hpzx1 | ||
2 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1.h> | ||
3 | #include <asm/machvec_init.h> | ||
diff --git a/arch/ia64/hp/zx1/hpzx1_swiotlb_machvec.c b/arch/ia64/hp/zx1/hpzx1_swiotlb_machvec.c new file mode 100644 index 000000000000..4392a96b3c58 --- /dev/null +++ b/arch/ia64/hp/zx1/hpzx1_swiotlb_machvec.c | |||
@@ -0,0 +1,3 @@ | |||
1 | #define MACHVEC_PLATFORM_NAME hpzx1_swiotlb | ||
2 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1_swiotlb.h> | ||
3 | #include <asm/machvec_init.h> | ||