author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/hp/common
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/hp/common')
-rw-r--r-- | arch/ia64/hp/common/Makefile | 10
-rw-r--r-- | arch/ia64/hp/common/hwsw_iommu.c | 185
-rw-r--r-- | arch/ia64/hp/common/sba_iommu.c | 2121
3 files changed, 2316 insertions, 0 deletions
diff --git a/arch/ia64/hp/common/Makefile b/arch/ia64/hp/common/Makefile
new file mode 100644
index 000000000000..f61a60057ff7
--- /dev/null
+++ b/arch/ia64/hp/common/Makefile
@@ -0,0 +1,10 @@
1 | # | ||
2 | # ia64/platform/hp/common/Makefile | ||
3 | # | ||
4 | # Copyright (C) 2002 Hewlett Packard | ||
5 | # Copyright (C) Alex Williamson (alex_williamson@hp.com) | ||
6 | # | ||
7 | |||
8 | obj-y := sba_iommu.o | ||
9 | obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += hwsw_iommu.o | ||
10 | obj-$(CONFIG_IA64_GENERIC) += hwsw_iommu.o | ||
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
new file mode 100644
index 000000000000..80f8ef013939
--- /dev/null
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -0,0 +1,185 @@
1 | /* | ||
2 | * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. | ||
3 | * Contributed by David Mosberger-Tang <davidm@hpl.hp.com> | ||
4 | * | ||
5 | * This is a pseudo I/O MMU which dispatches to the hardware I/O MMU | ||
6 | * whenever possible. We assume that the hardware I/O MMU requires | ||
7 | * full 32-bit addressability, as is the case, e.g., for HP zx1-based | ||
8 | * systems (there, the I/O MMU window is mapped at 3-4GB). If a | ||
9 | * device doesn't provide full 32-bit addressability, we fall back on | ||
10 | * the sw I/O TLB. This is good enough to let us support broken | ||
11 | * hardware such as soundcards which have a DMA engine that can | ||
12 | * address only 28 bits. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | |||
17 | #include <asm/machvec.h> | ||
18 | |||
19 | /* swiotlb declarations & definitions: */ | ||
20 | extern void swiotlb_init_with_default_size (size_t size); | ||
21 | extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; | ||
22 | extern ia64_mv_dma_free_coherent swiotlb_free_coherent; | ||
23 | extern ia64_mv_dma_map_single swiotlb_map_single; | ||
24 | extern ia64_mv_dma_unmap_single swiotlb_unmap_single; | ||
25 | extern ia64_mv_dma_map_sg swiotlb_map_sg; | ||
26 | extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; | ||
27 | extern ia64_mv_dma_supported swiotlb_dma_supported; | ||
28 | extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error; | ||
29 | |||
30 | /* hwiommu declarations & definitions: */ | ||
31 | |||
32 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; | ||
33 | extern ia64_mv_dma_free_coherent sba_free_coherent; | ||
34 | extern ia64_mv_dma_map_single sba_map_single; | ||
35 | extern ia64_mv_dma_unmap_single sba_unmap_single; | ||
36 | extern ia64_mv_dma_map_sg sba_map_sg; | ||
37 | extern ia64_mv_dma_unmap_sg sba_unmap_sg; | ||
38 | extern ia64_mv_dma_supported sba_dma_supported; | ||
39 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | ||
40 | |||
41 | #define hwiommu_alloc_coherent sba_alloc_coherent | ||
42 | #define hwiommu_free_coherent sba_free_coherent | ||
43 | #define hwiommu_map_single sba_map_single | ||
44 | #define hwiommu_unmap_single sba_unmap_single | ||
45 | #define hwiommu_map_sg sba_map_sg | ||
46 | #define hwiommu_unmap_sg sba_unmap_sg | ||
47 | #define hwiommu_dma_supported sba_dma_supported | ||
48 | #define hwiommu_dma_mapping_error sba_dma_mapping_error | ||
49 | #define hwiommu_sync_single_for_cpu machvec_dma_sync_single | ||
50 | #define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg | ||
51 | #define hwiommu_sync_single_for_device machvec_dma_sync_single | ||
52 | #define hwiommu_sync_sg_for_device machvec_dma_sync_sg | ||
53 | |||
54 | |||
55 | /* | ||
56 | * Note: we need to make the determination of whether or not to use | ||
57 | * the sw I/O TLB based purely on the device structure. Anything else | ||
58 | * would be unreliable or would be too intrusive. | ||
59 | */ | ||
60 | static inline int | ||
61 | use_swiotlb (struct device *dev) | ||
62 | { | ||
63 | return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask); | ||
64 | } | ||
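
A minimal standalone sketch of the dispatch rule described in the file header: a device whose DMA mask cannot reach the hardware IOMMU's 32-bit window (e.g. a sound card with a 28-bit DMA engine) gets routed to the software I/O TLB. `struct fake_dev` and the full-32-bit-reach test are illustrative stand-ins, not the kernel's `struct device` or `sba_dma_supported()`.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct device: only the DMA mask matters here. */
struct fake_dev {
	uint64_t dma_mask;
};

/* Stand-in for sba_dma_supported(): the hw IOMMU window needs full
 * 32-bit reach from the device. */
static int hw_iommu_supported(const struct fake_dev *dev)
{
	return dev->dma_mask >= 0xffffffffULL;
}

/* Mirrors use_swiotlb(): fall back to the sw I/O TLB only when the
 * hardware IOMMU cannot serve this device's mask. */
static int would_use_swiotlb(const struct fake_dev *dev)
{
	return dev && dev->dma_mask && !hw_iommu_supported(dev);
}

int main(void)
{
	struct fake_dev sound = { .dma_mask = (1ULL << 28) - 1 };	/* 28-bit DMA engine */
	struct fake_dev nic   = { .dma_mask = 0xffffffffULL };		/* ordinary 32-bit device */

	printf("soundcard -> %s\n", would_use_swiotlb(&sound) ? "swiotlb" : "hw IOMMU");
	printf("nic       -> %s\n", would_use_swiotlb(&nic) ? "swiotlb" : "hw IOMMU");
	return 0;
}
```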
65 | |||
66 | void | ||
67 | hwsw_init (void) | ||
68 | { | ||
69 | /* default to a smallish 2MB sw I/O TLB */ | ||
70 | swiotlb_init_with_default_size (2 * (1<<20)); | ||
71 | } | ||
72 | |||
73 | void * | ||
74 | hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags) | ||
75 | { | ||
76 | if (use_swiotlb(dev)) | ||
77 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | ||
78 | else | ||
79 | return hwiommu_alloc_coherent(dev, size, dma_handle, flags); | ||
80 | } | ||
81 | |||
82 | void | ||
83 | hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
84 | { | ||
85 | if (use_swiotlb(dev)) | ||
86 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | ||
87 | else | ||
88 | hwiommu_free_coherent(dev, size, vaddr, dma_handle); | ||
89 | } | ||
90 | |||
91 | dma_addr_t | ||
92 | hwsw_map_single (struct device *dev, void *addr, size_t size, int dir) | ||
93 | { | ||
94 | if (use_swiotlb(dev)) | ||
95 | return swiotlb_map_single(dev, addr, size, dir); | ||
96 | else | ||
97 | return hwiommu_map_single(dev, addr, size, dir); | ||
98 | } | ||
99 | |||
100 | void | ||
101 | hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir) | ||
102 | { | ||
103 | if (use_swiotlb(dev)) | ||
104 | return swiotlb_unmap_single(dev, iova, size, dir); | ||
105 | else | ||
106 | return hwiommu_unmap_single(dev, iova, size, dir); | ||
107 | } | ||
108 | |||
109 | |||
110 | int | ||
111 | hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
112 | { | ||
113 | if (use_swiotlb(dev)) | ||
114 | return swiotlb_map_sg(dev, sglist, nents, dir); | ||
115 | else | ||
116 | return hwiommu_map_sg(dev, sglist, nents, dir); | ||
117 | } | ||
118 | |||
119 | void | ||
120 | hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
121 | { | ||
122 | if (use_swiotlb(dev)) | ||
123 | return swiotlb_unmap_sg(dev, sglist, nents, dir); | ||
124 | else | ||
125 | return hwiommu_unmap_sg(dev, sglist, nents, dir); | ||
126 | } | ||
127 | |||
128 | void | ||
129 | hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) | ||
130 | { | ||
131 | if (use_swiotlb(dev)) | ||
132 | swiotlb_sync_single_for_cpu(dev, addr, size, dir); | ||
133 | else | ||
134 | hwiommu_sync_single_for_cpu(dev, addr, size, dir); | ||
135 | } | ||
136 | |||
137 | void | ||
138 | hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir) | ||
139 | { | ||
140 | if (use_swiotlb(dev)) | ||
141 | swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
142 | else | ||
143 | hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
144 | } | ||
145 | |||
146 | void | ||
147 | hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir) | ||
148 | { | ||
149 | if (use_swiotlb(dev)) | ||
150 | swiotlb_sync_single_for_device(dev, addr, size, dir); | ||
151 | else | ||
152 | hwiommu_sync_single_for_device(dev, addr, size, dir); | ||
153 | } | ||
154 | |||
155 | void | ||
156 | hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir) | ||
157 | { | ||
158 | if (use_swiotlb(dev)) | ||
159 | swiotlb_sync_sg_for_device(dev, sg, nelems, dir); | ||
160 | else | ||
161 | hwiommu_sync_sg_for_device(dev, sg, nelems, dir); | ||
162 | } | ||
163 | |||
164 | int | ||
165 | hwsw_dma_supported (struct device *dev, u64 mask) | ||
166 | { | ||
167 | if (hwiommu_dma_supported(dev, mask)) | ||
168 | return 1; | ||
169 | return swiotlb_dma_supported(dev, mask); | ||
170 | } | ||
171 | |||
172 | int | ||
173 | hwsw_dma_mapping_error (dma_addr_t dma_addr) | ||
174 | { | ||
175 | return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr); | ||
176 | } | ||
177 | |||
178 | EXPORT_SYMBOL(hwsw_dma_mapping_error); | ||
179 | EXPORT_SYMBOL(hwsw_map_single); | ||
180 | EXPORT_SYMBOL(hwsw_unmap_single); | ||
181 | EXPORT_SYMBOL(hwsw_map_sg); | ||
182 | EXPORT_SYMBOL(hwsw_unmap_sg); | ||
183 | EXPORT_SYMBOL(hwsw_dma_supported); | ||
184 | EXPORT_SYMBOL(hwsw_alloc_coherent); | ||
185 | EXPORT_SYMBOL(hwsw_free_coherent); | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
new file mode 100644
index 000000000000..017c9ab5fc1b
--- /dev/null
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -0,0 +1,2121 @@
1 | /* | ||
2 | ** IA64 System Bus Adapter (SBA) I/O MMU manager | ||
3 | ** | ||
4 | ** (c) Copyright 2002-2004 Alex Williamson | ||
5 | ** (c) Copyright 2002-2003 Grant Grundler | ||
6 | ** (c) Copyright 2002-2004 Hewlett-Packard Company | ||
7 | ** | ||
8 | ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) | ||
9 | ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) | ||
10 | ** | ||
11 | ** This program is free software; you can redistribute it and/or modify | ||
12 | ** it under the terms of the GNU General Public License as published by | ||
13 | ** the Free Software Foundation; either version 2 of the License, or | ||
14 | ** (at your option) any later version. | ||
15 | ** | ||
16 | ** | ||
17 | ** This module initializes the IOC (I/O Controller) found on HP | ||
18 | ** McKinley machines and their successors. | ||
19 | ** | ||
20 | */ | ||
21 | |||
22 | #include <linux/config.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/proc_fs.h> | ||
33 | #include <linux/seq_file.h> | ||
34 | #include <linux/acpi.h> | ||
35 | #include <linux/efi.h> | ||
36 | #include <linux/nodemask.h> | ||
37 | #include <linux/bitops.h> /* hweight64() */ | ||
38 | |||
39 | #include <asm/delay.h> /* ia64_get_itc() */ | ||
40 | #include <asm/io.h> | ||
41 | #include <asm/page.h> /* PAGE_OFFSET */ | ||
42 | #include <asm/dma.h> | ||
43 | #include <asm/system.h> /* wmb() */ | ||
44 | |||
45 | #include <asm/acpi-ext.h> | ||
46 | |||
47 | #define PFX "IOC: " | ||
48 | |||
49 | /* | ||
50 | ** Enables timing of pdir resource map searches. Output in /proc. | ||
51 | ** Disabled by default to optimize performance. | ||
52 | */ | ||
53 | #undef PDIR_SEARCH_TIMING | ||
54 | |||
55 | /* | ||
56 | ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If | ||
57 | ** not defined, all DMA will be 32bit and go through the TLB. | ||
58 | ** There's potentially a conflict in the bio merge code with us | ||
59 | ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing | ||
60 | ** appears to give more performance than bio-level virtual merging, we'll | ||
61 | ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to | ||
62 | ** completely restrict DMA to the IOMMU. | ||
63 | */ | ||
64 | #define ALLOW_IOV_BYPASS | ||
65 | |||
66 | /* | ||
67 | ** This option specifically allows/disallows bypassing scatterlists with | ||
68 | ** multiple entries. Coalescing these entries can allow better DMA streaming | ||
69 | ** and in some cases shows better performance than entirely bypassing the | ||
70 | ** IOMMU. Performance increase on the order of 1-2% sequential output/input | ||
71 | ** using bonnie++ on a RAID0 MD device (sym2 & mpt). | ||
72 | */ | ||
73 | #undef ALLOW_IOV_BYPASS_SG | ||
74 | |||
75 | /* | ||
76 | ** If a device prefetches beyond the end of a valid pdir entry, it will cause | ||
77 | ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should | ||
78 | ** disconnect on 4k boundaries and prevent such issues. If the device is | ||
79 | ** particularly aggressive, this option will keep the entire pdir valid such | ||
80 | ** that prefetching will hit a valid address. This could severely impact | ||
81 | ** error containment, and is therefore off by default. The page that is | ||
82 | ** used for spill-over is poisoned, so that should help debugging somewhat. | ||
83 | */ | ||
84 | #undef FULL_VALID_PDIR | ||
85 | |||
86 | #define ENABLE_MARK_CLEAN | ||
87 | |||
88 | /* | ||
89 | ** The number of debug flags is a clue - this code is fragile. NOTE: since | ||
90 | ** tightening the use of res_lock the resource bitmap and actual pdir are no | ||
91 | ** longer guaranteed to stay in sync. The sanity checking code isn't going to | ||
92 | ** like that. | ||
93 | */ | ||
94 | #undef DEBUG_SBA_INIT | ||
95 | #undef DEBUG_SBA_RUN | ||
96 | #undef DEBUG_SBA_RUN_SG | ||
97 | #undef DEBUG_SBA_RESOURCE | ||
98 | #undef ASSERT_PDIR_SANITY | ||
99 | #undef DEBUG_LARGE_SG_ENTRIES | ||
100 | #undef DEBUG_BYPASS | ||
101 | |||
102 | #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY) | ||
103 | #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive | ||
104 | #endif | ||
105 | |||
106 | #define SBA_INLINE __inline__ | ||
107 | /* #define SBA_INLINE */ | ||
108 | |||
109 | #ifdef DEBUG_SBA_INIT | ||
110 | #define DBG_INIT(x...) printk(x) | ||
111 | #else | ||
112 | #define DBG_INIT(x...) | ||
113 | #endif | ||
114 | |||
115 | #ifdef DEBUG_SBA_RUN | ||
116 | #define DBG_RUN(x...) printk(x) | ||
117 | #else | ||
118 | #define DBG_RUN(x...) | ||
119 | #endif | ||
120 | |||
121 | #ifdef DEBUG_SBA_RUN_SG | ||
122 | #define DBG_RUN_SG(x...) printk(x) | ||
123 | #else | ||
124 | #define DBG_RUN_SG(x...) | ||
125 | #endif | ||
126 | |||
127 | |||
128 | #ifdef DEBUG_SBA_RESOURCE | ||
129 | #define DBG_RES(x...) printk(x) | ||
130 | #else | ||
131 | #define DBG_RES(x...) | ||
132 | #endif | ||
133 | |||
134 | #ifdef DEBUG_BYPASS | ||
135 | #define DBG_BYPASS(x...) printk(x) | ||
136 | #else | ||
137 | #define DBG_BYPASS(x...) | ||
138 | #endif | ||
139 | |||
140 | #ifdef ASSERT_PDIR_SANITY | ||
141 | #define ASSERT(expr) \ | ||
142 | if(!(expr)) { \ | ||
143 | printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \ | ||
144 | panic(#expr); \ | ||
145 | } | ||
146 | #else | ||
147 | #define ASSERT(expr) | ||
148 | #endif | ||
149 | |||
150 | /* | ||
151 | ** The number of pdir entries to "free" before issuing | ||
152 | ** a read to PCOM register to flush out PCOM writes. | ||
153 | ** Interacts with allocation granularity (ie 4 or 8 entries | ||
154 | ** allocated and free'd/purged at a time might make this | ||
155 | ** less interesting). | ||
156 | */ | ||
157 | #define DELAYED_RESOURCE_CNT 64 | ||
158 | |||
159 | #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) | ||
160 | #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP) | ||
161 | #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) | ||
162 | #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) | ||
163 | |||
164 | #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ | ||
165 | |||
166 | #define IOC_FUNC_ID 0x000 | ||
167 | #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */ | ||
168 | #define IOC_IBASE 0x300 /* IO TLB */ | ||
169 | #define IOC_IMASK 0x308 | ||
170 | #define IOC_PCOM 0x310 | ||
171 | #define IOC_TCNFG 0x318 | ||
172 | #define IOC_PDIR_BASE 0x320 | ||
173 | |||
174 | #define IOC_ROPE0_CFG 0x500 | ||
175 | #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */ | ||
176 | |||
177 | |||
178 | /* AGP GART driver looks for this */ | ||
179 | #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL | ||
180 | |||
181 | /* | ||
182 | ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register) | ||
183 | ** | ||
184 | ** Some IOCs (sx1000) can run at the above page sizes, but are | ||
185 | ** really only supported using the IOC at a 4k page size. | ||
186 | ** | ||
187 | ** iovp_size could only be greater than PAGE_SIZE if we are | ||
188 | ** confident the drivers really only touch the next physical | ||
189 | ** page iff that driver instance owns it. | ||
190 | */ | ||
191 | static unsigned long iovp_size; | ||
192 | static unsigned long iovp_shift; | ||
193 | static unsigned long iovp_mask; | ||
194 | |||
195 | struct ioc { | ||
196 | void __iomem *ioc_hpa; /* I/O MMU base address */ | ||
197 | char *res_map; /* resource map, bit == pdir entry */ | ||
198 | u64 *pdir_base; /* physical base address */ | ||
199 | unsigned long ibase; /* pdir IOV Space base */ | ||
200 | unsigned long imask; /* pdir IOV Space mask */ | ||
201 | |||
202 | unsigned long *res_hint; /* next avail IOVP - circular search */ | ||
203 | unsigned long dma_mask; | ||
204 | spinlock_t res_lock; /* protects the resource bitmap, but must be held when */ | ||
205 | /* clearing pdir to prevent races with allocations. */ | ||
206 | unsigned int res_bitshift; /* from the RIGHT! */ | ||
207 | unsigned int res_size; /* size of resource map in bytes */ | ||
208 | #ifdef CONFIG_NUMA | ||
209 | unsigned int node; /* node where this IOC lives */ | ||
210 | #endif | ||
211 | #if DELAYED_RESOURCE_CNT > 0 | ||
212 | spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */ | ||
213 | /* than res_lock for bigger systems. */ | ||
214 | int saved_cnt; | ||
215 | struct sba_dma_pair { | ||
216 | dma_addr_t iova; | ||
217 | size_t size; | ||
218 | } saved[DELAYED_RESOURCE_CNT]; | ||
219 | #endif | ||
220 | |||
221 | #ifdef PDIR_SEARCH_TIMING | ||
222 | #define SBA_SEARCH_SAMPLE 0x100 | ||
223 | unsigned long avg_search[SBA_SEARCH_SAMPLE]; | ||
224 | unsigned long avg_idx; /* current index into avg_search */ | ||
225 | #endif | ||
226 | |||
227 | /* Stuff we don't need in performance path */ | ||
228 | struct ioc *next; /* list of IOC's in system */ | ||
229 | acpi_handle handle; /* for multiple IOC's */ | ||
230 | const char *name; | ||
231 | unsigned int func_id; | ||
232 | unsigned int rev; /* HW revision of chip */ | ||
233 | u32 iov_size; | ||
234 | unsigned int pdir_size; /* in bytes, determined by IOV Space size */ | ||
235 | struct pci_dev *sac_only_dev; | ||
236 | }; | ||
237 | |||
238 | static struct ioc *ioc_list; | ||
239 | static int reserve_sba_gart = 1; | ||
240 | |||
241 | static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); | ||
242 | static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t); | ||
243 | |||
244 | #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) | ||
245 | |||
246 | #ifdef FULL_VALID_PDIR | ||
247 | static u64 prefetch_spill_page; | ||
248 | #endif | ||
249 | |||
250 | #ifdef CONFIG_PCI | ||
251 | # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ | ||
252 | ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) | ||
253 | #else | ||
254 | # define GET_IOC(dev) NULL | ||
255 | #endif | ||
256 | |||
257 | /* | ||
258 | ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up | ||
259 | ** (or rather not merge) DMA's into manageable chunks. | ||
260 | ** On parisc, this is more of a software/tuning constraint | ||
261 | ** than a HW one. I/O MMU allocation algorithms can be | ||
262 | ** faster with smaller sizes (to some degree). | ||
263 | */ | ||
264 | #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size) | ||
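
For concreteness, with 64-bit longs and an assumed 4 KB I/O page, DMA_CHUNK_SIZE works out to 256 KB; a quick sketch of that arithmetic (the iovp_size value is an assumption for illustration, since it is set at runtime):

```c
#include <stdio.h>

int main(void)
{
	unsigned long bits_per_long = 64;	/* BITS_PER_LONG on ia64 */
	unsigned long iovp_size = 4096;		/* assumed 4 KB I/O page size */

	/* DMA_CHUNK_SIZE = BITS_PER_LONG * iovp_size = 256 KB with these values */
	printf("DMA_CHUNK_SIZE = %lu KB\n", bits_per_long * iovp_size / 1024);
	return 0;
}
```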
265 | |||
266 | #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) | ||
267 | |||
268 | /************************************ | ||
269 | ** SBA register read and write support | ||
270 | ** | ||
271 | ** BE WARNED: register writes are posted. | ||
272 | ** (ie follow writes which must reach HW with a read) | ||
273 | ** | ||
274 | */ | ||
275 | #define READ_REG(addr) __raw_readq(addr) | ||
276 | #define WRITE_REG(val, addr) __raw_writeq(val, addr) | ||
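
The warning about posted writes above is why the unmap paths later in this file follow a purge write with `READ_REG(ioc->ioc_hpa+IOC_PCOM)`. A minimal sketch of that flush pattern, using plain-memory stand-ins rather than the real `__raw_readq()`/`__raw_writeq()` accessors and an uncached register window:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative MMIO accessors; 'volatile' stands in for an uncached mapping. */
static inline uint64_t read_reg(volatile uint64_t *addr)
{
	return *addr;
}

static inline void write_reg(uint64_t val, volatile uint64_t *addr)
{
	*addr = val;
}

/* Posted-write flush: the write may linger in a buffer, so read the same
 * device register back to force it out before relying on its effect. */
static void purge_and_flush(volatile uint64_t *pcom_reg, uint64_t purge_cmd)
{
	write_reg(purge_cmd, pcom_reg);	/* posted: may not have reached HW yet */
	(void) read_reg(pcom_reg);	/* the read pushes the posted write through */
}

int main(void)
{
	volatile uint64_t fake_pcom = 0;		/* stand-in for ioc_hpa + IOC_PCOM */
	purge_and_flush(&fake_pcom, 0x40000 | 12);	/* purge IOVP 0x40000, one 4 KB page */
	printf("PCOM now holds 0x%llx\n", (unsigned long long) fake_pcom);
	return 0;
}
```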
277 | |||
278 | #ifdef DEBUG_SBA_INIT | ||
279 | |||
280 | /** | ||
281 | * sba_dump_tlb - debugging only - print IOMMU operating parameters | ||
282 | * @hpa: base address of the IOMMU | ||
283 | * | ||
284 | * Print the size/location of the IO MMU PDIR. | ||
285 | */ | ||
286 | static void | ||
287 | sba_dump_tlb(char *hpa) | ||
288 | { | ||
289 | DBG_INIT("IO TLB at 0x%p\n", (void *)hpa); | ||
290 | DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE)); | ||
291 | DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK)); | ||
292 | DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG)); | ||
293 | DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE)); | ||
294 | DBG_INIT("\n"); | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | |||
299 | #ifdef ASSERT_PDIR_SANITY | ||
300 | |||
301 | /** | ||
302 | * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry | ||
303 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
304 | * @msg: text to print on the output line. | ||
305 | * @pide: pdir index. | ||
306 | * | ||
307 | * Print one entry of the IO MMU PDIR in human readable form. | ||
308 | */ | ||
309 | static void | ||
310 | sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) | ||
311 | { | ||
312 | /* start printing from lowest pde in rval */ | ||
313 | u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]; | ||
314 | unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)]; | ||
315 | uint rcnt; | ||
316 | |||
317 | printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", | ||
318 | msg, rptr, pide & (BITS_PER_LONG - 1), *rptr); | ||
319 | |||
320 | rcnt = 0; | ||
321 | while (rcnt < BITS_PER_LONG) { | ||
322 | printk(KERN_DEBUG "%s %2d %p %016Lx\n", | ||
323 | (rcnt == (pide & (BITS_PER_LONG - 1))) | ||
324 | ? " -->" : " ", | ||
325 | rcnt, ptr, (unsigned long long) *ptr ); | ||
326 | rcnt++; | ||
327 | ptr++; | ||
328 | } | ||
329 | printk(KERN_DEBUG "%s", msg); | ||
330 | } | ||
331 | |||
332 | |||
333 | /** | ||
334 | * sba_check_pdir - debugging only - consistency checker | ||
335 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
336 | * @msg: text to print on the output line. | ||
337 | * | ||
338 | * Verify the resource map and pdir state is consistent | ||
339 | */ | ||
340 | static int | ||
341 | sba_check_pdir(struct ioc *ioc, char *msg) | ||
342 | { | ||
343 | u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]); | ||
344 | u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */ | ||
345 | u64 *pptr = ioc->pdir_base; /* pdir ptr */ | ||
346 | uint pide = 0; | ||
347 | |||
348 | while (rptr < rptr_end) { | ||
349 | u64 rval; | ||
350 | int rcnt; /* number of bits we might check */ | ||
351 | |||
352 | rval = *rptr; | ||
353 | rcnt = 64; | ||
354 | |||
355 | while (rcnt) { | ||
356 | /* Get last byte and highest bit from that */ | ||
357 | u32 pde = ((u32)((*pptr >> (63)) & 0x1)); | ||
358 | if ((rval & 0x1) ^ pde) | ||
359 | { | ||
360 | /* | ||
361 | ** BUMMER! -- res_map != pdir -- | ||
362 | ** Dump rval and matching pdir entries | ||
363 | */ | ||
364 | sba_dump_pdir_entry(ioc, msg, pide); | ||
365 | return(1); | ||
366 | } | ||
367 | rcnt--; | ||
368 | rval >>= 1; /* try the next bit */ | ||
369 | pptr++; | ||
370 | pide++; | ||
371 | } | ||
372 | rptr++; /* look at next word of res_map */ | ||
373 | } | ||
374 | /* It'd be nice if we always got here :^) */ | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | |||
379 | /** | ||
380 | * sba_dump_sg - debugging only - print Scatter-Gather list | ||
381 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
382 | * @startsg: head of the SG list | ||
383 | * @nents: number of entries in SG list | ||
384 | * | ||
385 | * print the SG list so we can verify it's correct by hand. | ||
386 | */ | ||
387 | static void | ||
388 | sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | ||
389 | { | ||
390 | while (nents-- > 0) { | ||
391 | printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, | ||
392 | startsg->dma_address, startsg->dma_length, | ||
393 | sba_sg_address(startsg)); | ||
394 | startsg++; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static void | ||
399 | sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | ||
400 | { | ||
401 | struct scatterlist *the_sg = startsg; | ||
402 | int the_nents = nents; | ||
403 | |||
404 | while (the_nents-- > 0) { | ||
405 | if (sba_sg_address(the_sg) == 0x0UL) | ||
406 | sba_dump_sg(NULL, startsg, nents); | ||
407 | the_sg++; | ||
408 | } | ||
409 | } | ||
410 | |||
411 | #endif /* ASSERT_PDIR_SANITY */ | ||
412 | |||
413 | |||
414 | |||
415 | |||
416 | /************************************************************** | ||
417 | * | ||
418 | * I/O Pdir Resource Management | ||
419 | * | ||
420 | * Bits set in the resource map are in use. | ||
421 | * Each bit can represent a number of pages. | ||
422 | * LSbs represent lower addresses (IOVA's). | ||
423 | * | ||
424 | ***************************************************************/ | ||
425 | #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ | ||
426 | |||
427 | /* Convert from IOVP to IOVA and vice versa. */ | ||
428 | #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset)) | ||
429 | #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) | ||
430 | |||
431 | #define PDIR_ENTRY_SIZE sizeof(u64) | ||
432 | |||
433 | #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift) | ||
434 | |||
435 | #define RESMAP_MASK(n) ~(~0UL << (n)) | ||
436 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | ||
437 | |||
438 | |||
439 | /** | ||
440 | * For most cases the normal get_order is sufficient; however, it limits us | ||
441 | * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity. | ||
442 | * It only incurs about 1 clock cycle to use this one with the static variable | ||
443 | * and makes the code more intuitive. | ||
444 | */ | ||
445 | static SBA_INLINE int | ||
446 | get_iovp_order (unsigned long size) | ||
447 | { | ||
448 | long double d = size - 1; | ||
449 | long order; | ||
450 | |||
451 | order = ia64_getf_exp(d); | ||
452 | order = order - iovp_shift - 0xffff + 1; | ||
453 | if (order < 0) | ||
454 | order = 0; | ||
455 | return order; | ||
456 | } | ||
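
The routine above reads the exponent of (size - 1) out of an FP register via ia64_getf_exp() and subtracts the 0xffff exponent bias plus iovp_shift, which is just a fast way of computing ceil(log2(size in I/O pages)). A portable sketch of the same computation using a compiler builtin instead of the ia64 trick (iovp_shift = 12 is an assumption here):

```c
#include <stdio.h>

static unsigned long iovp_shift = 12;	/* assumed 4 KB I/O page size */

/* Portable equivalent of get_iovp_order(): the power-of-two order of the
 * allocation, i.e. ceil(log2(size >> iovp_shift)), clamped at 0. */
static int get_iovp_order_portable(unsigned long size)
{
	long order;

	if (size <= 1)
		return 0;
	/* 64 - clz(size - 1) == ceil(log2(size)) for size >= 2 */
	order = (long) (64 - __builtin_clzll(size - 1)) - (long) iovp_shift;
	return order < 0 ? 0 : (int) order;
}

int main(void)
{
	printf("%d\n", get_iovp_order_portable(4096));	/* 0: one 4 KB page     */
	printf("%d\n", get_iovp_order_portable(5000));	/* 1: rounds to 2 pages */
	printf("%d\n", get_iovp_order_portable(65536));	/* 4: 16 pages          */
	return 0;
}
```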
457 | |||
458 | /** | ||
459 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | ||
460 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
461 | * @bits_wanted: number of entries we need. | ||
462 | * | ||
463 | * Find consecutive free bits in resource bitmap. | ||
464 | * Each bit represents one entry in the IO Pdir. | ||
465 | * Cool perf optimization: search for log2(size) bits at a time. | ||
466 | */ | ||
467 | static SBA_INLINE unsigned long | ||
468 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | ||
469 | { | ||
470 | unsigned long *res_ptr = ioc->res_hint; | ||
471 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | ||
472 | unsigned long pide = ~0UL; | ||
473 | |||
474 | ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); | ||
475 | ASSERT(res_ptr < res_end); | ||
476 | |||
477 | /* | ||
478 | * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts | ||
479 | * if a TLB entry is purged while in use. sba_mark_invalid() | ||
480 | * purges IOTLB entries in power-of-two sizes, so we also | ||
481 | * allocate IOVA space in power-of-two sizes. | ||
482 | */ | ||
483 | bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift); | ||
484 | |||
485 | if (likely(bits_wanted == 1)) { | ||
486 | unsigned int bitshiftcnt; | ||
487 | for(; res_ptr < res_end ; res_ptr++) { | ||
488 | if (likely(*res_ptr != ~0UL)) { | ||
489 | bitshiftcnt = ffz(*res_ptr); | ||
490 | *res_ptr |= (1UL << bitshiftcnt); | ||
491 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | ||
492 | pide <<= 3; /* convert to bit address */ | ||
493 | pide += bitshiftcnt; | ||
494 | ioc->res_bitshift = bitshiftcnt + bits_wanted; | ||
495 | goto found_it; | ||
496 | } | ||
497 | } | ||
498 | goto not_found; | ||
499 | |||
500 | } | ||
501 | |||
502 | if (likely(bits_wanted <= BITS_PER_LONG/2)) { | ||
503 | /* | ||
504 | ** Search the resource bit map on well-aligned values. | ||
505 | ** "o" is the alignment. | ||
506 | ** We need the alignment to invalidate I/O TLB using | ||
507 | ** SBA HW features in the unmap path. | ||
508 | */ | ||
509 | unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift); | ||
510 | uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); | ||
511 | unsigned long mask, base_mask; | ||
512 | |||
513 | base_mask = RESMAP_MASK(bits_wanted); | ||
514 | mask = base_mask << bitshiftcnt; | ||
515 | |||
516 | DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); | ||
517 | for(; res_ptr < res_end ; res_ptr++) | ||
518 | { | ||
519 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | ||
520 | ASSERT(0 != mask); | ||
521 | for (; mask ; mask <<= o, bitshiftcnt += o) { | ||
522 | if(0 == ((*res_ptr) & mask)) { | ||
523 | *res_ptr |= mask; /* mark resources busy! */ | ||
524 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | ||
525 | pide <<= 3; /* convert to bit address */ | ||
526 | pide += bitshiftcnt; | ||
527 | ioc->res_bitshift = bitshiftcnt + bits_wanted; | ||
528 | goto found_it; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | bitshiftcnt = 0; | ||
533 | mask = base_mask; | ||
534 | |||
535 | } | ||
536 | |||
537 | } else { | ||
538 | int qwords, bits, i; | ||
539 | unsigned long *end; | ||
540 | |||
541 | qwords = bits_wanted >> 6; /* /64 */ | ||
542 | bits = bits_wanted - (qwords * BITS_PER_LONG); | ||
543 | |||
544 | end = res_end - qwords; | ||
545 | |||
546 | for (; res_ptr < end; res_ptr++) { | ||
547 | for (i = 0 ; i < qwords ; i++) { | ||
548 | if (res_ptr[i] != 0) | ||
549 | goto next_ptr; | ||
550 | } | ||
551 | if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits)) | ||
552 | continue; | ||
553 | |||
554 | /* Found it, mark it */ | ||
555 | for (i = 0 ; i < qwords ; i++) | ||
556 | res_ptr[i] = ~0UL; | ||
557 | res_ptr[i] |= RESMAP_MASK(bits); | ||
558 | |||
559 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | ||
560 | pide <<= 3; /* convert to bit address */ | ||
561 | res_ptr += qwords; | ||
562 | ioc->res_bitshift = bits; | ||
563 | goto found_it; | ||
564 | next_ptr: | ||
565 | ; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | not_found: | ||
570 | prefetch(ioc->res_map); | ||
571 | ioc->res_hint = (unsigned long *) ioc->res_map; | ||
572 | ioc->res_bitshift = 0; | ||
573 | return (pide); | ||
574 | |||
575 | found_it: | ||
576 | ioc->res_hint = res_ptr; | ||
577 | return (pide); | ||
578 | } | ||
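
A worked example of the power-of-two rounding and alignment discussed above (the AR2305 note): assuming iovp_shift = 12, a 20 KB request needs 5 pages, which the search rounds up to an 8-bit run placed on an 8-bit-aligned boundary of the resource map, so the later PCOM purge can cover it with one naturally aligned power-of-two range. The hint value is made up.

```c
#include <stdio.h>

#define ROUNDUP(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int main(void)
{
	unsigned long iovp_shift = 12;			/* assumed 4 KB I/O pages */
	unsigned long size = 20 * 1024;			/* a 20 KB mapping request */
	unsigned long pages = size >> iovp_shift;	/* 5 pages */
	unsigned long bits = 1;
	unsigned long hint = 3;				/* pretend res_bitshift is 3 */

	/* Round the request up to a power of two, as sba_search_bitmap() does via
	 * get_iovp_order(), so unmap can purge one naturally aligned range. */
	while (bits < pages)
		bits <<= 1;				/* 5 -> 8 bits */

	printf("pages=%lu, rounded to %lu bits, search resumes at bit %lu\n",
	       pages, bits, ROUNDUP(hint, bits));	/* hint aligned up to bit 8 */
	return 0;
}
```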
579 | |||
580 | |||
581 | /** | ||
582 | * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap | ||
583 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
584 | * @size: number of bytes to create a mapping for | ||
585 | * | ||
586 | * Given a size, find consecutive unmarked and then mark those bits in the | ||
587 | * resource bit map. | ||
588 | */ | ||
589 | static int | ||
590 | sba_alloc_range(struct ioc *ioc, size_t size) | ||
591 | { | ||
592 | unsigned int pages_needed = size >> iovp_shift; | ||
593 | #ifdef PDIR_SEARCH_TIMING | ||
594 | unsigned long itc_start; | ||
595 | #endif | ||
596 | unsigned long pide; | ||
597 | unsigned long flags; | ||
598 | |||
599 | ASSERT(pages_needed); | ||
600 | ASSERT(0 == (size & ~iovp_mask)); | ||
601 | |||
602 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
603 | |||
604 | #ifdef PDIR_SEARCH_TIMING | ||
605 | itc_start = ia64_get_itc(); | ||
606 | #endif | ||
607 | /* | ||
608 | ** "seek and ye shall find"...praying never hurts either... | ||
609 | */ | ||
610 | pide = sba_search_bitmap(ioc, pages_needed); | ||
611 | if (unlikely(pide >= (ioc->res_size << 3))) { | ||
612 | pide = sba_search_bitmap(ioc, pages_needed); | ||
613 | if (unlikely(pide >= (ioc->res_size << 3))) { | ||
614 | #if DELAYED_RESOURCE_CNT > 0 | ||
615 | /* | ||
616 | ** With delayed resource freeing, we can give this one more shot. We're | ||
617 | ** getting close to being in trouble here, so do what we can to make this | ||
618 | ** one count. | ||
619 | */ | ||
620 | spin_lock(&ioc->saved_lock); | ||
621 | if (ioc->saved_cnt > 0) { | ||
622 | struct sba_dma_pair *d; | ||
623 | int cnt = ioc->saved_cnt; | ||
624 | |||
625 | d = &(ioc->saved[ioc->saved_cnt]); | ||
626 | |||
627 | while (cnt--) { | ||
628 | sba_mark_invalid(ioc, d->iova, d->size); | ||
629 | sba_free_range(ioc, d->iova, d->size); | ||
630 | d--; | ||
631 | } | ||
632 | ioc->saved_cnt = 0; | ||
633 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | ||
634 | } | ||
635 | spin_unlock(&ioc->saved_lock); | ||
636 | |||
637 | pide = sba_search_bitmap(ioc, pages_needed); | ||
638 | if (unlikely(pide >= (ioc->res_size << 3))) | ||
639 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | ||
640 | ioc->ioc_hpa); | ||
641 | #else | ||
642 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | ||
643 | ioc->ioc_hpa); | ||
644 | #endif | ||
645 | } | ||
646 | } | ||
647 | |||
648 | #ifdef PDIR_SEARCH_TIMING | ||
649 | ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed; | ||
650 | ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; | ||
651 | #endif | ||
652 | |||
653 | prefetchw(&(ioc->pdir_base[pide])); | ||
654 | |||
655 | #ifdef ASSERT_PDIR_SANITY | ||
656 | /* verify the first enable bit is clear */ | ||
657 | if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) { | ||
658 | sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); | ||
659 | } | ||
660 | #endif | ||
661 | |||
662 | DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", | ||
663 | __FUNCTION__, size, pages_needed, pide, | ||
664 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), | ||
665 | ioc->res_bitshift ); | ||
666 | |||
667 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
668 | |||
669 | return (pide); | ||
670 | } | ||
671 | |||
672 | |||
673 | /** | ||
674 | * sba_free_range - unmark bits in IO PDIR resource bitmap | ||
675 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
676 | * @iova: IO virtual address which was previously allocated. | ||
677 | * @size: number of bytes to create a mapping for | ||
678 | * | ||
679 | * clear bits in the ioc's resource map | ||
680 | */ | ||
681 | static SBA_INLINE void | ||
682 | sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) | ||
683 | { | ||
684 | unsigned long iovp = SBA_IOVP(ioc, iova); | ||
685 | unsigned int pide = PDIR_INDEX(iovp); | ||
686 | unsigned int ridx = pide >> 3; /* convert bit to byte address */ | ||
687 | unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); | ||
688 | int bits_not_wanted = size >> iovp_shift; | ||
689 | unsigned long m; | ||
690 | |||
691 | /* Round up to power-of-two size: see AR2305 note above */ | ||
692 | bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift); | ||
693 | for (; bits_not_wanted > 0 ; res_ptr++) { | ||
694 | |||
695 | if (unlikely(bits_not_wanted > BITS_PER_LONG)) { | ||
696 | |||
697 | /* these mappings start 64bit aligned */ | ||
698 | *res_ptr = 0UL; | ||
699 | bits_not_wanted -= BITS_PER_LONG; | ||
700 | pide += BITS_PER_LONG; | ||
701 | |||
702 | } else { | ||
703 | |||
704 | /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */ | ||
705 | m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); | ||
706 | bits_not_wanted = 0; | ||
707 | |||
708 | DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size, | ||
709 | bits_not_wanted, m, pide, res_ptr, *res_ptr); | ||
710 | |||
711 | ASSERT(m != 0); | ||
712 | ASSERT(bits_not_wanted); | ||
713 | ASSERT((*res_ptr & m) == m); /* verify same bits are set */ | ||
714 | *res_ptr &= ~m; | ||
715 | } | ||
716 | } | ||
717 | } | ||
718 | |||
719 | |||
720 | /************************************************************** | ||
721 | * | ||
722 | * "Dynamic DMA Mapping" support (aka "Coherent I/O") | ||
723 | * | ||
724 | ***************************************************************/ | ||
725 | |||
726 | /** | ||
727 | * sba_io_pdir_entry - fill in one IO PDIR entry | ||
728 | * @pdir_ptr: pointer to IO PDIR entry | ||
729 | * @vba: Virtual CPU address of buffer to map | ||
730 | * | ||
731 | * SBA Mapping Routine | ||
732 | * | ||
733 | * Given a virtual address (vba, arg1) sba_io_pdir_entry() | ||
734 | * loads the I/O PDIR entry pointed to by pdir_ptr (arg0). | ||
735 | * Each IO Pdir entry consists of 8 bytes as shown below | ||
736 | * (LSB == bit 0): | ||
737 | * | ||
738 | * 63 40 11 7 0 | ||
739 | * +-+---------------------+----------------------------------+----+--------+ | ||
740 | * |V| U | PPN[39:12] | U | FF | | ||
741 | * +-+---------------------+----------------------------------+----+--------+ | ||
742 | * | ||
743 | * V == Valid Bit | ||
744 | * U == Unused | ||
745 | * PPN == Physical Page Number | ||
746 | * | ||
747 | * The physical address fields are filled with the results of virt_to_phys() | ||
748 | * on the vba. | ||
749 | */ | ||
750 | |||
751 | #if 1 | ||
752 | #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \ | ||
753 | | 0x8000000000000000ULL) | ||
754 | #else | ||
755 | void SBA_INLINE | ||
756 | sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba) | ||
757 | { | ||
758 | *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL); | ||
759 | } | ||
760 | #endif | ||
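
A sketch of the PDIR entry layout documented above: bit 63 is the valid bit, bits 39:12 hold the physical page number, and the macro form simply masks off bits 63:61 and the page-offset bits of the physical address before setting the valid bit. The physical address below is made up for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Build one IO PDIR entry the way the macro form above does: keep the
 * physical page bits, clear 63:61 and the 4 KB page offset, set valid. */
static uint64_t make_pdir_entry(uint64_t phys_addr)
{
	return (phys_addr & ~0xE000000000000FFFULL) | 0x8000000000000000ULL;
}

int main(void)
{
	uint64_t phys = 0x000000407f263000ULL;		/* made-up physical address */
	uint64_t pde  = make_pdir_entry(phys);

	printf("entry     = 0x%016llx\n", (unsigned long long) pde);
	printf("valid bit = %llu\n", (unsigned long long) (pde >> 63));
	printf("PPN<<12   = 0x%llx\n", (unsigned long long) (pde & 0x000000FFFFFFF000ULL));
	return 0;
}
```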
761 | |||
762 | #ifdef ENABLE_MARK_CLEAN | ||
763 | /** | ||
764 | * Since DMA is i-cache coherent, any (complete) pages that were written via | ||
765 | * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to | ||
766 | * flush them when they get mapped into an executable vm-area. | ||
767 | */ | ||
768 | static void | ||
769 | mark_clean (void *addr, size_t size) | ||
770 | { | ||
771 | unsigned long pg_addr, end; | ||
772 | |||
773 | pg_addr = PAGE_ALIGN((unsigned long) addr); | ||
774 | end = (unsigned long) addr + size; | ||
775 | while (pg_addr + PAGE_SIZE <= end) { | ||
776 | struct page *page = virt_to_page((void *)pg_addr); | ||
777 | set_bit(PG_arch_1, &page->flags); | ||
778 | pg_addr += PAGE_SIZE; | ||
779 | } | ||
780 | } | ||
781 | #endif | ||
782 | |||
783 | /** | ||
784 | * sba_mark_invalid - invalidate one or more IO PDIR entries | ||
785 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
786 | * @iova: IO Virtual Address mapped earlier | ||
787 | * @byte_cnt: number of bytes this mapping covers. | ||
788 | * | ||
789 | * Mark the IO PDIR entry(ies) as invalid and purge the | ||
790 | * corresponding IO TLB entry. The PCOM (Purge Command Register) | ||
791 | * is used to purge stale entries in the IO TLB when unmapping. | ||
792 | * | ||
793 | * The PCOM register supports purging of multiple pages, with a minimum | ||
794 | * of 1 page and a maximum of 2GB. Hardware requires the address be | ||
795 | * aligned to the size of the range being purged. The size of the range | ||
796 | * must be a power of 2. The "Cool perf optimization" in the | ||
797 | * allocation routine helps keep that true. | ||
798 | */ | ||
799 | static SBA_INLINE void | ||
800 | sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | ||
801 | { | ||
802 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | ||
803 | |||
804 | int off = PDIR_INDEX(iovp); | ||
805 | |||
806 | /* Must be non-zero and rounded up */ | ||
807 | ASSERT(byte_cnt > 0); | ||
808 | ASSERT(0 == (byte_cnt & ~iovp_mask)); | ||
809 | |||
810 | #ifdef ASSERT_PDIR_SANITY | ||
811 | /* Assert first pdir entry is set */ | ||
812 | if (!(ioc->pdir_base[off] >> 60)) { | ||
813 | sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); | ||
814 | } | ||
815 | #endif | ||
816 | |||
817 | if (byte_cnt <= iovp_size) | ||
818 | { | ||
819 | ASSERT(off < ioc->pdir_size); | ||
820 | |||
821 | iovp |= iovp_shift; /* set "size" field for PCOM */ | ||
822 | |||
823 | #ifndef FULL_VALID_PDIR | ||
824 | /* | ||
825 | ** clear I/O PDIR entry "valid" bit | ||
826 | ** Do NOT clear the rest - save it for debugging. | ||
827 | ** We should only clear bits that have previously | ||
828 | ** been enabled. | ||
829 | */ | ||
830 | ioc->pdir_base[off] &= ~(0x80000000000000FFULL); | ||
831 | #else | ||
832 | /* | ||
833 | ** If we want to maintain the PDIR as valid, put in | ||
834 | ** the spill page so devices prefetching won't | ||
835 | ** cause a hard fail. | ||
836 | */ | ||
837 | ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); | ||
838 | #endif | ||
839 | } else { | ||
840 | u32 t = get_iovp_order(byte_cnt) + iovp_shift; | ||
841 | |||
842 | iovp |= t; | ||
843 | ASSERT(t <= 31); /* 2GB! Max value of "size" field */ | ||
844 | |||
845 | do { | ||
846 | /* verify this pdir entry is enabled */ | ||
847 | ASSERT(ioc->pdir_base[off] >> 63); | ||
848 | #ifndef FULL_VALID_PDIR | ||
849 | /* clear I/O Pdir entry "valid" bit first */ | ||
850 | ioc->pdir_base[off] &= ~(0x80000000000000FFULL); | ||
851 | #else | ||
852 | ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); | ||
853 | #endif | ||
854 | off++; | ||
855 | byte_cnt -= iovp_size; | ||
856 | } while (byte_cnt > 0); | ||
857 | } | ||
858 | |||
859 | WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM); | ||
860 | } | ||
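
The PCOM write at the end of this routine packs both the base and the extent of the purge into one value: because the hardware requires the address to be naturally aligned to the power-of-two purge size, the low bits of the page-aligned IOVP are free to carry log2(size in bytes). A small sketch of that encoding; the window base, IOVP and size below are illustrative values, not real hardware addresses.

```c
#include <stdint.h>
#include <stdio.h>

/* Encode a PCOM purge command: IOVA window base | page-aligned IOVP |
 * log2(purge size in bytes), mirroring WRITE_REG(iovp | ioc->ibase, ...). */
static uint64_t pcom_encode(uint64_t ibase, uint32_t iovp, uint32_t log2_bytes)
{
	return ibase | iovp | log2_bytes;
}

int main(void)
{
	uint64_t ibase = 0x80000000ULL;		/* illustrative IOVA window base */
	uint32_t iovp  = 0x40000;		/* 256 KB-aligned offset in the window */
	uint32_t sz    = 18;			/* log2(256 KB): purge 64 pages at once */

	printf("PCOM value = 0x%llx\n", (unsigned long long) pcom_encode(ibase, iovp, sz));
	return 0;
}
```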
861 | |||
862 | /** | ||
863 | * sba_map_single - map one buffer and return IOVA for DMA | ||
864 | * @dev: instance of PCI owned by the driver that's asking. | ||
865 | * @addr: driver buffer to map. | ||
866 | * @size: number of bytes to map in driver buffer. | ||
867 | * @dir: R/W or both. | ||
868 | * | ||
869 | * See Documentation/DMA-mapping.txt | ||
870 | */ | ||
871 | dma_addr_t | ||
872 | sba_map_single(struct device *dev, void *addr, size_t size, int dir) | ||
873 | { | ||
874 | struct ioc *ioc; | ||
875 | dma_addr_t iovp; | ||
876 | dma_addr_t offset; | ||
877 | u64 *pdir_start; | ||
878 | int pide; | ||
879 | #ifdef ASSERT_PDIR_SANITY | ||
880 | unsigned long flags; | ||
881 | #endif | ||
882 | #ifdef ALLOW_IOV_BYPASS | ||
883 | unsigned long pci_addr = virt_to_phys(addr); | ||
884 | #endif | ||
885 | |||
886 | #ifdef ALLOW_IOV_BYPASS | ||
887 | ASSERT(to_pci_dev(dev)->dma_mask); | ||
888 | /* | ||
889 | ** Check if the PCI device can DMA to ptr... if so, just return ptr | ||
890 | */ | ||
891 | if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) { | ||
892 | /* | ||
893 | ** Device is capable of DMA'ing to the buffer... | ||
894 | ** just return the PCI address of ptr | ||
895 | */ | ||
896 | DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", | ||
897 | to_pci_dev(dev)->dma_mask, pci_addr); | ||
898 | return pci_addr; | ||
899 | } | ||
900 | #endif | ||
901 | ioc = GET_IOC(dev); | ||
902 | ASSERT(ioc); | ||
903 | |||
904 | prefetch(ioc->res_hint); | ||
905 | |||
906 | ASSERT(size > 0); | ||
907 | ASSERT(size <= DMA_CHUNK_SIZE); | ||
908 | |||
909 | /* save offset bits */ | ||
910 | offset = ((dma_addr_t) (long) addr) & ~iovp_mask; | ||
911 | |||
912 | /* round up to nearest iovp_size */ | ||
913 | size = (size + offset + ~iovp_mask) & iovp_mask; | ||
914 | |||
915 | #ifdef ASSERT_PDIR_SANITY | ||
916 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
917 | if (sba_check_pdir(ioc,"Check before sba_map_single()")) | ||
918 | panic("Sanity check failed"); | ||
919 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
920 | #endif | ||
921 | |||
922 | pide = sba_alloc_range(ioc, size); | ||
923 | |||
924 | iovp = (dma_addr_t) pide << iovp_shift; | ||
925 | |||
926 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | ||
927 | __FUNCTION__, addr, (long) iovp | offset); | ||
928 | |||
929 | pdir_start = &(ioc->pdir_base[pide]); | ||
930 | |||
931 | while (size > 0) { | ||
932 | ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ | ||
933 | sba_io_pdir_entry(pdir_start, (unsigned long) addr); | ||
934 | |||
935 | DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); | ||
936 | |||
937 | addr += iovp_size; | ||
938 | size -= iovp_size; | ||
939 | pdir_start++; | ||
940 | } | ||
941 | /* force pdir update */ | ||
942 | wmb(); | ||
943 | |||
944 | /* form complete address */ | ||
945 | #ifdef ASSERT_PDIR_SANITY | ||
946 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
947 | sba_check_pdir(ioc,"Check after sba_map_single()"); | ||
948 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
949 | #endif | ||
950 | return SBA_IOVA(ioc, iovp, offset); | ||
951 | } | ||
952 | |||
953 | /** | ||
954 | * sba_unmap_single - unmap one IOVA and free resources | ||
955 | * @dev: instance of PCI owned by the driver that's asking. | ||
956 | * @iova: IOVA of driver buffer previously mapped. | ||
957 | * @size: number of bytes mapped in driver buffer. | ||
958 | * @dir: R/W or both. | ||
959 | * | ||
960 | * See Documentation/DMA-mapping.txt | ||
961 | */ | ||
962 | void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | ||
963 | { | ||
964 | struct ioc *ioc; | ||
965 | #if DELAYED_RESOURCE_CNT > 0 | ||
966 | struct sba_dma_pair *d; | ||
967 | #endif | ||
968 | unsigned long flags; | ||
969 | dma_addr_t offset; | ||
970 | |||
971 | ioc = GET_IOC(dev); | ||
972 | ASSERT(ioc); | ||
973 | |||
974 | #ifdef ALLOW_IOV_BYPASS | ||
975 | if (likely((iova & ioc->imask) != ioc->ibase)) { | ||
976 | /* | ||
977 | ** Address does not fall w/in IOVA, must be bypassing | ||
978 | */ | ||
979 | DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova); | ||
980 | |||
981 | #ifdef ENABLE_MARK_CLEAN | ||
982 | if (dir == DMA_FROM_DEVICE) { | ||
983 | mark_clean(phys_to_virt(iova), size); | ||
984 | } | ||
985 | #endif | ||
986 | return; | ||
987 | } | ||
988 | #endif | ||
989 | offset = iova & ~iovp_mask; | ||
990 | |||
991 | DBG_RUN("%s() iovp 0x%lx/%x\n", | ||
992 | __FUNCTION__, (long) iova, size); | ||
993 | |||
994 | iova ^= offset; /* clear offset bits */ | ||
995 | size += offset; | ||
996 | size = ROUNDUP(size, iovp_size); | ||
997 | |||
998 | |||
999 | #if DELAYED_RESOURCE_CNT > 0 | ||
1000 | spin_lock_irqsave(&ioc->saved_lock, flags); | ||
1001 | d = &(ioc->saved[ioc->saved_cnt]); | ||
1002 | d->iova = iova; | ||
1003 | d->size = size; | ||
1004 | if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) { | ||
1005 | int cnt = ioc->saved_cnt; | ||
1006 | spin_lock(&ioc->res_lock); | ||
1007 | while (cnt--) { | ||
1008 | sba_mark_invalid(ioc, d->iova, d->size); | ||
1009 | sba_free_range(ioc, d->iova, d->size); | ||
1010 | d--; | ||
1011 | } | ||
1012 | ioc->saved_cnt = 0; | ||
1013 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | ||
1014 | spin_unlock(&ioc->res_lock); | ||
1015 | } | ||
1016 | spin_unlock_irqrestore(&ioc->saved_lock, flags); | ||
1017 | #else /* DELAYED_RESOURCE_CNT == 0 */ | ||
1018 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1019 | sba_mark_invalid(ioc, iova, size); | ||
1020 | sba_free_range(ioc, iova, size); | ||
1021 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | ||
1022 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1023 | #endif /* DELAYED_RESOURCE_CNT == 0 */ | ||
1024 | #ifdef ENABLE_MARK_CLEAN | ||
1025 | if (dir == DMA_FROM_DEVICE) { | ||
1026 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | ||
1027 | int off = PDIR_INDEX(iovp); | ||
1028 | void *addr; | ||
1029 | |||
1030 | if (size <= iovp_size) { | ||
1031 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
1032 | ~0xE000000000000FFFULL); | ||
1033 | mark_clean(addr, size); | ||
1034 | } else { | ||
1035 | size_t byte_cnt = size; | ||
1036 | |||
1037 | do { | ||
1038 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
1039 | ~0xE000000000000FFFULL); | ||
1040 | mark_clean(addr, min(byte_cnt, iovp_size)); | ||
1041 | off++; | ||
1042 | byte_cnt -= iovp_size; | ||
1043 | |||
1044 | } while (byte_cnt > 0); | ||
1045 | } | ||
1046 | } | ||
1047 | #endif | ||
1048 | } | ||
1049 | |||
1050 | |||
1051 | /** | ||
1052 | * sba_alloc_coherent - allocate/map shared mem for DMA | ||
1053 | * @dev: instance of PCI owned by the driver that's asking. | ||
1054 | * @size: number of bytes mapped in driver buffer. | ||
1055 | * @dma_handle: IOVA of new buffer. | ||
1056 | * | ||
1057 | * See Documentation/DMA-mapping.txt | ||
1058 | */ | ||
1059 | void * | ||
1060 | sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags) | ||
1061 | { | ||
1062 | struct ioc *ioc; | ||
1063 | void *addr; | ||
1064 | |||
1065 | ioc = GET_IOC(dev); | ||
1066 | ASSERT(ioc); | ||
1067 | |||
1068 | #ifdef CONFIG_NUMA | ||
1069 | { | ||
1070 | struct page *page; | ||
1071 | page = alloc_pages_node(ioc->node == MAX_NUMNODES ? | ||
1072 | numa_node_id() : ioc->node, flags, | ||
1073 | get_order(size)); | ||
1074 | |||
1075 | if (unlikely(!page)) | ||
1076 | return NULL; | ||
1077 | |||
1078 | addr = page_address(page); | ||
1079 | } | ||
1080 | #else | ||
1081 | addr = (void *) __get_free_pages(flags, get_order(size)); | ||
1082 | #endif | ||
1083 | if (unlikely(!addr)) | ||
1084 | return NULL; | ||
1085 | |||
1086 | memset(addr, 0, size); | ||
1087 | *dma_handle = virt_to_phys(addr); | ||
1088 | |||
1089 | #ifdef ALLOW_IOV_BYPASS | ||
1090 | ASSERT(dev->coherent_dma_mask); | ||
1091 | /* | ||
1092 | ** Check if the PCI device can DMA to ptr... if so, just return ptr | ||
1093 | */ | ||
1094 | if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) { | ||
1095 | DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n", | ||
1096 | dev->coherent_dma_mask, *dma_handle); | ||
1097 | |||
1098 | return addr; | ||
1099 | } | ||
1100 | #endif | ||
1101 | |||
1102 | /* | ||
1103 | * If device can't bypass or bypass is disabled, pass the 32bit fake | ||
1104 | * device to map single to get an iova mapping. | ||
1105 | */ | ||
1106 | *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0); | ||
1107 | |||
1108 | return addr; | ||
1109 | } | ||
1110 | |||
1111 | |||
1112 | /** | ||
1113 | * sba_free_coherent - free/unmap shared mem for DMA | ||
1114 | * @dev: instance of PCI owned by the driver that's asking. | ||
1115 | * @size: number of bytes mapped in driver buffer. | ||
1116 | * @vaddr: virtual address IOVA of "consistent" buffer. | ||
1117 | * @dma_handler: IO virtual address of "consistent" buffer. | ||
1118 | * | ||
1119 | * See Documentation/DMA-mapping.txt | ||
1120 | */ | ||
1121 | void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
1122 | { | ||
1123 | sba_unmap_single(dev, dma_handle, size, 0); | ||
1124 | free_pages((unsigned long) vaddr, get_order(size)); | ||
1125 | } | ||
1126 | |||
1127 | |||
1128 | /* | ||
1129 | ** Since 0 is a valid pdir_base index value, we can't use it | ||
1130 | ** to determine if a value is valid or not. Use a flag to indicate | ||
1131 | ** the SG list entry contains a valid pdir index. | ||
1132 | */ | ||
1133 | #define PIDE_FLAG 0x1UL | ||
1134 | |||
1135 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1136 | int dump_run_sg = 0; | ||
1137 | #endif | ||
1138 | |||
1139 | |||
1140 | /** | ||
1141 | * sba_fill_pdir - write allocated SG entries into IO PDIR | ||
1142 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
1143 | * @startsg: list of IOVA/size pairs | ||
1144 | * @nents: number of entries in startsg list | ||
1145 | * | ||
1146 | * Take preprocessed SG list and write corresponding entries | ||
1147 | * in the IO PDIR. | ||
1148 | */ | ||
1149 | |||
1150 | static SBA_INLINE int | ||
1151 | sba_fill_pdir( | ||
1152 | struct ioc *ioc, | ||
1153 | struct scatterlist *startsg, | ||
1154 | int nents) | ||
1155 | { | ||
1156 | struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ | ||
1157 | int n_mappings = 0; | ||
1158 | u64 *pdirp = NULL; | ||
1159 | unsigned long dma_offset = 0; | ||
1160 | |||
1161 | dma_sg--; | ||
1162 | while (nents-- > 0) { | ||
1163 | int cnt = startsg->dma_length; | ||
1164 | startsg->dma_length = 0; | ||
1165 | |||
1166 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1167 | if (dump_run_sg) | ||
1168 | printk(" %2d : %08lx/%05x %p\n", | ||
1169 | nents, startsg->dma_address, cnt, | ||
1170 | sba_sg_address(startsg)); | ||
1171 | #else | ||
1172 | DBG_RUN_SG(" %d : %08lx/%05x %p\n", | ||
1173 | nents, startsg->dma_address, cnt, | ||
1174 | sba_sg_address(startsg)); | ||
1175 | #endif | ||
1176 | /* | ||
1177 | ** Look for the start of a new DMA stream | ||
1178 | */ | ||
1179 | if (startsg->dma_address & PIDE_FLAG) { | ||
1180 | u32 pide = startsg->dma_address & ~PIDE_FLAG; | ||
1181 | dma_offset = (unsigned long) pide & ~iovp_mask; | ||
1182 | startsg->dma_address = 0; | ||
1183 | dma_sg++; | ||
1184 | dma_sg->dma_address = pide | ioc->ibase; | ||
1185 | pdirp = &(ioc->pdir_base[pide >> iovp_shift]); | ||
1186 | n_mappings++; | ||
1187 | } | ||
1188 | |||
1189 | /* | ||
1190 | ** Look for a VCONTIG chunk | ||
1191 | */ | ||
1192 | if (cnt) { | ||
1193 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | ||
1194 | ASSERT(pdirp); | ||
1195 | |||
1196 | /* Since multiple Vcontig blocks could make up | ||
1197 | ** one DMA stream, *add* cnt to dma_len. | ||
1198 | */ | ||
1199 | dma_sg->dma_length += cnt; | ||
1200 | cnt += dma_offset; | ||
1201 | dma_offset=0; /* only want offset on first chunk */ | ||
1202 | cnt = ROUNDUP(cnt, iovp_size); | ||
1203 | do { | ||
1204 | sba_io_pdir_entry(pdirp, vaddr); | ||
1205 | vaddr += iovp_size; | ||
1206 | cnt -= iovp_size; | ||
1207 | pdirp++; | ||
1208 | } while (cnt > 0); | ||
1209 | } | ||
1210 | startsg++; | ||
1211 | } | ||
1212 | /* force pdir update */ | ||
1213 | wmb(); | ||
1214 | |||
1215 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1216 | dump_run_sg = 0; | ||
1217 | #endif | ||
1218 | return(n_mappings); | ||
1219 | } | ||
1220 | |||
1221 | |||
1222 | /* | ||
1223 | ** Two address ranges are DMA contiguous *iff* "end of prev" and | ||
1224 | ** "start of next" are both on an IOV page boundary. | ||
1225 | ** | ||
1226 | ** (shift left is a quick trick to mask off upper bits) | ||
1227 | */ | ||
1228 | #define DMA_CONTIG(__X, __Y) \ | ||
1229 | (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL) | ||
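
A quick check of the shift trick in DMA_CONTIG(): left-shifting by BITS_PER_LONG - iovp_shift discards everything except the low iovp_shift bits of each address, so the OR of the two shifts to zero exactly when both the end of the previous range and the start of the next one sit on an I/O page boundary. The sketch below assumes iovp_shift = 12 and uses made-up addresses.

```c
#include <stdio.h>

static unsigned long iovp_shift = 12;	/* assumed 4 KB I/O pages */

#define BITS_PER_LONG	64
#define DMA_CONTIG(X, Y) \
	(((((unsigned long) (X)) | ((unsigned long) (Y))) << (BITS_PER_LONG - iovp_shift)) == 0UL)

int main(void)
{
	/* end of previous chunk / start of next chunk (illustrative addresses) */
	printf("%d\n", DMA_CONTIG(0x100003000UL, 0x200456000UL));	/* 1: both page aligned */
	printf("%d\n", DMA_CONTIG(0x100003000UL, 0x200456080UL));	/* 0: next starts mid-page */
	return 0;
}
```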
1230 | |||
1231 | |||
1232 | /** | ||
1233 | * sba_coalesce_chunks - preprocess the SG list | ||
1234 | * @ioc: IO MMU structure which owns the pdir we are interested in. | ||
1235 | * @startsg: list of IOVA/size pairs | ||
1236 | * @nents: number of entries in startsg list | ||
1237 | * | ||
1238 | * First pass is to walk the SG list and determine where the breaks are | ||
1239 | * in the DMA stream. Allocates PDIR entries but does not fill them. | ||
1240 | * Returns the number of DMA chunks. | ||
1241 | * | ||
1242 | * Doing the fill separate from the coalescing/allocation keeps the | ||
1243 | * code simpler. Future enhancement could make one pass through | ||
1244 | * the sglist do both. | ||
1245 | */ | ||
1246 | static SBA_INLINE int | ||
1247 | sba_coalesce_chunks( struct ioc *ioc, | ||
1248 | struct scatterlist *startsg, | ||
1249 | int nents) | ||
1250 | { | ||
1251 | struct scatterlist *vcontig_sg; /* VCONTIG chunk head */ | ||
1252 | unsigned long vcontig_len; /* len of VCONTIG chunk */ | ||
1253 | unsigned long vcontig_end; | ||
1254 | struct scatterlist *dma_sg; /* next DMA stream head */ | ||
1255 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | ||
1256 | int n_mappings = 0; | ||
1257 | |||
1258 | while (nents > 0) { | ||
1259 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | ||
1260 | |||
1261 | /* | ||
1262 | ** Prepare for first/next DMA stream | ||
1263 | */ | ||
1264 | dma_sg = vcontig_sg = startsg; | ||
1265 | dma_len = vcontig_len = vcontig_end = startsg->length; | ||
1266 | vcontig_end += vaddr; | ||
1267 | dma_offset = vaddr & ~iovp_mask; | ||
1268 | |||
1269 | /* PARANOID: clear entries */ | ||
1270 | startsg->dma_address = startsg->dma_length = 0; | ||
1271 | |||
1272 | /* | ||
1273 | ** This loop terminates one iteration "early" since | ||
1274 | ** it's always looking one "ahead". | ||
1275 | */ | ||
1276 | while (--nents > 0) { | ||
1277 | unsigned long vaddr; /* tmp */ | ||
1278 | |||
1279 | startsg++; | ||
1280 | |||
1281 | /* PARANOID */ | ||
1282 | startsg->dma_address = startsg->dma_length = 0; | ||
1283 | |||
1284 | /* catch brokenness in SCSI layer */ | ||
1285 | ASSERT(startsg->length <= DMA_CHUNK_SIZE); | ||
1286 | |||
1287 | /* | ||
1288 | ** First make sure current dma stream won't | ||
1289 | ** exceed DMA_CHUNK_SIZE if we coalesce the | ||
1290 | ** next entry. | ||
1291 | */ | ||
1292 | if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask) | ||
1293 | > DMA_CHUNK_SIZE) | ||
1294 | break; | ||
1295 | |||
1296 | /* | ||
1297 | ** Then look for virtually contiguous blocks. | ||
1298 | ** | ||
1299 | ** Can we append the next transaction? | ||
1300 | */ | ||
1301 | vaddr = (unsigned long) sba_sg_address(startsg); | ||
1302 | if (vcontig_end == vaddr) | ||
1303 | { | ||
1304 | vcontig_len += startsg->length; | ||
1305 | vcontig_end += startsg->length; | ||
1306 | dma_len += startsg->length; | ||
1307 | continue; | ||
1308 | } | ||
1309 | |||
1310 | #ifdef DEBUG_LARGE_SG_ENTRIES | ||
1311 | dump_run_sg = (vcontig_len > iovp_size); | ||
1312 | #endif | ||
1313 | |||
1314 | /* | ||
1315 | ** Not virtually contiguous. | ||
1316 | ** Terminate prev chunk. | ||
1317 | ** Start a new chunk. | ||
1318 | ** | ||
1319 | ** Once we start a new VCONTIG chunk, dma_offset | ||
1320 | ** can't change. And we need the offset from the first | ||
1321 | ** chunk - not the last one. Ergo, successive chunks | ||
1322 | ** must start on page boundaries and dovetail | ||
1323 | ** with their predecessors. | ||
1324 | */ | ||
1325 | vcontig_sg->dma_length = vcontig_len; | ||
1326 | |||
1327 | vcontig_sg = startsg; | ||
1328 | vcontig_len = startsg->length; | ||
1329 | |||
1330 | /* | ||
1331 | ** 3) do the entries end/start on page boundaries? | ||
1332 | ** Don't update vcontig_end until we've checked. | ||
1333 | */ | ||
1334 | if (DMA_CONTIG(vcontig_end, vaddr)) | ||
1335 | { | ||
1336 | vcontig_end = vcontig_len + vaddr; | ||
1337 | dma_len += vcontig_len; | ||
1338 | continue; | ||
1339 | } else { | ||
1340 | break; | ||
1341 | } | ||
1342 | } | ||
1343 | |||
1344 | /* | ||
1345 | ** End of DMA Stream | ||
1346 | ** Terminate last VCONTIG block. | ||
1347 | ** Allocate space for DMA stream. | ||
1348 | */ | ||
1349 | vcontig_sg->dma_length = vcontig_len; | ||
1350 | dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; | ||
1351 | ASSERT(dma_len <= DMA_CHUNK_SIZE); | ||
1352 | dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG | ||
1353 | | (sba_alloc_range(ioc, dma_len) << iovp_shift) | ||
1354 | | dma_offset); | ||
1355 | n_mappings++; | ||
1356 | } | ||
1357 | |||
1358 | return n_mappings; | ||
1359 | } | ||
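/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * the round-up arithmetic used in the coalescing loop above.  With
 * iovp_shift == 12 (assumed), iovp_mask == ~0xFFF, so adding ~iovp_mask
 * (0xFFF) before masking rounds a byte count up to the next IOV page
 * multiple -- the same pattern as the DMA_CHUNK_SIZE test and the final
 * dma_len computation.
 */
#include <stdio.h>

int main(void)
{
	unsigned long iovp_shift = 12;			/* assumed 4KB IOV page */
	unsigned long iovp_size  = 1UL << iovp_shift;
	unsigned long iovp_mask  = ~(iovp_size - 1);

	unsigned long dma_offset = 0x200;		/* stream starts mid-page (assumed) */
	unsigned long dma_len    = 0x1e00;		/* bytes coalesced so far (assumed) */

	unsigned long rounded = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;

	printf("rounded stream size 0x%lx (%lu IOV pages)\n",
	       rounded, rounded >> iovp_shift);		/* 0x2000, 2 pages */
	return 0;
}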
1360 | |||
1361 | |||
1362 | /** | ||
1363 | * sba_map_sg - map Scatter/Gather list | ||
1364 | * @dev: PCI device instance owned by the driver that's asking. | ||
1365 | * @sglist: array of buffer/length pairs | ||
1366 | * @nents: number of entries in list | ||
1367 | * @dir: R/W or both. | ||
1368 | * | ||
1369 | * See Documentation/DMA-mapping.txt | ||
1370 | */ | ||
1371 | int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
1372 | { | ||
1373 | struct ioc *ioc; | ||
1374 | int coalesced, filled = 0; | ||
1375 | #ifdef ASSERT_PDIR_SANITY | ||
1376 | unsigned long flags; | ||
1377 | #endif | ||
1378 | #ifdef ALLOW_IOV_BYPASS_SG | ||
1379 | struct scatterlist *sg; | ||
1380 | #endif | ||
1381 | |||
1382 | DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); | ||
1383 | ioc = GET_IOC(dev); | ||
1384 | ASSERT(ioc); | ||
1385 | |||
1386 | #ifdef ALLOW_IOV_BYPASS_SG | ||
1387 | ASSERT(to_pci_dev(dev)->dma_mask); | ||
1388 | if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { | ||
1389 | for (sg = sglist ; filled < nents ; filled++, sg++){ | ||
1390 | sg->dma_length = sg->length; | ||
1391 | sg->dma_address = virt_to_phys(sba_sg_address(sg)); | ||
1392 | } | ||
1393 | return filled; | ||
1394 | } | ||
1395 | #endif | ||
1396 | /* Fast path single entry scatterlists. */ | ||
1397 | if (nents == 1) { | ||
1398 | sglist->dma_length = sglist->length; | ||
1399 | sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir); | ||
1400 | return 1; | ||
1401 | } | ||
1402 | |||
1403 | #ifdef ASSERT_PDIR_SANITY | ||
1404 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1405 | if (sba_check_pdir(ioc,"Check before sba_map_sg()")) | ||
1406 | { | ||
1407 | sba_dump_sg(ioc, sglist, nents); | ||
1408 | panic("Check before sba_map_sg()"); | ||
1409 | } | ||
1410 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1411 | #endif | ||
1412 | |||
1413 | prefetch(ioc->res_hint); | ||
1414 | |||
1415 | /* | ||
1416 | ** First coalesce the chunks and allocate I/O pdir space | ||
1417 | ** | ||
1418 | ** If this is one DMA stream, we can properly map using the | ||
1419 | ** correct virtual address associated with each DMA page. | ||
1420 | ** Without this association, we wouldn't have coherent DMA! | ||
1421 | ** Access to the virtual address is what forces a two-pass algorithm. | ||
1422 | */ | ||
1423 | coalesced = sba_coalesce_chunks(ioc, sglist, nents); | ||
1424 | |||
1425 | /* | ||
1426 | ** Program the I/O Pdir | ||
1427 | ** | ||
1428 | ** map the virtual addresses to the I/O Pdir | ||
1429 | ** o dma_address will contain the pdir index | ||
1430 | ** o dma_len will contain the number of bytes to map | ||
1431 | ** o address contains the virtual address. | ||
1432 | */ | ||
1433 | filled = sba_fill_pdir(ioc, sglist, nents); | ||
1434 | |||
1435 | #ifdef ASSERT_PDIR_SANITY | ||
1436 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1437 | if (sba_check_pdir(ioc,"Check after sba_map_sg()")) | ||
1438 | { | ||
1439 | sba_dump_sg(ioc, sglist, nents); | ||
1440 | panic("Check after sba_map_sg()\n"); | ||
1441 | } | ||
1442 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1443 | #endif | ||
1444 | |||
1445 | ASSERT(coalesced == filled); | ||
1446 | DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); | ||
1447 | |||
1448 | return filled; | ||
1449 | } | ||
1450 | |||
1451 | |||
1452 | /** | ||
1453 | * sba_unmap_sg - unmap Scatter/Gather list | ||
1454 | * @dev: PCI device instance owned by the driver that's asking. | ||
1455 | * @sglist: array of buffer/length pairs | ||
1456 | * @nents: number of entries in list | ||
1457 | * @dir: R/W or both. | ||
1458 | * | ||
1459 | * See Documentation/DMA-mapping.txt | ||
1460 | */ | ||
1461 | void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | ||
1462 | { | ||
1463 | #ifdef ASSERT_PDIR_SANITY | ||
1464 | struct ioc *ioc; | ||
1465 | unsigned long flags; | ||
1466 | #endif | ||
1467 | |||
1468 | DBG_RUN_SG("%s() START %d entries, %p,%x\n", | ||
1469 | __FUNCTION__, nents, sba_sg_address(sglist), sglist->length); | ||
1470 | |||
1471 | #ifdef ASSERT_PDIR_SANITY | ||
1472 | ioc = GET_IOC(dev); | ||
1473 | ASSERT(ioc); | ||
1474 | |||
1475 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1476 | sba_check_pdir(ioc,"Check before sba_unmap_sg()"); | ||
1477 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1478 | #endif | ||
1479 | |||
1480 | while (nents && sglist->dma_length) { | ||
1481 | |||
1482 | sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); | ||
1483 | sglist++; | ||
1484 | nents--; | ||
1485 | } | ||
1486 | |||
1487 | DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); | ||
1488 | |||
1489 | #ifdef ASSERT_PDIR_SANITY | ||
1490 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
1491 | sba_check_pdir(ioc,"Check after sba_unmap_sg()"); | ||
1492 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
1493 | #endif | ||
1494 | |||
1495 | } | ||
1496 | |||
1497 | /************************************************************** | ||
1498 | * | ||
1499 | * Initialization and claim | ||
1500 | * | ||
1501 | ***************************************************************/ | ||
1502 | |||
1503 | static void __init | ||
1504 | ioc_iova_init(struct ioc *ioc) | ||
1505 | { | ||
1506 | int tcnfg; | ||
1507 | int agp_found = 0; | ||
1508 | struct pci_dev *device = NULL; | ||
1509 | #ifdef FULL_VALID_PDIR | ||
1510 | unsigned long index; | ||
1511 | #endif | ||
1512 | |||
1513 | /* | ||
1514 | ** Firmware programs the base and size of a "safe IOVA space" | ||
1515 | ** (one that doesn't overlap memory or LMMIO space) in the | ||
1516 | ** IBASE and IMASK registers. | ||
1517 | */ | ||
1518 | ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL; | ||
1519 | ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL; | ||
1520 | |||
1521 | ioc->iov_size = ~ioc->imask + 1; | ||
1522 | |||
1523 | DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", | ||
1524 | __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask, | ||
1525 | ioc->iov_size >> 20); | ||
1526 | |||
1527 | switch (iovp_size) { | ||
1528 | case 4*1024: tcnfg = 0; break; | ||
1529 | case 8*1024: tcnfg = 1; break; | ||
1530 | case 16*1024: tcnfg = 2; break; | ||
1531 | case 64*1024: tcnfg = 3; break; | ||
1532 | default: | ||
1533 | panic(PFX "Unsupported IOTLB page size %ldK", | ||
1534 | iovp_size >> 10); | ||
1535 | break; | ||
1536 | } | ||
1537 | WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); | ||
1538 | |||
1539 | ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE; | ||
1540 | ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, | ||
1541 | get_order(ioc->pdir_size)); | ||
1542 | if (!ioc->pdir_base) | ||
1543 | panic(PFX "Couldn't allocate I/O Page Table\n"); | ||
1544 | |||
1545 | memset(ioc->pdir_base, 0, ioc->pdir_size); | ||
1546 | |||
1547 | DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__, | ||
1548 | iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); | ||
1549 | |||
1550 | ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); | ||
1551 | WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); | ||
1552 | |||
1553 | /* | ||
1554 | ** If an AGP device is present, only use half of the IOV space | ||
1555 | ** for PCI DMA. Unfortunately we can't know ahead of time | ||
1556 | ** whether GART support will actually be used; for now we | ||
1557 | ** simply key on any AGP device found in the system. | ||
1558 | ** We program the next pdir index after we stop w/ a key for | ||
1559 | ** the GART code to handshake on. | ||
1560 | */ | ||
1561 | for_each_pci_dev(device) | ||
1562 | agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); | ||
1563 | |||
1564 | if (agp_found && reserve_sba_gart) { | ||
1565 | printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n", | ||
1566 | ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2); | ||
1567 | ioc->pdir_size /= 2; | ||
1568 | ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE; | ||
1569 | } | ||
1570 | #ifdef FULL_VALID_PDIR | ||
1571 | /* | ||
1572 | ** Check to see if the spill page has already been allocated; we don't need more than | ||
1573 | ** one across multiple SBAs. | ||
1574 | */ | ||
1575 | if (!prefetch_spill_page) { | ||
1576 | char *spill_poison = "SBAIOMMU POISON"; | ||
1577 | int poison_size = 16; | ||
1578 | void *poison_addr, *addr; | ||
1579 | |||
1580 | addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size)); | ||
1581 | if (!addr) | ||
1582 | panic(PFX "Couldn't allocate PDIR spill page\n"); | ||
1583 | |||
1584 | poison_addr = addr; | ||
1585 | for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size) | ||
1586 | memcpy(poison_addr, spill_poison, poison_size); | ||
1587 | |||
1588 | prefetch_spill_page = virt_to_phys(addr); | ||
1589 | |||
1590 | DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page); | ||
1591 | } | ||
1592 | /* | ||
1593 | ** Set all the PDIR entries valid w/ the spill page as the target | ||
1594 | */ | ||
1595 | for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++) | ||
1596 | ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page); | ||
1597 | #endif | ||
1598 | |||
1599 | /* Clear I/O TLB of any possible entries */ | ||
1600 | WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM); | ||
1601 | READ_REG(ioc->ioc_hpa + IOC_PCOM); | ||
1602 | |||
1603 | /* Enable IOVA translation */ | ||
1604 | WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); | ||
1605 | READ_REG(ioc->ioc_hpa + IOC_IBASE); | ||
1606 | } | ||
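/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * the IOVA window arithmetic in ioc_iova_init() above: with the upper 32
 * bits of IMASK forced on, ~imask + 1 gives the window size, and the pdir
 * needs one 8-byte entry per IOV page.  The IMASK and page size below are
 * assumed example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long imask = 0xFFFFFFFFC0000000UL;	/* assumed: 1GB IOVA window */
	unsigned long iovp_size = 4 * 1024;		/* assumed 4KB IOV page */
	unsigned long pdir_entry_size = 8;

	unsigned long iov_size  = ~imask + 1;
	unsigned long pdir_size = (iov_size / iovp_size) * pdir_entry_size;

	printf("IOV window %lu MB, pdir %lu KB\n",
	       iov_size >> 20, pdir_size >> 10);	/* 1024 MB, 2048 KB */
	return 0;
}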
1607 | |||
1608 | static void __init | ||
1609 | ioc_resource_init(struct ioc *ioc) | ||
1610 | { | ||
1611 | spin_lock_init(&ioc->res_lock); | ||
1612 | #if DELAYED_RESOURCE_CNT > 0 | ||
1613 | spin_lock_init(&ioc->saved_lock); | ||
1614 | #endif | ||
1615 | |||
1616 | /* resource map size dictated by pdir_size */ | ||
1617 | ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ | ||
1618 | ioc->res_size >>= 3; /* convert bit count to byte count */ | ||
1619 | DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); | ||
1620 | |||
1621 | ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, | ||
1622 | get_order(ioc->res_size)); | ||
1623 | if (!ioc->res_map) | ||
1624 | panic(PFX "Couldn't allocate resource map\n"); | ||
1625 | |||
1626 | memset(ioc->res_map, 0, ioc->res_size); | ||
1627 | /* next available IOVP - circular search */ | ||
1628 | ioc->res_hint = (unsigned long *) ioc->res_map; | ||
1629 | |||
1630 | #ifdef ASSERT_PDIR_SANITY | ||
1631 | /* Mark first bit busy - ie no IOVA 0 */ | ||
1632 | ioc->res_map[0] = 0x1; | ||
1633 | ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE; | ||
1634 | #endif | ||
1635 | #ifdef FULL_VALID_PDIR | ||
1636 | /* Mark the last resource used so we don't prefetch beyond IOVA space */ | ||
1637 | ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */ | ||
1638 | ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF | ||
1639 | | prefetch_spill_page); | ||
1640 | #endif | ||
1641 | |||
1642 | DBG_INIT("%s() res_map %x %p\n", __FUNCTION__, | ||
1643 | ioc->res_size, (void *) ioc->res_map); | ||
1644 | } | ||
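/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * the resource map sizing in ioc_resource_init() above: one bit per pdir
 * entry, so the bit count is pdir_size / PDIR_ENTRY_SIZE and ">> 3"
 * converts that to bytes.  The pdir size is an assumed example value.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pdir_size = 2 * 1024 * 1024;	/* assumed: 2MB pdir */
	unsigned long pdir_entry_size = 8;

	unsigned long res_bits  = pdir_size / pdir_entry_size;	/* one bit per IOV page */
	unsigned long res_bytes = res_bits >> 3;		/* bits -> bytes */

	printf("resource map: %lu bits = %lu KB\n",
	       res_bits, res_bytes >> 10);		/* 262144 bits = 32 KB */
	return 0;
}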
1645 | |||
1646 | static void __init | ||
1647 | ioc_sac_init(struct ioc *ioc) | ||
1648 | { | ||
1649 | struct pci_dev *sac = NULL; | ||
1650 | struct pci_controller *controller = NULL; | ||
1651 | |||
1652 | /* | ||
1653 | * pci_alloc_coherent() must return a DMA address which is | ||
1654 | * SAC (single address cycle) addressable, so allocate a | ||
1655 | * pseudo-device to enforce that. | ||
1656 | */ | ||
1657 | sac = kmalloc(sizeof(*sac), GFP_KERNEL); | ||
1658 | if (!sac) | ||
1659 | panic(PFX "Couldn't allocate struct pci_dev"); | ||
1660 | memset(sac, 0, sizeof(*sac)); | ||
1661 | |||
1662 | controller = kmalloc(sizeof(*controller), GFP_KERNEL); | ||
1663 | if (!controller) | ||
1664 | panic(PFX "Couldn't allocate struct pci_controller"); | ||
1665 | memset(controller, 0, sizeof(*controller)); | ||
1666 | |||
1667 | controller->iommu = ioc; | ||
1668 | sac->sysdata = controller; | ||
1669 | sac->dma_mask = 0xFFFFFFFFUL; | ||
1670 | #ifdef CONFIG_PCI | ||
1671 | sac->dev.bus = &pci_bus_type; | ||
1672 | #endif | ||
1673 | ioc->sac_only_dev = sac; | ||
1674 | } | ||
1675 | |||
1676 | static void __init | ||
1677 | ioc_zx1_init(struct ioc *ioc) | ||
1678 | { | ||
1679 | unsigned long rope_config; | ||
1680 | unsigned int i; | ||
1681 | |||
1682 | if (ioc->rev < 0x20) | ||
1683 | panic(PFX "IOC 2.0 or later required for IOMMU support\n"); | ||
1684 | |||
1685 | /* 38 bit memory controller + extra bit for range displaced by MMIO */ | ||
1686 | ioc->dma_mask = (0x1UL << 39) - 1; | ||
1687 | |||
1688 | /* | ||
1689 | ** Clear ROPE(N)_CONFIG AO bit. | ||
1690 | ** Disables "NT Ordering" (~= !"Relaxed Ordering") | ||
1691 | ** Overrides bit 1 in DMA Hint Sets. | ||
1692 | ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701. | ||
1693 | */ | ||
1694 | for (i=0; i<(8*8); i+=8) { | ||
1695 | rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i); | ||
1696 | rope_config &= ~IOC_ROPE_AO; | ||
1697 | WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i); | ||
1698 | } | ||
1699 | } | ||
1700 | |||
1701 | typedef void (initfunc)(struct ioc *); | ||
1702 | |||
1703 | struct ioc_iommu { | ||
1704 | u32 func_id; | ||
1705 | char *name; | ||
1706 | initfunc *init; | ||
1707 | }; | ||
1708 | |||
1709 | static struct ioc_iommu ioc_iommu_info[] __initdata = { | ||
1710 | { ZX1_IOC_ID, "zx1", ioc_zx1_init }, | ||
1711 | { ZX2_IOC_ID, "zx2", NULL }, | ||
1712 | { SX1000_IOC_ID, "sx1000", NULL }, | ||
1713 | }; | ||
1714 | |||
1715 | static struct ioc * __init | ||
1716 | ioc_init(u64 hpa, void *handle) | ||
1717 | { | ||
1718 | struct ioc *ioc; | ||
1719 | struct ioc_iommu *info; | ||
1720 | |||
1721 | ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); | ||
1722 | if (!ioc) | ||
1723 | return NULL; | ||
1724 | |||
1725 | memset(ioc, 0, sizeof(*ioc)); | ||
1726 | |||
1727 | ioc->next = ioc_list; | ||
1728 | ioc_list = ioc; | ||
1729 | |||
1730 | ioc->handle = handle; | ||
1731 | ioc->ioc_hpa = ioremap(hpa, 0x1000); | ||
1732 | |||
1733 | ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); | ||
1734 | ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL; | ||
1735 | ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */ | ||
1736 | |||
1737 | for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) { | ||
1738 | if (ioc->func_id == info->func_id) { | ||
1739 | ioc->name = info->name; | ||
1740 | if (info->init) | ||
1741 | (info->init)(ioc); | ||
1742 | } | ||
1743 | } | ||
1744 | |||
1745 | iovp_size = (1 << iovp_shift); | ||
1746 | iovp_mask = ~(iovp_size - 1); | ||
1747 | |||
1748 | DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__, | ||
1749 | PAGE_SIZE >> 10, iovp_size >> 10); | ||
1750 | |||
1751 | if (!ioc->name) { | ||
1752 | ioc->name = kmalloc(24, GFP_KERNEL); | ||
1753 | if (ioc->name) | ||
1754 | sprintf((char *) ioc->name, "Unknown (%04x:%04x)", | ||
1755 | ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF); | ||
1756 | else | ||
1757 | ioc->name = "Unknown"; | ||
1758 | } | ||
1759 | |||
1760 | ioc_iova_init(ioc); | ||
1761 | ioc_resource_init(ioc); | ||
1762 | ioc_sac_init(ioc); | ||
1763 | |||
1764 | if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask) | ||
1765 | ia64_max_iommu_merge_mask = ~iovp_mask; | ||
1766 | |||
1767 | printk(KERN_INFO PFX | ||
1768 | "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", | ||
1769 | ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, | ||
1770 | hpa, ioc->iov_size >> 20, ioc->ibase); | ||
1771 | |||
1772 | return ioc; | ||
1773 | } | ||
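/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * two small derivations in ioc_init() above: iovp_size/iovp_mask from
 * iovp_shift, and the major/minor split of the revision byte into its
 * high and low nibbles.  The shift and rev values are assumed examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned long iovp_shift = 12;			/* assumed */
	unsigned long iovp_size  = 1UL << iovp_shift;
	unsigned long iovp_mask  = ~(iovp_size - 1);
	unsigned int rev = 0x21;			/* assumed: revision 2.1 */

	printf("iovp_size 0x%lx iovp_mask 0x%lx\n", iovp_size, iovp_mask);
	printf("rev %u.%u\n", (rev >> 4) & 0xF, rev & 0xF);	/* prints 2.1 */
	return 0;
}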
1774 | |||
1775 | |||
1776 | |||
1777 | /************************************************************************** | ||
1778 | ** | ||
1779 | ** SBA initialization code (HW and SW) | ||
1780 | ** | ||
1781 | ** o identify SBA chip itself | ||
1782 | ** o FIXME: initialize DMA hints for reasonable defaults | ||
1783 | ** | ||
1784 | **************************************************************************/ | ||
1785 | |||
1786 | #ifdef CONFIG_PROC_FS | ||
1787 | static void * | ||
1788 | ioc_start(struct seq_file *s, loff_t *pos) | ||
1789 | { | ||
1790 | struct ioc *ioc; | ||
1791 | loff_t n = *pos; | ||
1792 | |||
1793 | for (ioc = ioc_list; ioc; ioc = ioc->next) | ||
1794 | if (!n--) | ||
1795 | return ioc; | ||
1796 | |||
1797 | return NULL; | ||
1798 | } | ||
1799 | |||
1800 | static void * | ||
1801 | ioc_next(struct seq_file *s, void *v, loff_t *pos) | ||
1802 | { | ||
1803 | struct ioc *ioc = v; | ||
1804 | |||
1805 | ++*pos; | ||
1806 | return ioc->next; | ||
1807 | } | ||
1808 | |||
1809 | static void | ||
1810 | ioc_stop(struct seq_file *s, void *v) | ||
1811 | { | ||
1812 | } | ||
1813 | |||
1814 | static int | ||
1815 | ioc_show(struct seq_file *s, void *v) | ||
1816 | { | ||
1817 | struct ioc *ioc = v; | ||
1818 | unsigned long *res_ptr = (unsigned long *)ioc->res_map; | ||
1819 | int i, used = 0; | ||
1820 | |||
1821 | seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n", | ||
1822 | ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF)); | ||
1823 | #ifdef CONFIG_NUMA | ||
1824 | if (ioc->node != MAX_NUMNODES) | ||
1825 | seq_printf(s, "NUMA node : %d\n", ioc->node); | ||
1826 | #endif | ||
1827 | seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024)); | ||
1828 | seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024); | ||
1829 | |||
1830 | for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) | ||
1831 | used += hweight64(*res_ptr); | ||
1832 | |||
1833 | seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3); | ||
1834 | seq_printf(s, "PDIR used : %d entries\n", used); | ||
1835 | |||
1836 | #ifdef PDIR_SEARCH_TIMING | ||
1837 | { | ||
1838 | unsigned long i = 0, avg = 0, min, max; | ||
1839 | min = max = ioc->avg_search[0]; | ||
1840 | for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { | ||
1841 | avg += ioc->avg_search[i]; | ||
1842 | if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; | ||
1843 | if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; | ||
1844 | } | ||
1845 | avg /= SBA_SEARCH_SAMPLE; | ||
1846 | seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n", | ||
1847 | min, avg, max); | ||
1848 | } | ||
1849 | #endif | ||
1850 | #ifndef ALLOW_IOV_BYPASS | ||
1851 | seq_printf(s, "IOVA bypass disabled\n"); | ||
1852 | #endif | ||
1853 | return 0; | ||
1854 | } | ||
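/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * how ioc_show() above counts in-use IOVA pages: walk the resource bitmap
 * one word at a time and sum the set bits.  __builtin_popcountll stands in
 * for the kernel's hweight64(); the bitmap contents are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long res_map[4] = { 0x1ULL, 0xFF00ULL, 0x0ULL, 0xF0F0F0F0ULL };
	int i, used = 0;

	for (i = 0; i < 4; i++)
		used += __builtin_popcountll(res_map[i]);

	printf("PDIR used : %d entries\n", used);	/* 1 + 8 + 0 + 16 = 25 */
	return 0;
}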
1855 | |||
1856 | static struct seq_operations ioc_seq_ops = { | ||
1857 | .start = ioc_start, | ||
1858 | .next = ioc_next, | ||
1859 | .stop = ioc_stop, | ||
1860 | .show = ioc_show | ||
1861 | }; | ||
1862 | |||
1863 | static int | ||
1864 | ioc_open(struct inode *inode, struct file *file) | ||
1865 | { | ||
1866 | return seq_open(file, &ioc_seq_ops); | ||
1867 | } | ||
1868 | |||
1869 | static struct file_operations ioc_fops = { | ||
1870 | .open = ioc_open, | ||
1871 | .read = seq_read, | ||
1872 | .llseek = seq_lseek, | ||
1873 | .release = seq_release | ||
1874 | }; | ||
1875 | |||
1876 | static void __init | ||
1877 | ioc_proc_init(void) | ||
1878 | { | ||
1879 | struct proc_dir_entry *dir, *entry; | ||
1880 | |||
1881 | dir = proc_mkdir("bus/mckinley", NULL); | ||
1882 | if (!dir) | ||
1883 | return; | ||
1884 | |||
1885 | entry = create_proc_entry(ioc_list->name, 0, dir); | ||
1886 | if (entry) | ||
1887 | entry->proc_fops = &ioc_fops; | ||
1888 | } | ||
1889 | #endif | ||
1890 | |||
1891 | static void | ||
1892 | sba_connect_bus(struct pci_bus *bus) | ||
1893 | { | ||
1894 | acpi_handle handle, parent; | ||
1895 | acpi_status status; | ||
1896 | struct ioc *ioc; | ||
1897 | |||
1898 | if (!PCI_CONTROLLER(bus)) | ||
1899 | panic(PFX "no sysdata on bus %d!\n", bus->number); | ||
1900 | |||
1901 | if (PCI_CONTROLLER(bus)->iommu) | ||
1902 | return; | ||
1903 | |||
1904 | handle = PCI_CONTROLLER(bus)->acpi_handle; | ||
1905 | if (!handle) | ||
1906 | return; | ||
1907 | |||
1908 | /* | ||
1909 | * The IOC scope encloses PCI root bridges in the ACPI | ||
1910 | * namespace, so work our way out until we find an IOC we | ||
1911 | * claimed previously. | ||
1912 | */ | ||
1913 | do { | ||
1914 | for (ioc = ioc_list; ioc; ioc = ioc->next) | ||
1915 | if (ioc->handle == handle) { | ||
1916 | PCI_CONTROLLER(bus)->iommu = ioc; | ||
1917 | return; | ||
1918 | } | ||
1919 | |||
1920 | status = acpi_get_parent(handle, &parent); | ||
1921 | handle = parent; | ||
1922 | } while (ACPI_SUCCESS(status)); | ||
1923 | |||
1924 | printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number); | ||
1925 | } | ||
1926 | |||
1927 | #ifdef CONFIG_NUMA | ||
1928 | static void __init | ||
1929 | sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) | ||
1930 | { | ||
1931 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
1932 | union acpi_object *obj; | ||
1933 | acpi_handle phandle; | ||
1934 | unsigned int node; | ||
1935 | |||
1936 | ioc->node = MAX_NUMNODES; | ||
1937 | |||
1938 | /* | ||
1939 | * Check for a _PXM on this node first. We don't typically see | ||
1940 | * one here, so we'll end up getting it from the parent. | ||
1941 | */ | ||
1942 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) { | ||
1943 | if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) | ||
1944 | return; | ||
1945 | |||
1946 | /* Reset the acpi buffer */ | ||
1947 | buffer.length = ACPI_ALLOCATE_BUFFER; | ||
1948 | buffer.pointer = NULL; | ||
1949 | |||
1950 | if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL, | ||
1951 | &buffer))) | ||
1952 | return; | ||
1953 | } | ||
1954 | |||
1955 | if (!buffer.length || !buffer.pointer) | ||
1956 | return; | ||
1957 | |||
1958 | obj = buffer.pointer; | ||
1959 | |||
1960 | if (obj->type != ACPI_TYPE_INTEGER || | ||
1961 | obj->integer.value >= MAX_PXM_DOMAINS) { | ||
1962 | acpi_os_free(buffer.pointer); | ||
1963 | return; | ||
1964 | } | ||
1965 | |||
1966 | node = pxm_to_nid_map[obj->integer.value]; | ||
1967 | acpi_os_free(buffer.pointer); | ||
1968 | |||
1969 | if (node >= MAX_NUMNODES || !node_online(node)) | ||
1970 | return; | ||
1971 | |||
1972 | ioc->node = node; | ||
1973 | return; | ||
1974 | } | ||
1975 | #else | ||
1976 | #define sba_map_ioc_to_node(ioc, handle) | ||
1977 | #endif | ||
1978 | |||
1979 | static int __init | ||
1980 | acpi_sba_ioc_add(struct acpi_device *device) | ||
1981 | { | ||
1982 | struct ioc *ioc; | ||
1983 | acpi_status status; | ||
1984 | u64 hpa, length; | ||
1985 | struct acpi_buffer buffer; | ||
1986 | struct acpi_device_info *dev_info; | ||
1987 | |||
1988 | status = hp_acpi_csr_space(device->handle, &hpa, &length); | ||
1989 | if (ACPI_FAILURE(status)) | ||
1990 | return 1; | ||
1991 | |||
1992 | buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; | ||
1993 | status = acpi_get_object_info(device->handle, &buffer); | ||
1994 | if (ACPI_FAILURE(status)) | ||
1995 | return 1; | ||
1996 | dev_info = buffer.pointer; | ||
1997 | |||
1998 | /* | ||
1999 | * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI | ||
2000 | * root bridges, and its CSR space includes the IOC function. | ||
2001 | */ | ||
2002 | if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) { | ||
2003 | hpa += ZX1_IOC_OFFSET; | ||
2004 | /* zx1-based systems default to kernel-page-size IOMMU pages */ | ||
2005 | if (!iovp_shift) | ||
2006 | iovp_shift = min(PAGE_SHIFT, 16); | ||
2007 | } | ||
2008 | ACPI_MEM_FREE(dev_info); | ||
2009 | |||
2010 | /* | ||
2011 | * default anything not caught above or specified on cmdline to 4k | ||
2012 | * iommu page size | ||
2013 | */ | ||
2014 | if (!iovp_shift) | ||
2015 | iovp_shift = 12; | ||
2016 | |||
2017 | ioc = ioc_init(hpa, device->handle); | ||
2018 | if (!ioc) | ||
2019 | return 1; | ||
2020 | |||
2021 | /* setup NUMA node association */ | ||
2022 | sba_map_ioc_to_node(ioc, device->handle); | ||
2023 | return 0; | ||
2024 | } | ||
2025 | |||
2026 | static struct acpi_driver acpi_sba_ioc_driver = { | ||
2027 | .name = "IOC IOMMU Driver", | ||
2028 | .ids = "HWP0001,HWP0004", | ||
2029 | .ops = { | ||
2030 | .add = acpi_sba_ioc_add, | ||
2031 | }, | ||
2032 | }; | ||
2033 | |||
2034 | static int __init | ||
2035 | sba_init(void) | ||
2036 | { | ||
2037 | acpi_bus_register_driver(&acpi_sba_ioc_driver); | ||
2038 | if (!ioc_list) | ||
2039 | return 0; | ||
2040 | |||
2041 | #ifdef CONFIG_PCI | ||
2042 | { | ||
2043 | struct pci_bus *b = NULL; | ||
2044 | while ((b = pci_find_next_bus(b)) != NULL) | ||
2045 | sba_connect_bus(b); | ||
2046 | } | ||
2047 | #endif | ||
2048 | |||
2049 | #ifdef CONFIG_PROC_FS | ||
2050 | ioc_proc_init(); | ||
2051 | #endif | ||
2052 | return 0; | ||
2053 | } | ||
2054 | |||
2055 | subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */ | ||
2056 | |||
2057 | extern void dig_setup(char**); | ||
2058 | /* | ||
2059 | * MAX_DMA_ADDRESS needs to be set up prior to paging_init() to do any good, | ||
2060 | * so we use the platform_setup hook to fix it up. | ||
2061 | */ | ||
2062 | void __init | ||
2063 | sba_setup(char **cmdline_p) | ||
2064 | { | ||
2065 | MAX_DMA_ADDRESS = ~0UL; | ||
2066 | dig_setup(cmdline_p); | ||
2067 | } | ||
2068 | |||
2069 | static int __init | ||
2070 | nosbagart(char *str) | ||
2071 | { | ||
2072 | reserve_sba_gart = 0; | ||
2073 | return 1; | ||
2074 | } | ||
2075 | |||
2076 | int | ||
2077 | sba_dma_supported (struct device *dev, u64 mask) | ||
2078 | { | ||
2079 | /* make sure it's at least 32bit capable */ | ||
2080 | return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); | ||
2081 | } | ||
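/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * the capability test in sba_dma_supported() above: a DMA mask qualifies
 * only if its low 32 bits are all set.  The example masks are assumed.
 */
#include <stdio.h>

static int mask_supported_example(unsigned long long mask)
{
	return (mask & 0xFFFFFFFFULL) == 0xFFFFFFFFULL;
}

int main(void)
{
	printf("%d\n", mask_supported_example(0xFFFFFFFFULL));		/* 1: 32-bit capable */
	printf("%d\n", mask_supported_example(~0ULL));			/* 1: 64-bit capable */
	printf("%d\n", mask_supported_example(0x0FFFFFFFULL));		/* 0: 28-bit-only device */
	return 0;
}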
2082 | |||
2083 | int | ||
2084 | sba_dma_mapping_error (dma_addr_t dma_addr) | ||
2085 | { | ||
2086 | return 0; | ||
2087 | } | ||
2088 | |||
2089 | __setup("nosbagart", nosbagart); | ||
2090 | |||
2091 | static int __init | ||
2092 | sba_page_override(char *str) | ||
2093 | { | ||
2094 | unsigned long page_size; | ||
2095 | |||
2096 | page_size = memparse(str, &str); | ||
2097 | switch (page_size) { | ||
2098 | case 4096: | ||
2099 | case 8192: | ||
2100 | case 16384: | ||
2101 | case 65536: | ||
2102 | iovp_shift = ffs(page_size) - 1; | ||
2103 | break; | ||
2104 | default: | ||
2105 | printk("%s: unknown/unsupported iommu page size %ld\n", | ||
2106 | __FUNCTION__, page_size); | ||
2107 | } | ||
2108 | |||
2109 | return 1; | ||
2110 | } | ||
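/*
 * Illustrative sketch (standalone userspace C, not part of sba_iommu.c) of
 * the ffs(page_size) - 1 conversion in sba_page_override() above: for a
 * power-of-two page size, the position of the lowest set bit minus one is
 * the page shift.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int sizes[] = { 4096, 8192, 16384, 65536 };
	int i;

	for (i = 0; i < 4; i++)
		printf("page size %6d -> iovp_shift %2d\n",
		       sizes[i], ffs(sizes[i]) - 1);	/* 12, 13, 14, 16 */
	return 0;
}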
2111 | |||
2112 | __setup("sbapagesize=", sba_page_override); | ||
2113 | |||
2114 | EXPORT_SYMBOL(sba_dma_mapping_error); | ||
2115 | EXPORT_SYMBOL(sba_map_single); | ||
2116 | EXPORT_SYMBOL(sba_unmap_single); | ||
2117 | EXPORT_SYMBOL(sba_map_sg); | ||
2118 | EXPORT_SYMBOL(sba_unmap_sg); | ||
2119 | EXPORT_SYMBOL(sba_dma_supported); | ||
2120 | EXPORT_SYMBOL(sba_alloc_coherent); | ||
2121 | EXPORT_SYMBOL(sba_free_coherent); | ||