Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c |  13
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c  |  45
-rw-r--r--  arch/ia64/lib/swiotlb.c          | 102
3 files changed, 145 insertions(+), 15 deletions(-)
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 80f8ef013939..317c334c5a18 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -17,7 +17,7 @@
 #include <asm/machvec.h>
 
 /* swiotlb declarations & definitions: */
-extern void swiotlb_init_with_default_size (size_t size);
+extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
 extern ia64_mv_dma_map_single swiotlb_map_single;
@@ -67,7 +67,16 @@ void
 hwsw_init (void)
 {
        /* default to a smallish 2MB sw I/O TLB */
-       swiotlb_init_with_default_size (2 * (1<<20));
+       if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
+#ifdef CONFIG_IA64_GENERIC
+               /* Better to have normal DMA than panic */
+               printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
+                      " reverting to hpzx1 platform vector\n", __FUNCTION__);
+               machvec_init("hpzx1");
+#else
+               panic("Unable to initialize software I/O TLB services");
+#endif
+       }
 }
 
 void *
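
For scale: with IO_TLB_SHIFT = 11 (added in swiotlb.c below), one swiotlb slab is 2^11 = 2048 bytes, so the 2 * (1<<20) = 2MB request above comes to 1024 slabs. The late-init path retries at progressively smaller allocation orders down to the IO_TLB_MIN_SLABS floor of (1<<20) >> 11 = 512 slabs, so the machvec_init("hpzx1") fallback should only trigger when even a contiguous allocation of roughly 1MB from the DMA zone cannot be found.
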
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 11957598a8b9..e64ca04ace89 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2028,9 +2028,40 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 static int __init
 sba_init(void)
 {
+       if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
+               return 0;
+
        acpi_bus_register_driver(&acpi_sba_ioc_driver);
-       if (!ioc_list)
+       if (!ioc_list) {
+#ifdef CONFIG_IA64_GENERIC
+               extern int swiotlb_late_init_with_default_size (size_t size);
+
+               /*
+                * If we didn't find something sba_iommu can claim, we
+                * need to setup the swiotlb and switch to the dig machvec.
+                */
+               if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
+                       panic("Unable to find SBA IOMMU or initialize "
+                             "software I/O TLB: Try machvec=dig boot option");
+               machvec_init("dig");
+#else
+               panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
+#endif
                return 0;
+       }
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
+       /*
+        * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
+        * buffer setup to support devices with smaller DMA masks than
+        * sba_iommu can handle.
+        */
+       if (ia64_platform_is("hpzx1_swiotlb")) {
+               extern void hwsw_init(void);
+
+               hwsw_init();
+       }
+#endif
 
 #ifdef CONFIG_PCI
        {
@@ -2048,18 +2079,6 @@ sba_init(void)
 
 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
 
-extern void dig_setup(char**);
-/*
- * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
- * so we use the platform_setup hook to fix it up.
- */
-void __init
-sba_setup(char **cmdline_p)
-{
-       MAX_DMA_ADDRESS = ~0UL;
-       dig_setup(cmdline_p);
-}
-
 static int __init
 nosbagart(char *str)
 {
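
Both call sites above rely on the same contract from the new function: it returns 0 on success and -ENOMEM on failure, and on failure it unwinds all partial allocations and restores io_tlb_nslabs, so the caller can fall back or retry cleanly. A minimal sketch of what a hypothetical third caller under that contract might look like (my_platform_init and the 16MB size are invented for illustration; printk/KERN_WARNING and subsys_initcall are as used in the patch itself):

    extern int swiotlb_late_init_with_default_size(size_t size);

    static int __init my_platform_init(void)
    {
            /* Ask for a 16MB bounce pool; degrade gracefully on failure. */
            if (swiotlb_late_init_with_default_size(16 * (1 << 20)) != 0) {
                    printk(KERN_WARNING "%s: no software I/O TLB; devices "
                           "with small DMA masks will not work\n",
                           __FUNCTION__);
                    return -ENOMEM;
            }
            return 0;
    }
    subsys_initcall(my_platform_init);
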
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index dbc0b3e449c5..875b0c16250c 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -49,6 +49,15 @@
  */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -154,6 +163,99 @@ swiotlb_init (void)
        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+       unsigned long i, req_nslabs = io_tlb_nslabs;
+       unsigned int order;
+
+       if (!io_tlb_nslabs) {
+               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+
+       /*
+        * Get IO TLB memory from the low pages
+        */
+       order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+       io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+               io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                       order);
+               if (io_tlb_start)
+                       break;
+               order--;
+       }
+
+       if (!io_tlb_start)
+               goto cleanup1;
+
+       if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+               io_tlb_nslabs = SLABS_PER_PAGE << order;
+       }
+       io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+       memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between io_tlb_start and io_tlb_end.
+        */
+       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                                     get_order(io_tlb_nslabs * sizeof(int)));
+       if (!io_tlb_list)
+               goto cleanup2;
+
+       for (i = 0; i < io_tlb_nslabs; i++)
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+       io_tlb_index = 0;
+
+       io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+                                  get_order(io_tlb_nslabs * sizeof(char *)));
+       if (!io_tlb_orig_addr)
+               goto cleanup3;
+
+       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                 get_order(io_tlb_overflow));
+       if (!io_tlb_overflow_buffer)
+               goto cleanup4;
+
+       printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+              "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+              virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+       return 0;
+
+cleanup4:
+       free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+                                                             sizeof(char *)));
+       io_tlb_orig_addr = NULL;
+cleanup3:
+       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                        sizeof(int)));
+       io_tlb_list = NULL;
+       io_tlb_end = NULL;
+cleanup2:
+       free_pages((unsigned long)io_tlb_start, order);
+       io_tlb_start = NULL;
+cleanup1:
+       io_tlb_nslabs = req_nslabs;
+       return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
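
The allocation loop in the new function is the part worth lifting out: ask for the full rounded-up size first, then step the order down one power of two at a time (with __GFP_NOWARN suppressing allocation-failure warnings) until either a contiguous block is found or going lower would drop below IO_TLB_MIN_SLABS. A standalone userspace analogue of that pattern, assuming nothing from the kernel (plain malloc stands in for __get_free_pages, and all names here are invented for the sketch):

    #include <stdio.h>
    #include <stdlib.h>

    #define SLAB_SHIFT 11             /* 2KB units, like IO_TLB_SHIFT */
    #define MIN_BYTES  (1 << 20)      /* 1MB floor, like IO_TLB_MIN_SLABS */

    /* Halve the request until an allocation succeeds or we hit the floor. */
    static void *alloc_largest(size_t want, size_t *got)
    {
            size_t size = want;
            void *buf;

            while ((buf = malloc(size)) == NULL && size > MIN_BYTES)
                    size >>= 1;       /* drop one "order" and retry */
            *got = buf ? size : 0;
            return buf;
    }

    int main(void)
    {
            size_t got;
            void *buf = alloc_largest((size_t)64 << 20, &got); /* want 64MB */

            if (!buf) {
                    fprintf(stderr, "could not get even %d contiguous bytes\n",
                            MIN_BYTES);
                    return 1;
            }
            printf("got %zu bytes = %zu slabs of %d bytes\n",
                   got, got >> SLAB_SHIFT, 1 << SLAB_SHIFT);
            free(buf);
            return 0;
    }

The kernel version differs in one detail: because __get_free_pages hands out power-of-two page blocks, a successful smaller allocation also shrinks io_tlb_nslabs, which is why the code re-derives it from SLABS_PER_PAGE << order and prints the warning when the final order is smaller than originally requested.
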