Diffstat (limited to 'arch/powerpc/sysdev/dart_iommu.c')
 arch/powerpc/sysdev/dart_iommu.c | 184 +++++++++++++++++++-----------------
 1 file changed, 88 insertions(+), 96 deletions(-)

diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index b7348637eae0..26904f4879ec 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -48,16 +48,10 @@
 
 #include "dart.h"
 
-/* Physical base address and size of the DART table */
-unsigned long dart_tablebase;	/* exported to htab_initialize */
+/* DART table address and size */
+static u32 *dart_tablebase;
 static unsigned long dart_tablesize;
 
-/* Virtual base address of the DART table */
-static u32 *dart_vbase;
-#ifdef CONFIG_PM
-static u32 *dart_copy;
-#endif
-
 /* Mapped base address for the dart */
 static unsigned int __iomem *dart;
 
@@ -151,6 +145,34 @@ wait_more:
 	spin_unlock_irqrestore(&invalidate_lock, flags);
 }
 
+static void dart_cache_sync(unsigned int *base, unsigned int count)
+{
+	/*
+	 * We add 1 to the number of entries to flush, following a
+	 * comment in Darwin indicating that the memory controller
+	 * can prefetch unmapped memory under some circumstances.
+	 */
+	unsigned long start = (unsigned long)base;
+	unsigned long end = start + (count + 1) * sizeof(unsigned int);
+	unsigned int tmp;
+
+	/* Perform a standard cache flush */
+	flush_inval_dcache_range(start, end);
+
+	/*
+	 * Perform the sequence described in the CPC925 manual to
+	 * ensure all the data gets to a point the cache incoherent
+	 * DART hardware will see.
+	 */
+	asm volatile(" sync;"
+		     " isync;"
+		     " dcbf 0,%1;"
+		     " sync;"
+		     " isync;"
+		     " lwz %0,0(%1);"
+		     " isync" : "=r" (tmp) : "r" (end) : "memory");
+}
+
 static void dart_flush(struct iommu_table *tbl)
 {
 	mb();
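
The dart_cache_sync() helper added above is the heart of this patch: the DART table now lives in ordinary cachable memory, so every table update must be pushed out of the CPU caches by hand before the cache-incoherent DART engine walks it. The following is an annotated restatement of that sequence for reference; the comments are editorial, the instructions themselves are unchanged from the hunk above.

	/* Annotated sketch of dart_cache_sync(); not part of the patch */
	static void dart_cache_sync_sketch(unsigned int *base, unsigned int count)
	{
		unsigned long start = (unsigned long)base;
		/* count + 1: Darwin notes the memory controller may prefetch
		 * one entry past the mapped range, so a guard entry is
		 * flushed as well */
		unsigned long end = start + (count + 1) * sizeof(unsigned int);
		unsigned int tmp;

		/* write back and invalidate every cache line covering the
		 * updated entries */
		flush_inval_dcache_range(start, end);

		asm volatile(" sync;"		/* drain prior stores */
			     " isync;"		/* discard speculative state */
			     " dcbf 0,%1;"	/* flush the line at 'end' */
			     " sync;"
			     " isync;"
			     " lwz %0,0(%1);"	/* read back, forcing the data
						 * to a point the DART sees */
			     " isync" : "=r" (tmp) : "r" (end) : "memory");
	}
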
@@ -165,13 +187,13 @@ static int dart_build(struct iommu_table *tbl, long index,
 			  enum dma_data_direction direction,
 			  struct dma_attrs *attrs)
 {
-	unsigned int *dp;
+	unsigned int *dp, *orig_dp;
 	unsigned int rpn;
 	long l;
 
 	DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
 
-	dp = ((unsigned int*)tbl->it_base) + index;
+	orig_dp = dp = ((unsigned int*)tbl->it_base) + index;
 
 	/* On U3, all memory is contiguous, so we can move this
 	 * out of the loop.
@@ -184,11 +206,7 @@ static int dart_build(struct iommu_table *tbl, long index,
 
 		uaddr += DART_PAGE_SIZE;
 	}
-
-	/* make sure all updates have reached memory */
-	mb();
-	in_be32((unsigned __iomem *)dp);
-	mb();
+	dart_cache_sync(orig_dp, npages);
 
 	if (dart_is_u4) {
 		rpn = index;
@@ -203,7 +221,8 @@ static int dart_build(struct iommu_table *tbl, long index,
 
 static void dart_free(struct iommu_table *tbl, long index, long npages)
 {
-	unsigned int *dp;
+	unsigned int *dp, *orig_dp;
+	long orig_npages = npages;
 
 	/* We don't worry about flushing the TLB cache. The only drawback of
 	 * not doing it is that we won't catch buggy device drivers doing
@@ -212,34 +231,30 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
 
 	DBG("dart: free at: %lx, %lx\n", index, npages);
 
-	dp = ((unsigned int *)tbl->it_base) + index;
+	orig_dp = dp = ((unsigned int *)tbl->it_base) + index;
 
 	while (npages--)
 		*(dp++) = dart_emptyval;
-}
 
+	dart_cache_sync(orig_dp, orig_npages);
+}
 
-static int __init dart_init(struct device_node *dart_node)
+static void allocate_dart(void)
 {
-	unsigned int i;
-	unsigned long tmp, base, size;
-	struct resource r;
-
-	if (dart_tablebase == 0 || dart_tablesize == 0) {
-		printk(KERN_INFO "DART: table not allocated, using "
-		       "direct DMA\n");
-		return -ENODEV;
-	}
+	unsigned long tmp;
 
-	if (of_address_to_resource(dart_node, 0, &r))
-		panic("DART: can't get register base ! ");
+	/* 512 pages (2MB) is max DART tablesize. */
+	dart_tablesize = 1UL << 21;
 
-	/* Make sure nothing from the DART range remains in the CPU cache
-	 * from a previous mapping that existed before the kernel took
-	 * over
-	 */
-	flush_dcache_phys_range(dart_tablebase,
-				dart_tablebase + dart_tablesize);
+	/*
+	 * 16MB (1 << 24) alignment. We allocate a full 16MB chunk since we
+	 * will blow up an entire large page anyway in the kernel mapping.
+	 */
+	dart_tablebase = __va(memblock_alloc_base(1UL<<24,
+						  1UL<<24, 0x80000000L));
+
+	/* There is no point scanning the DART space for leaks */
+	kmemleak_no_scan((void *)dart_tablebase);
 
 	/* Allocate a spare page to map all invalid DART pages. We need to do
 	 * that to work around what looks like a problem with the HT bridge
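
A note on the allocation above: memblock_alloc_base(size, align, max_addr) is the early boot allocator, so the table is reserved long before the page allocator exists, and (as of this kernel) it panics rather than returning NULL, which is why there is no error check; __va() then turns the physical address into a pointer in the cachable linear mapping. The patch asks for 16MB, 16MB-aligned, below 2GB even though the table is at most 2MB, since mapping it cachable burns an entire 16MB large page of the linear mapping anyway. A small standalone sketch of what those constants amount to (hypothetical user-space program; 4K DART pages assumed, as defined in dart.h):

	#include <stdio.h>

	int main(void)
	{
		unsigned long table_size = 1UL << 21;	/* 2MB table (patch) */
		unsigned long alloc_size = 1UL << 24;	/* 16MB chunk reserved */
		unsigned long entries = table_size / sizeof(unsigned int);

		/* each 32-bit entry translates one 4K page of DMA space */
		printf("table: %lu x 4K pages, %lu entries\n",
		       table_size >> 12, entries);
		printf("DMA space covered: %lu MB\n", (entries << 12) >> 20);
		printf("reserved: %lu MB, padding included\n", alloc_size >> 20);
		return 0;
	}
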
@@ -249,20 +264,51 @@ static int __init dart_init(struct device_node *dart_node)
 	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
 					 DARTMAP_RPNMASK);
 
+	printk(KERN_INFO "DART table allocated at: %p\n", dart_tablebase);
+}
+
+static int __init dart_init(struct device_node *dart_node)
+{
+	unsigned int i;
+	unsigned long base, size;
+	struct resource r;
+
+	/* IOMMU disabled by the user ? bail out */
+	if (iommu_is_off)
+		return -ENODEV;
+
+	/*
+	 * Only use the DART if the machine has more than 1GB of RAM
+	 * or if requested with iommu=on on cmdline.
+	 *
+	 * 1GB of RAM is picked as limit because some default devices
+	 * (i.e. Airport Extreme) have 30 bit address range limits.
+	 */
+
+	if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
+		return -ENODEV;
+
+	/* Get DART registers */
+	if (of_address_to_resource(dart_node, 0, &r))
+		panic("DART: can't get register base ! ");
+
 	/* Map in DART registers */
 	dart = ioremap(r.start, resource_size(&r));
 	if (dart == NULL)
 		panic("DART: Cannot map registers!");
 
-	/* Map in DART table */
-	dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize);
+	/* Allocate the DART and dummy page */
+	allocate_dart();
 
 	/* Fill initial table */
 	for (i = 0; i < dart_tablesize/4; i++)
-		dart_vbase[i] = dart_emptyval;
+		dart_tablebase[i] = dart_emptyval;
+
+	/* Push to memory */
+	dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
 
 	/* Initialize DART with table base and enable it. */
-	base = dart_tablebase >> DART_PAGE_SHIFT;
+	base = ((unsigned long)dart_tablebase) >> DART_PAGE_SHIFT;
 	size = dart_tablesize >> DART_PAGE_SHIFT;
 	if (dart_is_u4) {
 		size &= DART_SIZE_U4_SIZE_MASK;
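
The enablement checks folded into the new dart_init() deserve a gloss: memblock_end_of_DRAM() returns the address one byte past the end of RAM, so comparing it against 0x40000000 asks whether any memory sits above 1GB. A device with a 30-bit DMA limit (2^30 bytes = 1GB, e.g. the Airport Extreme named in the comment) can already reach every byte of a machine with 1GB or less, making the IOMMU pure overhead there. A minimal sketch of the decision, using a hypothetical helper rather than anything in the patch:

	/* Mirrors the early bail-outs in dart_init() above. */
	static int dart_worth_enabling(int is_off, int force_on,
				       unsigned long long dram_end)
	{
		if (is_off)			/* iommu=off on the cmdline */
			return 0;
		if (force_on)			/* iommu=on forces the DART */
			return 1;
		return dram_end > 0x40000000ull; /* RAM above the 1GB line? */
	}
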
@@ -301,7 +347,7 @@ static void iommu_table_dart_setup(void)
 	iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
 
 	/* Initialize the common IOMMU code */
-	iommu_table_dart.it_base = (unsigned long)dart_vbase;
+	iommu_table_dart.it_base = (unsigned long)dart_tablebase;
 	iommu_table_dart.it_index = 0;
 	iommu_table_dart.it_blocksize = 1;
 	iommu_table_dart.it_ops = &iommu_dart_ops;
@@ -404,75 +450,21 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
 }
 
 #ifdef CONFIG_PM
-static void iommu_dart_save(void)
-{
-	memcpy(dart_copy, dart_vbase, 2*1024*1024);
-}
-
 static void iommu_dart_restore(void)
 {
-	memcpy(dart_vbase, dart_copy, 2*1024*1024);
+	dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
 	dart_tlb_invalidate_all();
 }
 
 static int __init iommu_init_late_dart(void)
 {
-	unsigned long tbasepfn;
-	struct page *p;
-
-	/* if no dart table exists then we won't need to save it
-	 * and the area has also not been reserved */
 	if (!dart_tablebase)
 		return 0;
 
-	tbasepfn = __pa(dart_tablebase) >> PAGE_SHIFT;
-	register_nosave_region_late(tbasepfn,
-				    tbasepfn + ((1<<24) >> PAGE_SHIFT));
-
-	/* For suspend we need to copy the dart contents because
-	 * it is not part of the regular mapping (see above) and
-	 * thus not saved automatically. The memory for this copy
-	 * must be allocated early because we need 2 MB. */
-	p = alloc_pages(GFP_KERNEL, 21 - PAGE_SHIFT);
-	BUG_ON(!p);
-	dart_copy = page_address(p);
-
-	ppc_md.iommu_save = iommu_dart_save;
 	ppc_md.iommu_restore = iommu_dart_restore;
 
 	return 0;
 }
 
 late_initcall(iommu_init_late_dart);
-#endif
-
-void __init alloc_dart_table(void)
-{
-	/* Only reserve DART space if machine has more than 1GB of RAM
-	 * or if requested with iommu=on on cmdline.
-	 *
-	 * 1GB of RAM is picked as limit because some default devices
-	 * (i.e. Airport Extreme) have 30 bit address range limits.
-	 */
-
-	if (iommu_is_off)
-		return;
-
-	if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
-		return;
-
-	/* 512 pages (2MB) is max DART tablesize. */
-	dart_tablesize = 1UL << 21;
-	/* 16MB (1 << 24) alignment. We allocate a full 16MB chunk since we
-	 * will blow up an entire large page anyway in the kernel mapping.
-	 */
-	dart_tablebase = (unsigned long)
-		__va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
-	/*
-	 * The DART space is later unmapped from the kernel linear mapping and
-	 * accessing dart_tablebase during kmemleak scanning will fault.
-	 */
-	kmemleak_no_scan((void *)dart_tablebase);
-
-	printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
-}
+#endif /* CONFIG_PM */
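
The final CONFIG_PM hunk is where the new scheme pays off. The old code had to register the 16MB region as a nosave area and keep a private 2MB copy (dart_copy, filled by iommu_dart_save()) because the table sat outside the normally saved linear mapping; now the table is plain kernel memory, hibernation saves and restores it like any other page, and resume only has to make the restored entries visible to the DART again. The surviving hook, restated with editorial comments:

	/* Resume: the hibernation core has already restored the table
	 * contents as ordinary RAM; push them past the CPU caches and
	 * drop whatever stale translations the DART still holds.
	 */
	static void iommu_dart_restore(void)
	{
		dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
		dart_tlb_invalidate_all();
	}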