author    | Jens.Osterkamp@de.ibm.com <Jens.Osterkamp@de.ibm.com> | 2005-12-09 13:04:20 -0500
committer | Paul Mackerras <paulus@samba.org> | 2006-01-08 22:53:24 -0500
commit    | 49d65b3ac5614431041abcd2eabc0d77eff5e32d (patch)
tree      | df090de4f35468904fc37011ac06b9316393ff0f /arch
parent    | 38307341af3a0be8ec5319756361b51ac29dffc7 (diff)
[PATCH] powerpc/cell: add iommu support for larger memory
Until now, the iommu code was hardwired to a linear mapping
between 0x20000000 and 0x40000000, so it could only support
512MB of RAM.
This patch keeps the linear mapping, but looks for proper
ibm,dma-window properties to set up larger windows, which
raises the maximum supported RAM size to 2GB.
If there is anything unusual about the dma-window properties,
we fall back to the old behavior.
The iommu can now also be switched off completely with the
regular iommu=off command line option.
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
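
As a reading aid (not part of the commit), the kernel-context sketch below mirrors how the new iommu_devnode_setup() in the diff interprets an ibm,dma-window property: three 64-bit cells, where the upper half of the first cell selects the per-node iommu and the remaining two give the window start and size. The helper name example_read_dma_window is hypothetical.

/* Hypothetical helper, mirroring iommu_devnode_setup() in the patch. */
static int example_read_dma_window(struct device_node *d,
				   unsigned long *token,
				   unsigned long *map_start,
				   unsigned long *map_size)
{
	unsigned long *dma_window;

	dma_window = (unsigned long *)get_property(d, "ibm,dma-window", NULL);
	if (!dma_window)
		return -1;	/* caller falls back to the hardcoded defaults */

	*token     = dma_window[0] >> 32;	/* index into cell_iommus[] */
	*map_start = dma_window[1];		/* start of the DMA window */
	*map_size  = dma_window[2];		/* size of the DMA window */
	return 0;
}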
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/platforms/cell/iommu.c | 225
1 file changed, 176 insertions, 49 deletions
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 74f999b4ac9e..46e7cb9c3e64 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -29,6 +29,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/sections.h>
 #include <asm/iommu.h>
@@ -40,6 +42,7 @@
 #include <asm/abs_addr.h>
 #include <asm/system.h>
 #include <asm/ppc-pci.h>
+#include <asm/udbg.h>
 
 #include "iommu.h"
 
@@ -220,8 +223,6 @@ set_iopt_cache(void __iomem *base, unsigned long index,
 {
 	unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
 	unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
-	pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n",
-		index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag);
 
 	out_be64(p, val);
 	out_be64(&tags[index], tag);
@@ -248,67 +249,176 @@ set_iocmd_config(void __iomem *base)
 	out_be64(p, conf | IOCMD_CONF_TE);
 }
 
-/* FIXME: get these from the device tree */
-#define ioc_base	0x20000511000ull
-#define ioc_mmio_base	0x20000510000ull
-#define ioid		0x48a
-#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */
-#define io_page_size	0x1000000
-
-static unsigned long map_iopt_entry(unsigned long address)
+static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
 {
-	switch (address >> 20) {
-	case 0x600:
-		address = 0x24020000000ull; /* spider i/o */
-		break;
-	default:
-		address += iopt_phys_offset;
-		break;
-	}
-
-	return get_iopt_entry(address, ioid, IOPT_PROT_RW);
+	set_iocmd_config(base);
+	set_iost_origin(mmio_base);
 }
 
-static void iommu_bus_setup_null(struct pci_bus *b) { }
 static void iommu_dev_setup_null(struct pci_dev *d) { }
+static void iommu_bus_setup_null(struct pci_bus *b) { }
+
+struct cell_iommu {
+	unsigned long base;
+	unsigned long mmio_base;
+	void __iomem *mapped_base;
+	void __iomem *mapped_mmio_base;
+};
+
+static struct cell_iommu cell_iommus[NR_CPUS];
 
 /* initialize the iommu to support a simple linear mapping
  * for each DMA window used by any device. For now, we
  * happen to know that there is only one DMA window in use,
  * starting at iopt_phys_offset. */
-static void cell_map_iommu(void)
+static void cell_do_map_iommu(struct cell_iommu *iommu,
+			      unsigned int ioid,
+			      unsigned long map_start,
+			      unsigned long map_size)
 {
-	unsigned long address;
-	void __iomem *base;
+	unsigned long io_address, real_address;
+	void __iomem *ioc_base, *ioc_mmio_base;
 	ioste ioste;
 	unsigned long index;
 
-	base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE);
-	pr_debug("%lx mapped to %p\n", ioc_base, base);
-	set_iocmd_config(base);
-	iounmap(base);
+	/* we pretend the io page table was at a very high address */
+	const unsigned long fake_iopt = 0x10000000000ul;
+	const unsigned long io_page_size = 0x1000000; /* use 16M pages */
+	const unsigned long io_segment_size = 0x10000000; /* 256M */
+
+	ioc_base = iommu->mapped_base;
+	ioc_mmio_base = iommu->mapped_mmio_base;
+
+	for (real_address = 0, io_address = 0;
+	     io_address <= map_start + map_size;
+	     real_address += io_page_size, io_address += io_page_size) {
+		ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
+		if ((real_address % io_segment_size) == 0) /* segment start */
+			set_iost_cache(ioc_mmio_base,
+				       io_address >> 28, ioste);
+		index = get_ioc_hash_1way(ioste, io_address);
+		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
+			 io_address, index, ioste.val);
+		set_iopt_cache(ioc_mmio_base,
+			       get_ioc_hash_1way(ioste, io_address),
+			       get_ioc_tag(ioste, io_address),
+			       get_iopt_entry(real_address-map_start, ioid, IOPT_PROT_RW));
+	}
+}
 
-	base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE);
-	pr_debug("%lx mapped to %p\n", ioc_mmio_base, base);
+static void iommu_devnode_setup(struct device_node *d)
+{
+	unsigned int *ioid;
+	unsigned long *dma_window, map_start, map_size, token;
+	struct cell_iommu *iommu;
 
-	set_iost_origin(base);
+	ioid = (unsigned int *)get_property(d, "ioid", NULL);
+	if (!ioid)
+		pr_debug("No ioid entry found !\n");
 
-	for (address = 0; address < 0x100000000ul; address += io_page_size) {
-		ioste = get_iost_entry(0x10000000000ul, address, io_page_size);
-		if ((address & 0xfffffff) == 0) /* segment start */
-			set_iost_cache(base, address >> 28, ioste);
-		index = get_ioc_hash_1way(ioste, address);
-		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
-			 address, index, ioste.val);
-		set_iopt_cache(base,
-			       get_ioc_hash_1way(ioste, address),
-			       get_ioc_tag(ioste, address),
-			       map_iopt_entry(address));
-	}
-	iounmap(base);
+	dma_window = (unsigned long *)get_property(d, "ibm,dma-window", NULL);
+	if (!dma_window)
+		pr_debug("No ibm,dma-window entry found !\n");
+
+	map_start = dma_window[1];
+	map_size = dma_window[2];
+	token = dma_window[0] >> 32;
+
+	iommu = &cell_iommus[token];
+
+	cell_do_map_iommu(iommu, *ioid, map_start, map_size);
+}
+
+static void iommu_bus_setup(struct pci_bus *b)
+{
+	struct device_node *d = (struct device_node *)b->sysdata;
+	iommu_devnode_setup(d);
+}
+
+
+static int cell_map_iommu_hardcoded(int num_nodes)
+{
+	struct cell_iommu *iommu = NULL;
+
+	pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
+
+	/* node 0 */
+	iommu = &cell_iommus[0];
+	iommu->mapped_base = __ioremap(0x20000511000, 0x1000, _PAGE_NO_CACHE);
+	iommu->mapped_mmio_base = __ioremap(0x20000510000, 0x1000, _PAGE_NO_CACHE);
+
+	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+
+	cell_do_map_iommu(iommu, 0x048a,
+			  0x20000000ul,0x20000000ul);
+
+	if (num_nodes < 2)
+		return 0;
+
+	/* node 1 */
+	iommu = &cell_iommus[1];
+	iommu->mapped_base = __ioremap(0x30000511000, 0x1000, _PAGE_NO_CACHE);
+	iommu->mapped_mmio_base = __ioremap(0x30000510000, 0x1000, _PAGE_NO_CACHE);
+
+	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+
+	cell_do_map_iommu(iommu, 0x048a,
+			  0x20000000,0x20000000ul);
+
+	return 0;
 }
 
 
+static int cell_map_iommu(void)
+{
+	unsigned int num_nodes = 0, *node_id;
+	unsigned long *base, *mmio_base;
+	struct device_node *dn;
+	struct cell_iommu *iommu = NULL;
+
+	/* determine number of nodes (=iommus) */
+	pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
+	for(dn = of_find_node_by_type(NULL, "cpu");
+	    dn;
+	    dn = of_find_node_by_type(dn, "cpu")) {
+		node_id = (unsigned int *)get_property(dn, "node-id", NULL);
+
+		if (num_nodes < *node_id)
+			num_nodes = *node_id;
+	}
+
+	num_nodes++;
+	pr_debug("%i found.\n", num_nodes);
+
+	/* map the iommu registers for each node */
+	pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
+	for(dn = of_find_node_by_type(NULL, "cpu");
+	    dn;
+	    dn = of_find_node_by_type(dn, "cpu")) {
+
+		node_id = (unsigned int *)get_property(dn, "node-id", NULL);
+		base = (unsigned long *)get_property(dn, "ioc-cache", NULL);
+		mmio_base = (unsigned long *)get_property(dn, "ioc-translation", NULL);
+
+		if (!base || !mmio_base || !node_id)
+			return cell_map_iommu_hardcoded(num_nodes);
+
+		iommu = &cell_iommus[*node_id];
+		iommu->base = *base;
+		iommu->mmio_base = *mmio_base;
+
+		iommu->mapped_base = __ioremap(*base, 0x1000, _PAGE_NO_CACHE);
+		iommu->mapped_mmio_base = __ioremap(*mmio_base, 0x1000, _PAGE_NO_CACHE);
+
+		enable_mapping(iommu->mapped_base,
+			       iommu->mapped_mmio_base);
+
+		/* everything else will be done in iommu_bus_setup */
+	}
+
+	return 1;
+}
+
 static void *cell_alloc_coherent(struct device *hwdev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -365,11 +475,28 @@ static int cell_dma_supported(struct device *dev, u64 mask)
 
 void cell_init_iommu(void)
 {
-	cell_map_iommu();
+	int setup_bus = 0;
 
-	/* Direct I/O, IOMMU off */
-	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+	if (of_find_node_by_path("/mambo")) {
+		pr_info("Not using iommu on systemsim\n");
+	} else {
+
+		if (!(of_chosen &&
+		      get_property(of_chosen, "linux,iommu-off", NULL)))
+			setup_bus = cell_map_iommu();
+
+		if (setup_bus) {
+			pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
+			ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+			ppc_md.iommu_bus_setup = iommu_bus_setup;
+		} else {
+			pr_debug("%s: IOMMU mapping activated, "
+				 "no device action necessary\n", __FUNCTION__);
+			/* Direct I/O, IOMMU off */
+			ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+			ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+		}
+	}
 
 	pci_dma_ops.alloc_coherent = cell_alloc_coherent;
 	pci_dma_ops.free_coherent = cell_free_coherent;
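
As a rough, standalone illustration (not kernel code) of the arithmetic in cell_do_map_iommu() above: with the 16MB I/O pages and 256MB segments used by the patch, the loop walks I/O addresses from 0 up to map_start + map_size, touching one IOST cache entry per 256MB segment and one IOPT cache entry per 16MB page. The sketch below replicates just the loop bounds for the hardcoded node 0 window (start 0x20000000, size 0x20000000).

/* Standalone sketch: count how many IOST segment entries and IOPT pages
 * the loop in cell_do_map_iommu() touches for a given window.  Constants
 * match the patch; the window values are the hardcoded node 0 defaults. */
#include <stdio.h>

int main(void)
{
	const unsigned long io_page_size    = 0x1000000;  /* 16MB pages */
	const unsigned long io_segment_size = 0x10000000; /* 256MB segments */
	unsigned long map_start = 0x20000000ul;           /* hardcoded default */
	unsigned long map_size  = 0x20000000ul;           /* 512MB window */
	unsigned long io_address, segments = 0, pages = 0;

	for (io_address = 0; io_address <= map_start + map_size;
	     io_address += io_page_size) {
		if ((io_address % io_segment_size) == 0)
			segments++;
		pages++;
	}
	printf("segments: %lu, pages: %lu\n", segments, pages);
	return 0;
}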