Diffstat (limited to 'arch/sparc/kernel/ioport.c')

 arch/sparc/kernel/ioport.c | 116
 1 file changed, 62 insertions(+), 54 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e0f72a..1c9c80a1a86a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,19 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
+/* This function must make sure that caches and memory are coherent after DMA
+ * On LEON systems without cache snooping it flushes the entire D-CACHE.
+ */
+#ifndef CONFIG_SPARC_LEON
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+}
 #else
-#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+	if (!sparc_leon3_snooping_enabled())
+		leon_flush_dcache_all();
+}
 #endif
 
 static struct resource *_sparc_find_resource(struct resource *r,
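
The old mmu_inval_dma_area() macro flushed the LEON D-cache unconditionally; the new dma_make_coherent() helper is an empty inline on non-LEON builds and flushes on LEON only when the hardware cannot snoop. For context, LEON3 advertises data-cache snooping in its cache control register; a sketch of the kind of probe sparc_leon3_snooping_enabled() performs follows. The ASI number and bit position are assumptions for illustration, not quoted from asm/leon.h:

    /* Hypothetical sketch: read the LEON3 cache control register through
     * the system-register ASI and test the data-cache snooping bit. */
    static inline int leon3_snooping_probe(void)
    {
            unsigned int ccr;

            /* ASI 0x02 = LEON system registers (assumed) */
            __asm__ __volatile__("lda [%%g0] 2, %0" : "=r" (ccr));
            return (ccr >> 23) & 1;   /* bit 23 = data snooping (assumed) */
    }
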
@@ -254,7 +263,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 			   dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct platform_device *op = to_platform_device(dev);
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+	unsigned long len_total = PAGE_ALIGN(len);
 	unsigned long va;
 	struct resource *res;
 	int order;
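
The PAGE_ALIGN() conversions here and below are purely cosmetic; in the kernel headers of this era the macro expands to exactly the expression it replaces:

    #define PAGE_ALIGN(addr)  (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

so len_total still rounds len up to a whole number of pages.
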
@@ -280,7 +289,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
 		goto err_nova;
 	}
-	mmu_inval_dma_area(va, len_total);
+
 	// XXX The mmu_map_dma_area does this for us below, see comments.
 	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 	/*
@@ -297,9 +306,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 err_noiommu:
 	release_resource(res);
 err_nova:
-	free_pages(va, order);
-err_nomem:
 	kfree(res);
+err_nomem:
+	free_pages(va, order);
 err_nopages:
 	return NULL;
 }
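
This reordering fixes a leak: in the old ladder, a failed kzalloc() jumped to err_nomem, which ran only kfree(res) (a no-op on NULL) and never returned the pages allocated just above. Swapping the label bodies restores the usual rule that each label undoes exactly the allocations made before its goto, in reverse order of acquisition. The idiom, with the error paths spelled out (a condensed sketch, not the full function):

    void *alloc_example(size_t len)
    {
            unsigned long va;
            struct resource *res;

            va = __get_free_pages(GFP_KERNEL, get_order(len));
            if (!va)
                    goto err_nopages;
            res = kzalloc(sizeof(*res), GFP_KERNEL);
            if (!res)
                    goto err_nomem;
            if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
                                  _sparc_dvma.end, PAGE_SIZE, NULL, NULL))
                    goto err_nova;
            return (void *)va;

    err_nova:
            kfree(res);                     /* undoes kzalloc()          */
    err_nomem:
            free_pages(va, get_order(len)); /* undoes __get_free_pages() */
    err_nopages:
            return NULL;
    }
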
@@ -321,7 +330,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 			(long)((res->end-res->start)+1), n);
@@ -331,7 +340,6 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
 	release_resource(res);
 	kfree(res);
 
-	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
 	pgv = virt_to_page(p);
 	mmu_unmap_dma_area(dev, ba, n);
 
@@ -408,9 +416,6 @@ struct dma_map_ops sbus_dma_ops = {
 	.sync_sg_for_device = sbus_sync_sg_for_device,
 };
 
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -422,7 +427,9 @@ arch_initcall(sparc_register_ioport);
 
 #endif /* CONFIG_SBUS */
 
-#ifdef CONFIG_PCI
+
+/* LEON reuses PCI DMA ops */
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
 
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,8 +437,8 @@ arch_initcall(sparc_register_ioport);
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
 				  dma_addr_t *pba, gfp_t gfp)
 {
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
-	unsigned long va;
+	unsigned long len_total = PAGE_ALIGN(len);
+	void *va;
 	struct resource *res;
 	int order;
 
@@ -443,34 +450,33 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
 	}
 
 	order = get_order(len_total);
-	va = __get_free_pages(GFP_KERNEL, order);
-	if (va == 0) {
+	va = (void *) __get_free_pages(GFP_KERNEL, order);
+	if (va == NULL) {
 		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
-		return NULL;
+		goto err_nopages;
 	}
 
 	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-		free_pages(va, order);
 		printk("pci_alloc_consistent: no core\n");
-		return NULL;
+		goto err_nomem;
 	}
 
 	if (allocate_resource(&_sparc_dvma, res, len_total,
 	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
 		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
-		free_pages(va, order);
-		kfree(res);
-		return NULL;
+		goto err_nova;
 	}
-	mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
-  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
 	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
+
+err_nova:
+	kfree(res);
+err_nomem:
+	free_pages((unsigned long)va, order);
+err_nopages:
+	return NULL;
 }
 
 /* Free and unmap a consistent DMA buffer.
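
pci32_alloc_coherent() previously open-coded its cleanup at every failure site; converting it to the same ladder (and making va a void * so the NULL test reads naturally) removes the duplicated free_pages()/kfree() calls, and the dead #if 0 debug printk goes with them. For orientation, a hypothetical driver reaches this allocator through the generic API:

    /* Hypothetical usage; dev is a sparc32 PCI device's struct device. */
    dma_addr_t handle;
    void *buf = dma_alloc_coherent(dev, 8192, &handle, GFP_KERNEL);

    if (buf) {
            /* hand 'handle' to the device, use 'buf' from the CPU ... */
            dma_free_coherent(dev, 8192, buf, handle);
    }
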
@@ -485,7 +491,6 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 			       dma_addr_t ba)
 {
 	struct resource *res;
-	unsigned long pgp;
 
 	if ((res = _sparc_find_resource(&_sparc_dvma,
 	    (unsigned long)p)) == NULL) {
@@ -498,21 +503,19 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
 		    (long)((res->end-res->start)+1), (long)n);
 		return;
 	}
 
-	pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
-	mmu_inval_dma_area(pgp, n);
+	dma_make_coherent(ba, n);
 	sparc_unmapiorange((unsigned long)p, n);
 
 	release_resource(res);
 	kfree(res);
-
-	free_pages(pgp, get_order(n));
+	free_pages((unsigned long)phys_to_virt(ba), get_order(n));
 }
 
 /*
@@ -527,6 +530,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
 	return page_to_phys(page) + offset;
 }
 
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (dir != PCI_DMA_TODEVICE)
+		dma_make_coherent(ba, PAGE_ALIGN(size));
+}
+
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
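
Until now pci32_dma_ops had no .unmap_page hook at all (see the ops-table hunk below), so dma_unmap_page() performed no cache maintenance on sparc32. The new hook mirrors the sync routines: for any direction in which the device may have written (everything but PCI_DMA_TODEVICE) it makes the buffer coherent before the CPU reads it. A hypothetical streaming mapping that now ends in this function:

    dma_addr_t ba = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    /* ... device DMAs into the page ... */
    dma_unmap_page(dev, ba, PAGE_SIZE, DMA_FROM_DEVICE);
    /* on LEON without snooping, the unmap flushed the D-cache */
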
@@ -551,8 +561,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
 
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg_page(sg)) == NULL);
-		sg->dma_address = virt_to_phys(sg_virt(sg));
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nents;
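
sg_phys() computes the bus address directly from the struct page, so the page no longer needs a kernel virtual mapping; the BUG_ON() guard, which virt_to_phys(sg_virt(sg)) required and which a highmem page would trip, can simply go. Its definition in <linux/scatterlist.h>:

    static inline dma_addr_t sg_phys(struct scatterlist *sg)
    {
            return page_to_phys(sg_page(sg)) + sg->offset;
    }
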
@@ -571,10 +580,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -593,8 +599,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
 			     size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
+		dma_make_coherent(ba, PAGE_ALIGN(size));
 	}
 }
 
@@ -602,8 +607,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
 			     size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
+		dma_make_coherent(ba, PAGE_ALIGN(size));
 	}
 }
 
@@ -621,10 +625,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -637,10 +638,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -649,6 +647,7 @@ struct dma_map_ops pci32_dma_ops = {
 	.alloc_coherent = pci32_alloc_coherent,
 	.free_coherent = pci32_free_coherent,
 	.map_page = pci32_map_page,
+	.unmap_page = pci32_unmap_page,
 	.map_sg = pci32_map_sg,
 	.unmap_sg = pci32_unmap_sg,
 	.sync_single_for_cpu = pci32_sync_single_for_cpu,
@@ -658,7 +657,16 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &pci32_dma_ops;
+#elif defined(CONFIG_SBUS)
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+EXPORT_SYMBOL(dma_ops);
+
 
 /*
  * Return whether the given PCI device DMA address mask can be
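
With the SBus-section definition removed in the earlier hunk, the kernel-wide dma_ops pointer is now chosen in one place: LEON, which has no SBus and uses flat PCI-style DMA, points it at pci32_dma_ops, while sun4m/sun4c SBus machines keep sbus_dma_ops. A sketch of the consumer side, as sparc32's asm/dma-mapping.h of this era dispatches through the global (simplified; treat it as an assumption about that header rather than a quote):

    static inline struct dma_map_ops *get_dma_ops(struct device *dev)
    {
            return dma_ops;
    }

    /* every dma_map_*() helper then indirects through the ops table, e.g.: */
    static inline dma_addr_t dma_map_single(struct device *dev, void *va,
                                            size_t size, enum dma_data_direction dir)
    {
            return get_dma_ops(dev)->map_page(dev, virt_to_page(va),
                                              offset_in_page(va), size, dir, NULL);
    }
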
@@ -717,7 +725,7 @@ static const struct file_operations sparc_io_proc_fops = {
 static struct resource *_sparc_find_resource(struct resource *root,
 					     unsigned long hit)
 {
-        struct resource *tmp;
+	struct resource *tmp;
 
 	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
 		if (tmp->start <= hit && tmp->end >= hit)