Diffstat (limited to 'arch/sparc/kernel/ioport.c')
 arch/sparc/kernel/ioport.c | 42 ++++++++++++++++--------------------------
 1 file changed, 16 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index c6ce9a6a4790..1c9c80a1a86a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,15 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
+/* This function must make sure that caches and memory are coherent after DMA
+ * On LEON systems without cache snooping it flushes the entire D-CACHE.
+ */
 #ifndef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+}
 #else
-static inline void mmu_inval_dma_area(void *va, unsigned long len)
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
 {
 	if (!sparc_leon3_snooping_enabled())
 		leon_flush_dcache_all();
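[Note: for reference, the helper reads as follows once this hunk is applied. This is assembled from the hunk itself; the closing #endif sits outside the diff context but is required by the preprocessor structure. On non-LEON sparc32 the function is intentionally empty, since the IIep cache is write-through (see the comment preserved in pci32_map_sg below); on LEON it flushes the whole D-cache unless snooping is enabled.]

	#ifndef CONFIG_SPARC_LEON
	static inline void dma_make_coherent(unsigned long pa, unsigned long len)
	{
	}
	#else
	static inline void dma_make_coherent(unsigned long pa, unsigned long len)
	{
		if (!sparc_leon3_snooping_enabled())
			leon_flush_dcache_all();
	}
	#endif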
@@ -284,7 +289,6 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
 		goto err_nova;
 	}
-	mmu_inval_dma_area((void *)va, len_total);
 
 	// XXX The mmu_map_dma_area does this for us below, see comments.
 	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
@@ -336,7 +340,6 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
 	release_resource(res);
 	kfree(res);
 
-	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
 	pgv = virt_to_page(p);
 	mmu_unmap_dma_area(dev, ba, n);
 
@@ -463,7 +466,6 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
 		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
 		goto err_nova;
 	}
-	mmu_inval_dma_area(va, len_total);
 	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
@@ -489,7 +491,6 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 			      dma_addr_t ba)
 {
 	struct resource *res;
-	void *pgp;
 
 	if ((res = _sparc_find_resource(&_sparc_dvma,
 	    (unsigned long)p)) == NULL) {
@@ -509,14 +510,12 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 		return;
 	}
 
-	pgp = phys_to_virt(ba);	/* bus_to_virt actually */
-	mmu_inval_dma_area(pgp, n);
+	dma_make_coherent(ba, n);
 	sparc_unmapiorange((unsigned long)p, n);
 
 	release_resource(res);
 	kfree(res);
-
-	free_pages((unsigned long)pgp, get_order(n));
+	free_pages((unsigned long)phys_to_virt(ba), get_order(n));
 }
 
 /*
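[Note: the pgp temporary disappears because the cache-maintenance call no longer needs a kernel virtual address. On this platform the bus address is the physical address (the surviving comment in pci32_alloc_coherent, "equals virt_to_bus", says as much), so ba can be passed straight to dma_make_coherent() and phys_to_virt() survives only at the final free_pages(). A minimal illustrative sketch of the identity being relied on, for lowmem buffers only:]

	void *va = phys_to_virt(ba);	/* the old "bus_to_virt actually" step */
	BUG_ON(virt_to_phys(va) != ba);	/* pa == ba here, so the round trip was redundant */
	dma_make_coherent(ba, n);	/* flush by physical address directly */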
@@ -535,7 +534,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
 			     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	if (dir != PCI_DMA_TODEVICE)
-		mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
+		dma_make_coherent(ba, PAGE_ALIGN(size));
 }
 
 /* Map a set of buffers described by scatterlist in streaming
@@ -562,8 +561,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
 
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg_page(sg)) == NULL);
-		sg->dma_address = virt_to_phys(sg_virt(sg));
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nents;
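[Note: sg_phys() computes the physical address from the page pointer and offset, so the BUG_ON that guarded page_address() (which returns NULL for highmem pages without a kernel mapping) is no longer needed. For reference, the helper in <linux/scatterlist.h> is essentially:]

	static inline dma_addr_t sg_phys(struct scatterlist *sg)
	{
		return page_to_phys(sg_page(sg)) + sg->offset;
	}

[This matches the old virt_to_phys(sg_virt(sg)) whenever the page has a kernel mapping, and also works when it does not.]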
@@ -582,9 +580,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(page_address(sg_page(sg)),
-					   PAGE_ALIGN(sg->length));
+			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -603,8 +599,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
 				      size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area(phys_to_virt(ba),
-				   PAGE_ALIGN(size));
+		dma_make_coherent(ba, PAGE_ALIGN(size));
 	}
 }
 
@@ -612,8 +607,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
 					 size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area(phys_to_virt(ba),
-				   PAGE_ALIGN(size));
+		dma_make_coherent(ba, PAGE_ALIGN(size));
 	}
 }
 
@@ -631,9 +625,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(page_address(sg_page(sg)),
-					   PAGE_ALIGN(sg->length));
+			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -646,9 +638,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(page_address(sg_page(sg)),
-					   PAGE_ALIGN(sg->length));
+			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
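[Note: all of the sync/unmap callbacks above now share one shape: skip cache maintenance for CPU-to-device transfers, otherwise hand the bus address to dma_make_coherent(). A hypothetical driver fragment, not from this patch and with invented names, showing the path that ends up here, assuming the usual routing of the generic DMA API through these pci32_* ops:]

	/* Hypothetical: a device-to-memory transfer on a sparc32 PCI device. */
	dma_addr_t ba = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... device DMAs into buf ... */
	dma_sync_single_for_cpu(dev, ba, len, DMA_FROM_DEVICE);
		/* -> pci32_sync_single_for_cpu() -> dma_make_coherent(ba, ...) */
	dma_unmap_single(dev, ba, len, DMA_FROM_DEVICE);
		/* -> pci32_unmap_page() -> dma_make_coherent(ba, PAGE_ALIGN(len)) */

[Only ba is available at sync/unmap time, which is why the helper takes a physical address rather than the kernel virtual address the old mmu_inval_dma_area() required.]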