author     Kristoffer Glembo <kristoffer@gaisler.com>   2011-01-17 23:10:28 -0500
committer  David S. Miller <davem@davemloft.net>        2011-03-16 21:18:59 -0400
commit     7feee249d47f950a19ca142660ee41fe27b04b27 (patch)
tree       b0654f39f73835d866cc580102180b39f97ed8dd /arch/sparc/kernel
parent     1b19274083d67f66ce3097c8d2bebc22b7911e3f (diff)
sparc: Make mmu_inval_dma_area take void * instead of unsigned long to minimize casts.
Signed-off-by: Kristoffer Glembo <kristoffer@gaisler.com>
Acked-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
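A condensed before/after sketch of the interface change (assembled from the hunks below, not a verbatim copy of the file):

    /* Before: the helper took an unsigned long, so every caller had to cast. */
    static inline void mmu_inval_dma_area(unsigned long va, unsigned long len)
    {
            if (!sparc_leon3_snooping_enabled())
                    leon_flush_dcache_all();
    }
    mmu_inval_dma_area((unsigned long)phys_to_virt(ba), PAGE_ALIGN(size));

    /* After: it takes void *, so the pointers returned by phys_to_virt() and
     * page_address() can be passed straight through. */
    static inline void mmu_inval_dma_area(void *va, unsigned long len)
    {
            if (!sparc_leon3_snooping_enabled())
                    leon_flush_dcache_all();
    }
    mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));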
Diffstat (limited to 'arch/sparc/kernel')
 arch/sparc/kernel/ioport.c | 53 +++++++++++++++++++++++++++--------------------------
 1 file changed, 27 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 815003b5fab6..bd4fb10748b1 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -53,7 +53,7 @@
 #ifndef CONFIG_SPARC_LEON
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 #else
-static inline void mmu_inval_dma_area(unsigned long va, unsigned long len)
+static inline void mmu_inval_dma_area(void *va, unsigned long len)
 {
 	if (!sparc_leon3_snooping_enabled())
 		leon_flush_dcache_all();
@@ -284,7 +284,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
 		goto err_nova;
 	}
-	mmu_inval_dma_area(va, len_total);
+	mmu_inval_dma_area((void *)va, len_total);
 
 	// XXX The mmu_map_dma_area does this for us below, see comments.
 	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
@@ -435,7 +435,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
 				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = PAGE_ALIGN(len);
-	unsigned long va;
+	void *va;
 	struct resource *res;
 	int order;
 
@@ -447,30 +447,34 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
 	}
 
 	order = get_order(len_total);
-	va = __get_free_pages(GFP_KERNEL, order);
-	if (va == 0) {
+	va = (void *) __get_free_pages(GFP_KERNEL, order);
+	if (va == NULL) {
 		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
-		return NULL;
+		goto err_nopages;
 	}
 
 	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-		free_pages(va, order);
 		printk("pci_alloc_consistent: no core\n");
-		return NULL;
+		goto err_nomem;
 	}
 
 	if (allocate_resource(&_sparc_dvma, res, len_total,
 	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
 		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
-		free_pages(va, order);
-		kfree(res);
-		return NULL;
+		goto err_nova;
 	}
 	mmu_inval_dma_area(va, len_total);
 	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va);	/* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
+
+err_nova:
+	kfree(res);
+err_nomem:
+	free_pages((unsigned long)va, order);
+err_nopages:
+	return NULL;
 }
 
 /* Free and unmap a consistent DMA buffer.
@@ -485,7 +489,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 				dma_addr_t ba)
 {
 	struct resource *res;
-	unsigned long pgp;
+	void *pgp;
 
 	if ((res = _sparc_find_resource(&_sparc_dvma,
 	    (unsigned long)p)) == NULL) {
@@ -505,14 +509,14 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 		return;
 	}
 
-	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
+	pgp = phys_to_virt(ba);	/* bus_to_virt actually */
 	mmu_inval_dma_area(pgp, n);
 	sparc_unmapiorange((unsigned long)p, n);
 
 	release_resource(res);
 	kfree(res);
 
-	free_pages(pgp, get_order(n));
+	free_pages((unsigned long)pgp, get_order(n));
 }
 
 /*
@@ -531,7 +535,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
 			     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	if (dir != PCI_DMA_TODEVICE)
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba), PAGE_ALIGN(size));
+		mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
 }
 
 /* Map a set of buffers described by scatterlist in streaming
@@ -579,9 +583,8 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    PAGE_ALIGN(sg->length));
+			mmu_inval_dma_area(page_address(sg_page(sg)),
+					   PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -600,7 +603,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
 				      size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
+		mmu_inval_dma_area(phys_to_virt(ba),
 		    PAGE_ALIGN(size));
 	}
 }
@@ -609,7 +612,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
 					 size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
+		mmu_inval_dma_area(phys_to_virt(ba),
 		    PAGE_ALIGN(size));
 	}
 }
@@ -629,9 +632,8 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    PAGE_ALIGN(sg->length));
+			mmu_inval_dma_area(page_address(sg_page(sg)),
+					   PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -645,9 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    PAGE_ALIGN(sg->length));
+			mmu_inval_dma_area(page_address(sg_page(sg)),
+					   PAGE_ALIGN(sg->length));
 		}
 	}
 }