Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--	arch/sparc/kernel/ioport.c	190
1 file changed, 105 insertions(+), 85 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d975..edbea232c617 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -48,8 +48,6 @@
 #include <asm/iommu.h>
 #include <asm/io-unit.h>
 
-#include "dma.h"
-
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +244,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
  */
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +298,8 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+			       dma_addr_t ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -317,7 +317,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 
 	n = (n + PAGE_SIZE-1) & PAGE_MASK;
 	if ((res->end-res->start)+1 != n) {
-		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 			(long)((res->end-res->start)+1), n);
 		return;
 	}
@@ -337,8 +337,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t len,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
+	void *va = page_address(page) + offset;
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -350,12 +355,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
 	return mmu_get_scsi_one(dev, va, len);
 }
 
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_one(dev, ba, n);
 }
 
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
 
@@ -366,19 +373,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
 	return n;
 }
 
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_sgl(dev, sg, n);
 }
 
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				 int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				    int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
+struct dma_map_ops sbus_dma_ops = {
+	.alloc_coherent		= sbus_alloc_coherent,
+	.free_coherent		= sbus_free_coherent,
+	.map_page		= sbus_map_page,
+	.unmap_page		= sbus_unmap_page,
+	.map_sg			= sbus_map_sg,
+	.unmap_sg		= sbus_unmap_sg,
+	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
+	.sync_sg_for_device	= sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
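
With this hunk the SBus helpers become static and are published only through the sbus_dma_ops table, with the global dma_ops pointer defaulting to it. For orientation, here is a rough sketch of how a driver call now reaches sbus_map_page() — simplified from the asm-generic dma-mapping wrappers of this kernel generation, with debug hooks omitted, so treat the wrapper bodies as illustrative rather than the literal header code:

	/* Illustrative sketch only -- not part of the patch. */
	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		return dma_ops;	/* on sparc32: &sbus_dma_ops, set above */
	}

	static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
						size_t size, enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		/* .map_page subsumes the old sbus_map_single(): the generic
		 * wrapper splits the virtual address into page + offset. */
		return ops->map_page(dev, virt_to_page(cpu_addr),
				     offset_in_page(cpu_addr), size, dir, NULL);
	}
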
@@ -395,7 +421,8 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
@@ -439,7 +466,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
-EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
@@ -449,7 +475,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
  * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+				dma_addr_t ba)
 {
 	struct resource *res;
 	unsigned long pgp;
@@ -481,60 +508,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 
 	free_pages(pgp, get_order(n));
 }
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* IIep is write-through, not flushing. */
-	return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
-	}
-}
-EXPORT_SYMBOL(pci_unmap_single);
 
 /*
  * Same as pci_map_single, but with pages.
  */
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
@@ -551,13 +536,13 @@ EXPORT_SYMBOL(pci_unmap_page);
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +551,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	}
 	return nents;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -588,7 +572,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation before or after a transfer.
@@ -600,25 +583,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
  * must first perform a pci_dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+				      size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+					 size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
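
The ownership rules quoted in the hunk context above are what make the sync_single conversion safe: only the entry-point names change, not the protocol. A hypothetical driver fragment (my_dev, buf, and len are placeholders) showing where the new hooks fire:

	/* Hypothetical usage sketch -- not part of the patch. */
	dma_addr_t ba = dma_map_single(my_dev, buf, len, DMA_FROM_DEVICE);

	/* ... device DMAs into buf ... */

	dma_sync_single_for_cpu(my_dev, ba, len, DMA_FROM_DEVICE);
	/* CPU may read buf; on sparc32 this lands in pci32_sync_single_for_cpu(),
	 * which invalidates the stale CPU view via mmu_inval_dma_area(). */

	dma_sync_single_for_device(my_dev, ba, len, DMA_FROM_DEVICE);
	/* the device owns the buffer again */

	dma_unmap_single(my_dev, ba, len, DMA_FROM_DEVICE);
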
@@ -626,13 +607,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+				  int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -641,15 +622,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+				     int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -658,9 +638,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+	.alloc_coherent		= pci32_alloc_coherent,
+	.free_coherent		= pci32_free_coherent,
+	.map_page		= pci32_map_page,
+	.map_sg			= pci32_map_sg,
+	.unmap_sg		= pci32_unmap_sg,
+	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
+	.sync_single_for_device	= pci32_sync_single_for_device,
+	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
+	.sync_sg_for_device	= pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
 #endif /* CONFIG_PCI */
 
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly.  For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return 1;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+
 #ifdef CONFIG_PROC_FS
 
 static int
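
The newly added dma_supported() and dma_set_mask() give sparc32 the generic mask-negotiation entry points that the dma_map_ops framework expects: PCI devices are forwarded to pci_set_dma_mask(), everything else is refused. A hypothetical probe-time caller would use them roughly like this:

	/* Hypothetical probe fragment -- illustrates the new entry point. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_warn(&pdev->dev, "no suitable DMA mask available\n");
		return -ENODEV;
	}
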