Diffstat (limited to 'arch/sparc/kernel/ioport.c')
 arch/sparc/kernel/ioport.c | 222 +++++++++++++++++++++++++++----------------
 1 file changed, 125 insertions(+), 97 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d975..9f61fd8cbb7b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>		/* struct pci_dev */
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/scatterlist.h>
 #include <linux/of_device.h>
 
@@ -48,8 +49,6 @@
 #include <asm/iommu.h>
 #include <asm/io-unit.h>
 
-#include "dma.h"
-
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
  */
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +299,8 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+			       dma_addr_t ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -317,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 
 	n = (n + PAGE_SIZE-1) & PAGE_MASK;
 	if ((res->end-res->start)+1 != n) {
-		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 		    (long)((res->end-res->start)+1), n);
 		return;
 	}
@@ -337,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t len,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
+	void *va = page_address(page) + offset;
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -350,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
 	return mmu_get_scsi_one(dev, va, len);
 }
 
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_one(dev, ba, n);
 }
 
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
 
@@ -366,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
 	return n;
 }
 
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_sgl(dev, sg, n);
 }
 
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				 int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				    int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
+struct dma_map_ops sbus_dma_ops = {
+	.alloc_coherent		= sbus_alloc_coherent,
+	.free_coherent		= sbus_free_coherent,
+	.map_page		= sbus_map_page,
+	.unmap_page		= sbus_unmap_page,
+	.map_sg			= sbus_map_sg,
+	.unmap_sg		= sbus_unmap_sg,
+	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
+	.sync_sg_for_device	= sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
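For context, nothing calls sbus_map_page() and friends directly: the generic
DMA API dispatches through the dma_ops pointer registered above. A minimal
sketch of that dispatch, assuming the asm-generic style wrapper of this era
(simplified; the real one adds direction checks and debug hooks):

	/* Sketch only: how dma_map_single() reaches .map_page through dma_ops.
	 * A "single" mapping is expressed as a page + offset pair. */
	static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
						size_t size,
						enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = dma_ops;

		return ops->map_page(dev, virt_to_page(ptr),
				     (unsigned long)ptr & ~PAGE_MASK,
				     size, dir, NULL);
	}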
@@ -395,7 +422,8 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
@@ -439,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
-EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
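As a driver-side sketch (hypothetical code, not part of this patch): a
coherent allocation hands back both a CPU pointer and a bus handle, and
needs no explicit flushing while in use:

	/* Hypothetical consumer; on sparc32 this now reaches
	 * pci32_alloc_coherent() through dma_ops. */
	static int example_setup(struct device *dev)
	{
		dma_addr_t bus_addr;
		void *cpu_addr;

		cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;
		/* ... program bus_addr into the device, touch cpu_addr freely ... */
		dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
		return 0;
	}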
@@ -449,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
  * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+				dma_addr_t ba)
 {
 	struct resource *res;
 	unsigned long pgp;
@@ -481,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 
 	free_pages(pgp, get_order(n));
 }
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* IIep is write-through, not flushing. */
-	return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
-	}
-}
-EXPORT_SYMBOL(pci_unmap_single);
 
 /*
  * Same as pci_map_single, but with pages.
  */
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-	unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
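A hypothetical streaming-mode caller of these ops (illustration only, not
from this patch): on IIep the map side is a pure address translation, so
any cache work happens at unmap or sync time:

	/* Hypothetical receive path using the streaming API. */
	static void example_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t ba = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

		/* ... device DMAs into the buffer ... */

		dma_unmap_single(dev, ba, len, DMA_FROM_DEVICE);
		/* CPU reads of buf now see what the device wrote. */
	}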
@@ -551,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	}
 	return nents;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -588,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation before or after a transfer.
@@ -600,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
  * must first perform a pci_dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+				      size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+					 size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
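The ownership handshake described in the comment above, as a hypothetical
caller would see it (sketch, not from this patch):

	/* The CPU may only inspect a streaming buffer between the
	 * sync_for_cpu and sync_for_device calls. */
	static void example_peek(struct device *dev, dma_addr_t ba,
				 void *buf, size_t len)
	{
		dma_sync_single_for_cpu(dev, ba, len, DMA_FROM_DEVICE);
		/* ... CPU may read buf here ... */
		dma_sync_single_for_device(dev, ba, len, DMA_FROM_DEVICE);
		/* ... device owns the buffer again ... */
	}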
@@ -626,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+				  int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -641,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+				     int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -658,31 +639,78 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+	.alloc_coherent		= pci32_alloc_coherent,
+	.free_coherent		= pci32_free_coherent,
+	.map_page		= pci32_map_page,
+	.map_sg			= pci32_map_sg,
+	.unmap_sg		= pci32_unmap_sg,
+	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
+	.sync_single_for_device	= pci32_sync_single_for_device,
+	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
+	.sync_sg_for_device	= pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
 
 #endif /* CONFIG_PCI */
 
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly.  For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return 1;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+
 #ifdef CONFIG_PROC_FS
 
-static int
-_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
-		   void *data)
+static int sparc_io_proc_show(struct seq_file *m, void *v)
 {
-	char *p = buf, *e = buf + length;
-	struct resource *r;
+	struct resource *root = m->private, *r;
 	const char *nm;
 
-	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
-		if (p + 32 >= e)	/* Better than nothing */
-			break;
+	for (r = root->child; r != NULL; r = r->sibling) {
 		if ((nm = r->name) == 0) nm = "???";
-		p += sprintf(p, "%016llx-%016llx: %s\n",
+		seq_printf(m, "%016llx-%016llx: %s\n",
 				(unsigned long long)r->start,
 				(unsigned long long)r->end, nm);
 	}
 
-	return p-buf;
+	return 0;
 }
 
+static int sparc_io_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations sparc_io_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sparc_io_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
 #endif /* CONFIG_PROC_FS */
 
 /*
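A hypothetical user of the new generic helpers (not from this patch): a
probe routine for a device that can only drive 24 address bits would
announce that limit, and would fail on non-PCI buses, where dma_set_mask()
now returns -EOPNOTSUPP:

	static int example_probe(struct device *dev)
	{
		/* Only PCI devices can succeed here on sparc32. */
		if (dma_set_mask(dev, DMA_BIT_MASK(24)))
			return -ENODEV;
		return 0;
	}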
@@ -707,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root,
 static void register_proc_sparc_ioport(void)
 {
 #ifdef CONFIG_PROC_FS
-	create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);
-	create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma);
+	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
+	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
 #endif
 }
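For reference, the seq_file conversion does not change the /proc output
format: still one "%016llx-%016llx: name" line per child resource of the
root passed via proc_create_data(). A read might look like this (addresses
and name invented for illustration):

	$ cat /proc/io_map
	00000000f0000000-00000000f0003fff: example-device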