author		David S. Miller <davem@davemloft.net>	2009-09-11 23:35:13 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-11 23:35:13 -0400
commit		cabc5c0f7fa1342049042d6e147db5a73773955b (patch)
tree		2be09ae1777d580c7dfe05d6d5b76e57281ec447 /arch/sparc/kernel/ioport.c
parent		b73d884756303316ead4cd7dad51236b2a515a1a (diff)
parent		86d710146fb9975f04c505ec78caa43d227c1018 (diff)

Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	arch/sparc/Kconfig
Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--	arch/sparc/kernel/ioport.c	190
1 file changed, 105 insertions(+), 85 deletions(-)
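
The change below converts sparc32's SBUS and PCI DMA entry points into static functions published through per-bus dma_map_ops tables (sbus_dma_ops and pci32_dma_ops), so callers reach them through the generic DMA API rather than by calling the sbus_* and pci_* helpers directly. As a rough illustrative sketch only (not code from this commit), a dma_map_single()-style wrapper dispatching through such an ops table could look like the following; the helper name my_dma_map_single() and the use of get_dma_ops() here are assumptions for illustration:

/* Sketch: how a generic DMA wrapper could dispatch through the ops table
 * installed by this patch.  Hypothetical helper, not part of the commit;
 * assumes get_dma_ops(dev) returns the dma_ops pointer that ioport.c now
 * points at &sbus_dma_ops (or at &pci32_dma_ops for PCI devices).
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static inline dma_addr_t my_dma_map_single(struct device *dev, void *cpu_addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* map_page is the per-page primitive both ops tables provide */
	return ops->map_page(dev, virt_to_page(cpu_addr),
			     offset_in_page(cpu_addr), size, dir, NULL);
}
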
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index e71ce79d8c15..9f61fd8cbb7b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -49,8 +49,6 @@
 #include <asm/iommu.h>
 #include <asm/io-unit.h>
 
-#include "dma.h"
-
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -247,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
  */
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -300,7 +299,8 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+			       dma_addr_t ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -318,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 
 	n = (n + PAGE_SIZE-1) & PAGE_MASK;
 	if ((res->end-res->start)+1 != n) {
-		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 			(long)((res->end-res->start)+1), n);
 		return;
 	}
@@ -338,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t len,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
+	void *va = page_address(page) + offset;
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -351,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
 	return mmu_get_scsi_one(dev, va, len);
 }
 
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_one(dev, ba, n);
 }
 
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
 
@@ -367,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
 	return n;
 }
 
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_sgl(dev, sg, n);
 }
 
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				 int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				    int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
+struct dma_map_ops sbus_dma_ops = {
+	.alloc_coherent		= sbus_alloc_coherent,
+	.free_coherent		= sbus_free_coherent,
+	.map_page		= sbus_map_page,
+	.unmap_page		= sbus_unmap_page,
+	.map_sg			= sbus_map_sg,
+	.unmap_sg		= sbus_unmap_sg,
+	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
+	.sync_sg_for_device	= sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -396,7 +422,8 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
@@ -440,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
-EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
@@ -450,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
  * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+				dma_addr_t ba)
 {
 	struct resource *res;
 	unsigned long pgp;
@@ -482,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 
 	free_pages(pgp, get_order(n));
 }
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* IIep is write-through, not flushing. */
-	return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
-	}
-}
-EXPORT_SYMBOL(pci_unmap_single);
 
 /*
  * Same as pci_map_single, but with pages.
  */
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
@@ -552,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -567,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	}
 	return nents;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -589,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation before or after a transfer.
@@ -601,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
  * must first perform a pci_dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+				      size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+					 size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
@@ -627,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+				  int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -642,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+				     int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -659,9 +639,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+	.alloc_coherent		= pci32_alloc_coherent,
+	.free_coherent		= pci32_free_coherent,
+	.map_page		= pci32_map_page,
+	.map_sg			= pci32_map_sg,
+	.unmap_sg		= pci32_unmap_sg,
+	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
+	.sync_single_for_device	= pci32_sync_single_for_device,
+	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
+	.sync_sg_for_device	= pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
 #endif /* CONFIG_PCI */
 
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly.  For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return 1;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+
 #ifdef CONFIG_PROC_FS
 
 static int sparc_io_proc_show(struct seq_file *m, void *v)