author     Joerg Roedel <joerg.roedel@amd.com>    2008-06-26 15:28:00 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-06-27 04:12:18 -0400
commit     cb76c3229725c6dcae31da65e9ca57f434628c05 (patch)
tree       5471ef8a2711af4aea46f967ad9cbf6643f0a0a4 /arch/x86/kernel/amd_iommu.c
parent     b20ac0d4d6cff969d7074bbf02a1b86058df0804 (diff)
x86, AMD IOMMU: add generic dma_ops mapping functions
This patch adds the generic functions to map and unmap pages to a protection
domain for dma_ops usage.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
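The core of the patch is dma_ops_domain_map(), which builds an IOMMU page-table entry whose permission bits follow the DMA direction: for DMA_TO_DEVICE the device only reads from memory, so just the read bit (IOMMU_PTE_IR) is set; for DMA_FROM_DEVICE the device writes to memory, so the write bit (IOMMU_PTE_IW) is set; DMA_BIDIRECTIONAL sets both. The standalone C sketch below models that selection outside the kernel; the bit names and positions used here are illustrative assumptions for the example (the kernel's real definitions live in the AMD IOMMU headers) and are not part of the patch.

/*
 * Illustrative userspace model of the permission-bit selection done by
 * dma_ops_domain_map() in the diff below.  Bit positions are assumptions
 * made for this sketch only.
 */
#include <stdio.h>
#include <stdint.h>

enum dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

#define PTE_P  (1ULL << 0)   /* entry present */
#define PTE_FC (1ULL << 60)  /* force coherent */
#define PTE_IR (1ULL << 61)  /* device may read this page */
#define PTE_IW (1ULL << 62)  /* device may write this page */

/*
 * DMA_TO_DEVICE means data flows from memory to the device, i.e. the
 * device reads memory and needs read permission; DMA_FROM_DEVICE means
 * the device writes memory and needs write permission.
 */
static uint64_t pte_flags(enum dir d)
{
	uint64_t pte = PTE_P | PTE_FC;

	if (d == TO_DEVICE)
		pte |= PTE_IR;
	else if (d == FROM_DEVICE)
		pte |= PTE_IW;
	else
		pte |= PTE_IR | PTE_IW;
	return pte;
}

int main(void)
{
	printf("to_device:     %#llx\n", (unsigned long long)pte_flags(TO_DEVICE));
	printf("from_device:   %#llx\n", (unsigned long long)pte_flags(FROM_DEVICE));
	printf("bidirectional: %#llx\n", (unsigned long long)pte_flags(BIDIRECTIONAL));
	return 0;
}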
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  105
1 files changed, 105 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 47e80b5814bf..e00a3e7ba356 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -536,3 +536,108 @@ static int get_device_resources(struct device *dev,
 	return 1;
 }
 
+static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
+				     struct dma_ops_domain *dom,
+				     unsigned long address,
+				     phys_addr_t paddr,
+				     int direction)
+{
+	u64 *pte, __pte;
+
+	WARN_ON(address > dom->aperture_size);
+
+	paddr &= PAGE_MASK;
+
+	pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
+	pte += IOMMU_PTE_L0_INDEX(address);
+
+	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
+
+	if (direction == DMA_TO_DEVICE)
+		__pte |= IOMMU_PTE_IR;
+	else if (direction == DMA_FROM_DEVICE)
+		__pte |= IOMMU_PTE_IW;
+	else if (direction == DMA_BIDIRECTIONAL)
+		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
+
+	WARN_ON(*pte);
+
+	*pte = __pte;
+
+	return (dma_addr_t)address;
+}
+
+static void dma_ops_domain_unmap(struct amd_iommu *iommu,
+				 struct dma_ops_domain *dom,
+				 unsigned long address)
+{
+	u64 *pte;
+
+	if (address >= dom->aperture_size)
+		return;
+
+	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+
+	pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
+	pte += IOMMU_PTE_L0_INDEX(address);
+
+	WARN_ON(!*pte);
+
+	*pte = 0ULL;
+}
+
+static dma_addr_t __map_single(struct device *dev,
+			       struct amd_iommu *iommu,
+			       struct dma_ops_domain *dma_dom,
+			       phys_addr_t paddr,
+			       size_t size,
+			       int dir)
+{
+	dma_addr_t offset = paddr & ~PAGE_MASK;
+	dma_addr_t address, start;
+	unsigned int pages;
+	int i;
+
+	pages = to_pages(paddr, size);
+	paddr &= PAGE_MASK;
+
+	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
+	if (unlikely(address == bad_dma_address))
+		goto out;
+
+	start = address;
+	for (i = 0; i < pages; ++i) {
+		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
+		paddr += PAGE_SIZE;
+		start += PAGE_SIZE;
+	}
+	address += offset;
+
+out:
+	return address;
+}
+
+static void __unmap_single(struct amd_iommu *iommu,
+			   struct dma_ops_domain *dma_dom,
+			   dma_addr_t dma_addr,
+			   size_t size,
+			   int dir)
+{
+	dma_addr_t i, start;
+	unsigned int pages;
+
+	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+		return;
+
+	pages = to_pages(dma_addr, size);
+	dma_addr &= PAGE_MASK;
+	start = dma_addr;
+
+	for (i = 0; i < pages; ++i) {
+		dma_ops_domain_unmap(iommu, dma_dom, start);
+		start += PAGE_SIZE;
+	}
+
+	dma_ops_free_addresses(dma_dom, dma_addr, pages);
+}
+
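For reference, the address arithmetic in __map_single() can be modelled in isolation: the aperture allocator works in whole pages, so the physical address is rounded down before the per-page mapping loop and the sub-page offset is added back onto the DMA address returned to the caller. The userspace sketch below approximates that arithmetic; the page size, the ex_to_pages() helper and the hard-coded allocator result are assumptions made purely for illustration and do not appear in the patch.

/*
 * Standalone model of the page/offset arithmetic used by __map_single()
 * above.  Illustrative userspace code, not kernel code: the page size,
 * the ex_to_pages() helper and the fake allocator result are assumptions
 * for this example only.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

/* Number of pages a mapping of 'size' bytes starting at 'paddr' touches. */
static unsigned int ex_to_pages(uint64_t paddr, size_t size)
{
	return ((paddr & ~EX_PAGE_MASK) + size + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
}

int main(void)
{
	uint64_t paddr = 0x12345678;   /* arbitrary physical address */
	size_t size = 0x2100;          /* spans three 4 KiB pages */
	uint64_t offset = paddr & ~EX_PAGE_MASK;
	unsigned int pages = ex_to_pages(paddr, size);
	uint64_t iova = 0x100000;      /* stand-in for the allocator result */

	/*
	 * Like __map_single(): map 'pages' consecutive page-aligned frames,
	 * then re-add the sub-page offset so the caller gets a byte-accurate
	 * DMA address.
	 */
	printf("pages=%u dma_addr=%#llx\n", pages,
	       (unsigned long long)(iova + offset));
	return 0;
}

Because the rounding happens before the loop, dma_ops_domain_map() only ever sees page-aligned addresses, which is what its WARN_ON() checks rely on.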