diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2008-10-16 01:02:13 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-16 14:21:33 -0400 |
commit | 2994a3b2653a3ab04f7b1459ce2442baecb62961 (patch) | |
tree | a95c59ce7eac2d329812bbc765dc7018463b605d /arch/powerpc/kernel/iommu.c | |
parent | 036b4c50fe99a2f308f36561335b9904ab507972 (diff) |
powerpc: use iommu_num_pages function in IOMMU code
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r-- | arch/powerpc/kernel/iommu.c | 23 |
1 file changed, 7 insertions, 16 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index e2cf6320907e..ea1ba89f9c90 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -51,17 +51,6 @@ static int protect4gb = 1; | |||
51 | 51 | ||
52 | static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); | 52 | static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); |
53 | 53 | ||
54 | static inline unsigned long iommu_nr_pages(unsigned long vaddr, | ||
55 | unsigned long slen) | ||
56 | { | ||
57 | unsigned long npages; | ||
58 | |||
59 | npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK); | ||
60 | npages >>= IOMMU_PAGE_SHIFT; | ||
61 | |||
62 | return npages; | ||
63 | } | ||
64 | |||
65 | static int __init setup_protect4gb(char *str) | 54 | static int __init setup_protect4gb(char *str) |
66 | { | 55 | { |
67 | if (strcmp(str, "on") == 0) | 56 | if (strcmp(str, "on") == 0) |
@@ -325,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
325 | } | 314 | } |
326 | /* Allocate iommu entries for that segment */ | 315 | /* Allocate iommu entries for that segment */ |
327 | vaddr = (unsigned long) sg_virt(s); | 316 | vaddr = (unsigned long) sg_virt(s); |
328 | npages = iommu_nr_pages(vaddr, slen); | 317 | npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE); |
329 | align = 0; | 318 | align = 0; |
330 | if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE && | 319 | if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE && |
331 | (vaddr & ~PAGE_MASK) == 0) | 320 | (vaddr & ~PAGE_MASK) == 0) |
@@ -418,7 +407,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
418 | unsigned long vaddr, npages; | 407 | unsigned long vaddr, npages; |
419 | 408 | ||
420 | vaddr = s->dma_address & IOMMU_PAGE_MASK; | 409 | vaddr = s->dma_address & IOMMU_PAGE_MASK; |
421 | npages = iommu_nr_pages(s->dma_address, s->dma_length); | 410 | npages = iommu_num_pages(s->dma_address, s->dma_length, |
411 | IOMMU_PAGE_SIZE); | ||
422 | __iommu_free(tbl, vaddr, npages); | 412 | __iommu_free(tbl, vaddr, npages); |
423 | s->dma_address = DMA_ERROR_CODE; | 413 | s->dma_address = DMA_ERROR_CODE; |
424 | s->dma_length = 0; | 414 | s->dma_length = 0; |
@@ -452,7 +442,8 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
452 | 442 | ||
453 | if (sg->dma_length == 0) | 443 | if (sg->dma_length == 0) |
454 | break; | 444 | break; |
455 | npages = iommu_nr_pages(dma_handle, sg->dma_length); | 445 | npages = iommu_num_pages(dma_handle, sg->dma_length, |
446 | IOMMU_PAGE_SIZE); | ||
456 | __iommu_free(tbl, dma_handle, npages); | 447 | __iommu_free(tbl, dma_handle, npages); |
457 | sg = sg_next(sg); | 448 | sg = sg_next(sg); |
458 | } | 449 | } |
@@ -584,7 +575,7 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, | |||
584 | BUG_ON(direction == DMA_NONE); | 575 | BUG_ON(direction == DMA_NONE); |
585 | 576 | ||
586 | uaddr = (unsigned long)vaddr; | 577 | uaddr = (unsigned long)vaddr; |
587 | npages = iommu_nr_pages(uaddr, size); | 578 | npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE); |
588 | 579 | ||
589 | if (tbl) { | 580 | if (tbl) { |
590 | align = 0; | 581 | align = 0; |
@@ -617,7 +608,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, | |||
617 | BUG_ON(direction == DMA_NONE); | 608 | BUG_ON(direction == DMA_NONE); |
618 | 609 | ||
619 | if (tbl) { | 610 | if (tbl) { |
620 | npages = iommu_nr_pages(dma_handle, size); | 611 | npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE); |
621 | iommu_free(tbl, dma_handle, npages); | 612 | iommu_free(tbl, dma_handle, npages); |
622 | } | 613 | } |
623 | } | 614 | } |