author		Jesse Barnes <jbarnes@hobbes.lan>	2008-07-28 17:31:10 -0400
committer	Jesse Barnes <jbarnes@virtuousgeek.org>	2008-07-28 17:31:10 -0400
commit		29111f579f4f3f2a07385f931854ab0527ae7ea5
tree		0271f20b0c954fa364be8627e0d6065544de0534 /arch/x86
parent		cc5499c3a607a392e8a7adb934aaf14b2c6a3519
parent		87e39ea5714dd59ba31e36c25833d2b20255a29d
Merge branch 'x86/iommu' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into for-linus
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	13
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	11
2 files changed, 9 insertions(+), 15 deletions(-)
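Both files below carried their own identical copy of a to_pages() macro for computing how many pages a DMA buffer spans; this merge drops those private copies in favor of a shared iommu_num_pages() helper defined outside this diff. As a rough illustration of the calculation involved (a stand-alone user-space sketch, not the kernel's actual definition; the _sketch suffix and the fixed 4 KiB page size are assumptions for the example):

/*
 * Sketch of the page-count calculation that the removed to_pages()
 * macros performed and that iommu_num_pages() now provides in one
 * place: count how many PAGE_SIZE pages are touched by a buffer with
 * a (possibly unaligned) start address and a length in bytes.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12UL                 /* assume 4 KiB pages, as on x86 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long iommu_num_pages_sketch(unsigned long addr, size_t size)
{
	/* Offset within the first page plus the buffer length ... */
	unsigned long span = (addr & ~PAGE_MASK) + size;

	/* ... rounded up to a whole number of pages. */
	return (span + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/*
	 * An 8 KiB buffer starting 0x100 bytes into a page straddles
	 * three pages, not two; prints "3".
	 */
	printf("%lu\n", iommu_num_pages_sketch(0x1000100UL, 0x2000));
	return 0;
}

Keeping this arithmetic in one helper rather than two identical per-driver macros is what accounts for the 15 deletions against 9 insertions in the diffstat above.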
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 74697408576f..22d7d050905d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -29,9 +29,6 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define to_pages(addr, size) \
-	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EXIT_LOOP_COUNT 10000000
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 		u64 address, size_t size)
 {
 	int s = 0;
-	unsigned pages = to_pages(address, size);
+	unsigned pages = iommu_num_pages(address, size);
 
 	address &= PAGE_MASK;
 
@@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	if (iommu->exclusion_start &&
 	    iommu->exclusion_start < dma_dom->aperture_size) {
 		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-		int pages = to_pages(iommu->exclusion_start,
-				     iommu->exclusion_length);
+		int pages = iommu_num_pages(iommu->exclusion_start,
+					    iommu->exclusion_length);
 		dma_ops_reserve_addresses(dma_dom, startpage, pages);
 	}
 
@@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev,
 	unsigned int pages;
 	int i;
 
-	pages = to_pages(paddr, size);
+	pages = iommu_num_pages(paddr, size);
 	paddr &= PAGE_MASK;
 
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
@@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
 		return;
 
-	pages = to_pages(dma_addr, size);
+	pages = iommu_num_pages(dma_addr, size);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
 
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 744126e64950..49285f8fd4d5 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry;
 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define to_pages(addr, size) \
-	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EMERGENCY_PAGES 32 /* = 128KB */
 
 #ifdef CONFIG_AGP
@@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir)
 {
-	unsigned long npages = to_pages(phys_mem, size);
+	unsigned long npages = iommu_num_pages(phys_mem, size);
 	unsigned long iommu_page = alloc_iommu(dev, npages);
 	int i;
 
@@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 		return;
 
 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
-	npages = to_pages(dma_addr, size);
+	npages = iommu_num_pages(dma_addr, size);
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
 		CLEAR_LEAK(iommu_page + i);
@@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 	}
 
 	addr = phys_addr;
-	pages = to_pages(s->offset, s->length);
+	pages = iommu_num_pages(s->offset, s->length);
 	while (pages--) {
 		iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
 		SET_LEAK(iommu_page);
@@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 		seg_size += s->length;
 		need = nextneed;
-		pages += to_pages(s->offset, s->length);
+		pages += iommu_num_pages(s->offset, s->length);
 		ps = s;
 	}
 	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)