author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-09-12 06:42:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-14 10:42:37 -0400
commit	bee44f294efd8417f5e68553778a6cc957af1547 (patch)
tree	765ab42985836390cb0e83403deb9ff59d1f59a8 /arch/x86/kernel/pci-gart_64.c
parent	589fc9a6e2102b498978f6350581ec7fa5aeb032 (diff)
x86: make GART respect the device's dma_mask for virtual mappings
Currently, the GART IOMMU ignores the device's dma_mask when it does virtual mappings, so it can hand the device a virtual address that the device cannot access. This patch fixes that problem.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
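The allocation limit in the patch below relies on iommu_device_max_index(), a helper introduced by the parent commit (589fc9a6). A minimal sketch of the intended semantics, assuming the helper simply clamps the bitmap size to the highest page index the device's mask can address (the authoritative body lives in the parent commit; this version is illustrative):

	/*
	 * Sketch (assumed from parent commit 589fc9a6): given the bitmap
	 * size in pages, the aperture offset in pages, and the device
	 * mask in pages, return how many bitmap entries the device can
	 * actually reach.
	 */
	static inline unsigned long iommu_device_max_index(unsigned long size,
							   unsigned long offset,
							   u64 dma_mask)
	{
		if (size + offset > dma_mask)
			return dma_mask - offset + 1;
		else
			return size;
	}

alloc_iommu() then passes this limit, instead of the full iommu_pages, to iommu_area_alloc(), so the allocator can no longer hand out a GART page whose bus address lies above the device's dma_mask.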
Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 1b0c412566e5..9972c42ac925 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -83,23 +83,34 @@ static unsigned long next_bit; /* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
 static unsigned long alloc_iommu(struct device *dev, int size,
-				 unsigned long align_mask)
+				 unsigned long align_mask, u64 dma_mask)
 {
 	unsigned long offset, flags;
 	unsigned long boundary_size;
 	unsigned long base_index;
+	unsigned long limit;
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
 	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 
+	limit = iommu_device_max_index(iommu_pages,
+				       DIV_ROUND_UP(iommu_bus_base, PAGE_SIZE),
+				       dma_mask >> PAGE_SHIFT);
+
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+
+	if (limit <= next_bit) {
+		need_flush = 1;
+		next_bit = 0;
+	}
+
+	offset = iommu_area_alloc(iommu_gart_bitmap, limit, next_bit,
 				  size, base_index, boundary_size, align_mask);
-	if (offset == -1) {
+	if (offset == -1 && next_bit) {
 		need_flush = 1;
-		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+		offset = iommu_area_alloc(iommu_gart_bitmap, limit, 0,
 					  size, base_index, boundary_size,
 					  align_mask);
 	}
@@ -228,12 +239,14 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-			       size_t size, int dir, unsigned long align_mask)
+			       size_t size, int dir, unsigned long align_mask,
+			       u64 dma_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+	unsigned long iommu_page;
 	int i;
 
+	iommu_page = alloc_iommu(dev, npages, align_mask, dma_mask);
 	if (iommu_page == -1) {
 		if (!nonforced_iommu(dev, phys_mem, size))
 			return phys_mem;
@@ -263,7 +276,7 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
 
-	bus = dma_map_area(dev, paddr, size, dir, 0);
+	bus = dma_map_area(dev, paddr, size, dir, 0, dma_get_mask(dev));
 	flush_gart();
 
 	return bus;
@@ -314,6 +327,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 {
 	struct scatterlist *s;
 	int i;
+	u64 dma_mask = dma_get_mask(dev);
 
 #ifdef CONFIG_IOMMU_DEBUG
 	printk(KERN_DEBUG "dma_map_sg overflow\n");
@@ -323,7 +337,8 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		unsigned long addr = sg_phys(s);
 
 		if (nonforced_iommu(dev, addr, s->length)) {
-			addr = dma_map_area(dev, addr, s->length, dir, 0);
+			addr = dma_map_area(dev, addr, s->length, dir, 0,
+					    dma_mask);
 			if (addr == bad_dma_address) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir);
@@ -345,14 +360,16 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 			  int nelems, struct scatterlist *sout,
 			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
-	unsigned long iommu_page = iommu_start;
+	unsigned long iommu_start;
+	unsigned long iommu_page;
 	struct scatterlist *s;
 	int i;
 
+	iommu_start = alloc_iommu(dev, pages, 0, dma_get_mask(dev));
 	if (iommu_start == -1)
 		return -1;
 
+	iommu_page = iommu_start;
 	for_each_sg(start, s, nelems, i) {
 		unsigned long pages, addr;
 		unsigned long phys_addr = s->dma_address;
@@ -497,7 +514,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 	align_mask = (1UL << get_order(size)) - 1;
 
 	*dma_addr = dma_map_area(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL,
-				 align_mask);
+				 align_mask, dma_mask);
 	flush_gart();
 
 	if (*dma_addr != bad_dma_address)