Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 48 ++++++++++++++++++++++++++++++++----------------
1 file changed, 32 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 845cbecd68e9..65f6acb025c8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -82,17 +83,24 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;          /* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size)
 {
 	unsigned long offset, flags;
+	unsigned long boundary_size;
+	unsigned long base_index;
+
+	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+			   PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-					iommu_pages, size);
+	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+				  size, base_index, boundary_size, 0);
 	if (offset == -1) {
 		need_flush = 1;
-		offset = find_next_zero_string(iommu_gart_bitmap, 0,
-						iommu_pages, size);
+		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+					  size, base_index, boundary_size, 0);
 	}
 	if (offset != -1) {
 		set_bit_string(iommu_gart_bitmap, offset, size);
@@ -114,7 +122,7 @@ static void free_iommu(unsigned long offset, int size)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	__clear_bit_string(iommu_gart_bitmap, offset, size);
+	iommu_area_free(iommu_gart_bitmap, offset, size);
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
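The heart of the alloc_iommu() change is the boundary arithmetic: dma_get_seg_boundary(dev) returns the device's DMA segment boundary as an inclusive mask (0xffffffff, i.e. a 4 GiB window, by default), and the two ALIGN() expressions convert the GART aperture base and that window into page units so that iommu_area_alloc() can refuse any range that would cross a boundary multiple (returning -1, as the retry path above shows). The following is a minimal stand-alone model of just this arithmetic, not kernel code: it assumes a 4 KiB page, re-defines ALIGN() locally to match the kernel macro, and uses made-up values for the aperture base and boundary mask.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* local copy of the kernel's ALIGN(); 'a' must be a power of two */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long iommu_bus_base = 0xe0000000UL;	/* made-up aperture base */
	unsigned long seg_boundary = 0xffffffffUL;	/* default 4 GiB - 1 mask */
	unsigned long base_index, boundary_size;

	/* page index of the aperture base within the boundary window */
	base_index = ALIGN(iommu_bus_base & seg_boundary,
			   PAGE_SIZE) >> PAGE_SHIFT;
	/* size of one boundary window, in pages */
	boundary_size = ALIGN(seg_boundary + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	printf("base_index    = %#lx\n", base_index);
	printf("boundary_size = %#lx pages\n", boundary_size);
	return 0;
}

With these example values base_index is 0xe0000 and boundary_size is 0x100000 pages, so the allocator hands out GART pages only in runs that stay inside one 4 GiB-aligned window.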
@@ -235,7 +243,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 			       size_t size, int dir)
 {
 	unsigned long npages = to_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages);
 	int i;
 
 	if (iommu_page == -1) {
@@ -355,10 +363,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
-			  struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+			  int nelems, struct scatterlist *sout,
+			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -394,8 +403,8 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 }
 
 static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-	     unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+	     struct scatterlist *sout, unsigned long pages, int need)
 {
 	if (!need) {
 		BUG_ON(nelems != 1);
@@ -403,7 +412,7 @@ dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
 		sout->dma_length = start->length;
 		return 0;
 	}
-	return __dma_map_cont(start, nelems, sout, pages);
+	return __dma_map_cont(dev, start, nelems, sout, pages);
 }
 
 /*
@@ -416,6 +425,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
 	int need = 0, nextneed, i, out, start;
 	unsigned long pages = 0;
+	unsigned int seg_size;
+	unsigned int max_seg_size;
 
 	if (nents == 0)
 		return 0;
@@ -426,6 +437,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 	out = 0;
 	start = 0;
 	start_sg = sgmap = sg;
+	seg_size = 0;
+	max_seg_size = dma_get_max_seg_size(dev);
 	ps = NULL; /* shut up gcc */
 	for_each_sg(sg, s, nents, i) {
 		dma_addr_t addr = sg_phys(s);
@@ -443,11 +456,13 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 			 * offset.
 			 */
 			if (!iommu_merge || !nextneed || !need || s->offset ||
+			    (s->length + seg_size > max_seg_size) ||
 			    (ps->offset + ps->length) % PAGE_SIZE) {
-				if (dma_map_cont(start_sg, i - start, sgmap,
-						 pages, need) < 0)
+				if (dma_map_cont(dev, start_sg, i - start,
+						 sgmap, pages, need) < 0)
 					goto error;
 				out++;
+				seg_size = 0;
 				sgmap = sg_next(sgmap);
 				pages = 0;
 				start = i;
@@ -455,11 +470,12 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 			}
 		}
 
+		seg_size += s->length;
 		need = nextneed;
 		pages += to_pages(s->offset, s->length);
 		ps = s;
 	}
-	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
+	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
 		goto error;
 	out++;
 	flush_gart();
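The seg_size/max_seg_size bookkeeping added to gart_map_sg() caps how far scatterlist merging can go: entries accumulate into one mapped segment until adding the next entry would exceed dma_get_max_seg_size(dev), at which point the run collected so far is flushed through dma_map_cont() and the counter resets. Below is a minimal stand-alone model of that accounting, not kernel code; the entry lengths and the 64 KiB limit are made-up examples, and the seg_size != 0 test stands in for the patch's i > start guard.

#include <stdio.h>

int main(void)
{
	/* made-up scatterlist entry lengths, in bytes */
	unsigned int lengths[] = { 4096, 8192, 65536, 4096 };
	unsigned int max_seg_size = 65536;	/* stands in for dma_get_max_seg_size(dev) */
	unsigned int seg_size = 0;
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		/* same test as the patch: flush before the limit would be exceeded */
		if (seg_size != 0 && lengths[i] + seg_size > max_seg_size) {
			printf("flush merged segment: %u bytes\n", seg_size);
			seg_size = 0;	/* start a new segment */
		}
		seg_size += lengths[i];
	}
	if (seg_size != 0)
		printf("flush merged segment: %u bytes\n", seg_size);
	return 0;
}

As in the patch, the test runs before the current entry's length is counted, so a single entry larger than the limit still forms a segment of its own rather than being rejected.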