author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2008-08-17 11:36:18 -0400
committer  Ingo Molnar <mingo@elte.hu>                       2008-08-17 13:36:59 -0400
commit     7b22ff5344fda666e0938e5261ea7b9a3dfce497 (patch)
tree       3fc6994f3432a7264867688b67db631878b81d7c /arch
parent     f3efbe582b5396d134024c03a5fa253f2a85d9a6 (diff)
x86 gart: allocate size-aligned address for alloc_coherent, v2
This patch changes the GART IOMMU to return a size-aligned address from
dma_alloc_coherent(), as DMA-mapping.txt requires:

    The cpu return address and the DMA bus master address are both
    guaranteed to be aligned to the smallest PAGE_SIZE order which is
    greater than or equal to the requested size.  This invariant exists
    (for example) to guarantee that if you allocate a chunk which is
    smaller than or equal to 64 kilobytes, the extent of the buffer you
    receive will not cross a 64K boundary.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
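As a rough illustration of the invariant, the sketch below (plain user-space
C; the get_order() here is a stand-in mirroring the kernel helper of the same
name, and PAGE_SIZE is assumed to be 4096) shows how an order-based mask
yields a size-aligned region: a 64K request gives order 4, so the allocator
must return an address aligned to 16 pages, and the buffer cannot straddle a
64K boundary.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for the kernel's get_order(): smallest order such that
     * (PAGE_SIZE << order) >= size. */
    static int get_order(unsigned long size)
    {
            int order = 0;

            while ((PAGE_SIZE << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long size = 64 * 1024;         /* 64K request */
            unsigned long align_mask = (1UL << get_order(size)) - 1;

            /* align_mask is in units of IOMMU pages: 0xf means the
             * allocation must start on a 16-page (64K) boundary. */
            printf("order=%d align_mask=%#lx\n", get_order(size), align_mask);
            return 0;
    }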
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index cdab67849074..4d8efb05428d 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -82,7 +82,8 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;          /* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+                                 unsigned long align_mask)
 {
         unsigned long offset, flags;
         unsigned long boundary_size;
@@ -95,11 +96,12 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
         spin_lock_irqsave(&iommu_bitmap_lock, flags);
         offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-                                  size, base_index, boundary_size, 0);
+                                  size, base_index, boundary_size, align_mask);
         if (offset == -1) {
                 need_flush = 1;
                 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-                                          size, base_index, boundary_size, 0);
+                                          size, base_index, boundary_size,
+                                          align_mask);
         }
         if (offset != -1) {
                 next_bit = offset+size;
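How iommu_area_alloc() honors the new align_mask argument is outside this
diff (the real search lives in lib/iommu-helper.c and also honors
boundary_size); as a hedged sketch of what the argument asks of it, a
hypothetical aligned first-fit search over the GART page bitmap might look
like:

    /* Hypothetical sketch of an aligned first-fit bitmap search; not
     * the lib/iommu-helper.c implementation. */
    static long aligned_area_alloc(unsigned long *map, unsigned long nbits,
                                   unsigned long start, unsigned int npages,
                                   unsigned long align_mask)
    {
            /* round the starting index up to the requested alignment */
            unsigned long index = (start + align_mask) & ~align_mask;

            while (index + npages <= nbits) {
                    unsigned long i;

                    for (i = 0; i < npages; i++)
                            if (test_bit(index + i, map))
                                    break;
                    if (i == npages) {      /* free run found: claim it */
                            for (i = 0; i < npages; i++)
                                    set_bit(index + i, map);
                            return index;
                    }
                    /* restart just past the busy bit, re-aligned */
                    index = (index + i + 1 + align_mask) & ~align_mask;
            }
            return -1;
    }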
@@ -236,10 +238,10 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-                               size_t size, int dir)
+                               size_t size, int dir, unsigned long align_mask)
 {
         unsigned long npages = iommu_num_pages(phys_mem, size);
-        unsigned long iommu_page = alloc_iommu(dev, npages);
+        unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
         int i;
 
         if (iommu_page == -1) {
@@ -262,7 +264,11 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 static dma_addr_t
 gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-        dma_addr_t map = dma_map_area(dev, paddr, size, dir);
+        dma_addr_t map;
+        unsigned long align_mask;
+
+        align_mask = (1UL << get_order(size)) - 1;
+        map = dma_map_area(dev, paddr, size, dir, align_mask);
 
         flush_gart();
 
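With get_order() feeding the mask, the alignment scales with the request
(assuming 4K pages): a size up to 4K gives order 0 and an align_mask of 0
(no extra constraint); 8K gives order 1 and mask 0x1 (2-page alignment);
64K gives order 4 and mask 0xf (16-page alignment), which is exactly the
64K-boundary guarantee quoted above.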
@@ -281,7 +287,8 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
         if (!need_iommu(dev, paddr, size))
                 return paddr;
 
-        bus = gart_map_simple(dev, paddr, size, dir);
+        bus = dma_map_area(dev, paddr, size, dir, 0);
+        flush_gart();
 
         return bus;
 }
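Note the companion change in gart_map_single(): it used to go through
gart_map_simple() and would otherwise have inherited the new size alignment.
Calling dma_map_area() directly with an align_mask of 0 (plus its own
flush_gart()) keeps streaming single mappings unaligned; only the
coherent-allocation path needs the DMA-mapping.txt invariant.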
@@ -340,7 +347,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                 unsigned long addr = sg_phys(s);
 
                 if (nonforced_iommu(dev, addr, s->length)) {
-                        addr = dma_map_area(dev, addr, s->length, dir);
+                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                         if (addr == bad_dma_address) {
                                 if (i > 0)
                                         gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +369,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                           int nelems, struct scatterlist *sout,
                           unsigned long pages)
 {
-        unsigned long iommu_start = alloc_iommu(dev, pages);
+        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
         unsigned long iommu_page = iommu_start;
         struct scatterlist *s;
         int i;
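The scatterlist paths (dma_map_sg_nonforce() and __dma_map_cont()) likewise
pass 0 for align_mask, since streaming DMA mappings carry no size-alignment
guarantee; only gart_map_simple(), on the dma_alloc_coherent path, requests
an aligned area.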