Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
 -rw-r--r--   arch/x86/kernel/pci-gart_64.c   65
 1 file changed, 36 insertions(+), 29 deletions(-)
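
The patch below converts the GART IOMMU's scatterlist walks from open-coded array indexing to the for_each_sg() iterator, which advances through the list with sg_next() and therefore keeps working once scatterlists are chained rather than laid out as one flat array. As a rough, illustrative sketch of the idiom being replaced (walk_sg_old() and walk_sg_new() are made-up names, not functions from this file):

#include <linux/scatterlist.h>

/* Old idiom: index into the scatterlist as if it were a flat array. */
static void walk_sg_old(struct scatterlist *sg, int nents)
{
        int i;

        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i]; /* assumes contiguous storage */
                (void)s;                        /* ... operate on s ... */
        }
}

/* New idiom: for_each_sg() steps via sg_next(), so chained lists work too. */
static void walk_sg_new(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                (void)s;                        /* ... operate on s ... */
        }
}
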
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4918c575d582..cfcc84e6c350 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
+#include <linux/scatterlist.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
  */
 static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 {
+        struct scatterlist *s;
         int i;
 
-        for (i = 0; i < nents; i++) {
-                struct scatterlist *s = &sg[i];
+        for_each_sg(sg, s, nents, i) {
                 if (!s->dma_length || !s->length)
                         break;
                 gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
@@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                                int nents, int dir)
 {
+        struct scatterlist *s;
         int i;
 
 #ifdef CONFIG_IOMMU_DEBUG
         printk(KERN_DEBUG "dma_map_sg overflow\n");
 #endif
 
-        for (i = 0; i < nents; i++ ) {
-                struct scatterlist *s = &sg[i];
+        for_each_sg(sg, s, nents, i) {
                 unsigned long addr = page_to_phys(s->page) + s->offset;
                 if (nonforced_iommu(dev, addr, s->length)) {
                         addr = dma_map_area(dev, addr, s->length, dir);
@@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static int __dma_map_cont(struct scatterlist *start, int nelems,
                           struct scatterlist *sout, unsigned long pages)
 {
         unsigned long iommu_start = alloc_iommu(pages);
         unsigned long iommu_page = iommu_start;
+        struct scatterlist *s;
         int i;
 
         if (iommu_start == -1)
                 return -1;
 
-        for (i = start; i < stopat; i++) {
-                struct scatterlist *s = &sg[i];
+        for_each_sg(start, s, nelems, i) {
                 unsigned long pages, addr;
                 unsigned long phys_addr = s->dma_address;
 
-                BUG_ON(i > start && s->offset);
-                if (i == start) {
+                BUG_ON(s != start && s->offset);
+                if (s == start) {
                         *sout = *s;
                         sout->dma_address = iommu_bus_base;
                         sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
@@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
         return 0;
 }
 
-static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static inline int dma_map_cont(struct scatterlist *start, int nelems,
                                struct scatterlist *sout,
                                unsigned long pages, int need)
 {
         if (!need) {
-                BUG_ON(stopat - start != 1);
-                *sout = sg[start];
-                sout->dma_length = sg[start].length;
+                BUG_ON(nelems != 1);
+                *sout = *start;
+                sout->dma_length = start->length;
                 return 0;
         }
-        return __dma_map_cont(sg, start, stopat, sout, pages);
+        return __dma_map_cont(start, nelems, sout, pages);
 }
 
 /*
@@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
         int start;
         unsigned long pages = 0;
         int need = 0, nextneed;
+        struct scatterlist *s, *ps, *start_sg, *sgmap;
 
         if (nents == 0)
                 return 0;
@@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
         out = 0;
         start = 0;
-        for (i = 0; i < nents; i++) {
-                struct scatterlist *s = &sg[i];
+        start_sg = sgmap = sg;
+        ps = NULL; /* shut up gcc */
+        for_each_sg(sg, s, nents, i) {
                 dma_addr_t addr = page_to_phys(s->page) + s->offset;
                 s->dma_address = addr;
                 BUG_ON(s->length == 0);
@@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
                 /* Handle the previous not yet processed entries */
                 if (i > start) {
-                        struct scatterlist *ps = &sg[i-1];
                         /* Can only merge when the last chunk ends on a page
                            boundary and the new one doesn't have an offset. */
                         if (!iommu_merge || !nextneed || !need || s->offset ||
                             (ps->offset + ps->length) % PAGE_SIZE) {
-                                if (dma_map_cont(sg, start, i, sg+out, pages,
-                                                 need) < 0)
+                                if (dma_map_cont(start_sg, i - start, sgmap,
+                                                 pages, need) < 0)
                                         goto error;
                                 out++;
+                                sgmap = sg_next(sgmap);
                                 pages = 0;
                                 start = i;
+                                start_sg = s;
                         }
                 }
 
                 need = nextneed;
                 pages += to_pages(s->offset, s->length);
+                ps = s;
         }
-        if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
+        if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
                 goto error;
         out++;
         flush_gart();
-        if (out < nents)
-                sg[out].dma_length = 0;
+        if (out < nents) {
+                sgmap = sg_next(sgmap);
+                sgmap->dma_length = 0;
+        }
         return out;
 
 error:
@@ -437,8 +444,8 @@ error:
         if (panic_on_overflow)
                 panic("dma_map_sg: overflow on %lu pages\n", pages);
         iommu_full(dev, pages << PAGE_SHIFT, dir);
-        for (i = 0; i < nents; i++)
-                sg[i].dma_address = bad_dma_address;
+        for_each_sg(sg, s, nents, i)
+                s->dma_address = bad_dma_address;
         return 0;
 }
 
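
With chained scatterlists, a sub-range of entries can no longer be named by array indices such as (sg, start, stopat); that is why dma_map_cont() and __dma_map_cont() now take a starting entry plus an element count, and why gart_map_sg() carries the start_sg, ps and sgmap cursors instead of recomputing &sg[i-1] or sg + out. A minimal sketch of the pointer-plus-count convention (sum_sg_lengths() is illustrative only, not part of the driver):

#include <linux/scatterlist.h>

/* Walk 'nelems' entries beginning at 'start', regardless of whether the
 * underlying scatterlist is one flat array or a chain of smaller arrays. */
static unsigned long sum_sg_lengths(struct scatterlist *start, int nelems)
{
        struct scatterlist *s;
        unsigned long total = 0;
        int i;

        for_each_sg(start, s, nelems, i)
                total += s->length;

        return total;
}

The same reasoning lies behind the sg_next(sgmap) calls in gart_map_sg(): the output slot is advanced through the list with sg_next() rather than addressed as sg[out].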