 lib/swiotlb.c | 120 ++++++++++------------------------------------
 1 file changed, 30 insertions(+), 90 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 3657da8ebbc3..98a7a4450e02 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -89,10 +89,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-static struct swiotlb_phys_addr {
-	struct page *page;
-	unsigned int offset;
-} *io_tlb_orig_addr;
+static phys_addr_t *io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
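
The key simplification: the old {page, offset} pair and the new flat physical
address describe the same byte. A minimal sketch of the correspondence,
assuming the usual kernel helpers; swiotlb_orig_phys() is a hypothetical name
used for illustration only, not something this patch adds:

	#include <linux/mm.h>
	#include <asm/io.h>

	/* Hypothetical helper: collapse the old representation into the new. */
	static inline phys_addr_t swiotlb_orig_phys(struct page *page,
						    unsigned int offset)
	{
		/* page_to_phys() gives the base physical address of the page;
		 * adding the byte offset yields the single flat value that
		 * io_tlb_orig_addr[] now stores per slab. */
		return page_to_phys(page) + offset;
	}
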
@@ -204,7 +201,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -278,12 +275,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 
-	io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
-	                          get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
+	io_tlb_orig_addr = (phys_addr_t *)
+		__get_free_pages(GFP_KERNEL,
+				 get_order(io_tlb_nslabs *
+					   sizeof(phys_addr_t)));
 	if (!io_tlb_orig_addr)
 		goto cleanup3;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -298,8 +297,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
-	           sizeof(char *)));
+	free_pages((unsigned long)io_tlb_orig_addr,
+		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 	io_tlb_orig_addr = NULL;
 cleanup3:
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
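
Worth noting in passing: the old cleanup4 path freed with
get_order(io_tlb_nslabs * sizeof(char *)) even though the array was allocated
with sizeof(struct swiotlb_phys_addr), a latent size mismatch; the rewrite
makes the allocation and the free agree on sizeof(phys_addr_t).
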
@@ -330,59 +329,11 @@ static int is_swiotlb_buffer(char *addr)
 	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
-static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
-{
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
-	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
-	buffer.page += buffer.offset >> PAGE_SHIFT;
-	buffer.offset &= PAGE_SIZE - 1;
-	return buffer;
-}
-
-static void
-__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
-{
-	if (PageHighMem(buffer.page)) {
-		size_t len, bytes;
-		char *dev, *host, *kmp;
-
-		len = size;
-		while (len != 0) {
-			unsigned long flags;
-
-			bytes = len;
-			if ((bytes + buffer.offset) > PAGE_SIZE)
-				bytes = PAGE_SIZE - buffer.offset;
-			local_irq_save(flags); /* protects KM_BOUNCE_READ */
-			kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
-			dev = dma_addr + size - len;
-			host = kmp + buffer.offset;
-			if (dir == DMA_FROM_DEVICE)
-				memcpy(host, dev, bytes);
-			else
-				memcpy(dev, host, bytes);
-			kunmap_atomic(kmp, KM_BOUNCE_READ);
-			local_irq_restore(flags);
-			len -= bytes;
-			buffer.page++;
-			buffer.offset = 0;
-		}
-	} else {
-		void *v = page_address(buffer.page) + buffer.offset;
-
-		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, v, size);
-		else
-			memcpy(v, dma_addr, size);
-	}
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
+map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
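
With the highmem-aware __sync_single() gone, every bounce copy in this patch
assumes the original buffer lives in the kernel direct mapping, so
phys_to_virt() suffices. A sketch of the copy logic the patch now open-codes
at each call site (swiotlb_bounce_sketch() is an illustrative name, not
something the patch adds):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>
	#include <asm/io.h>

	static void swiotlb_bounce_sketch(phys_addr_t phys, char *dma_addr,
					  size_t size, int dir)
	{
		if (dir == DMA_TO_DEVICE)
			/* original buffer -> bounce buffer */
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			/* bounce buffer -> original buffer */
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
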
@@ -392,7 +343,6 @@ map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
-	struct swiotlb_phys_addr slot_buf;
 
 	mask = dma_get_seg_boundary(hwdev);
 	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
@@ -477,15 +427,10 @@ found:
 	 * This is needed when we sync the memory. Then we sync the buffer if
 	 * needed.
 	 */
-	slot_buf = buffer;
-	for (i = 0; i < nslots; i++) {
-		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
-		slot_buf.offset &= PAGE_SIZE - 1;
-		io_tlb_orig_addr[index+i] = slot_buf;
-		slot_buf.offset += 1 << IO_TLB_SHIFT;
-	}
+	for (i = 0; i < nslots; i++)
+		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+		memcpy(dma_addr, phys_to_virt(phys), size);
 
 	return dma_addr;
 }
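
Each IO_TLB_SHIFT-sized slab now records the physical address of its own
slice of the source buffer. A worked example, assuming the driver's default
IO_TLB_SHIFT of 11 (2 KB slabs):

	/*
	 * For a mapping whose source starts at phys 0x10000800:
	 *   io_tlb_orig_addr[index + 0] = 0x10000800
	 *   io_tlb_orig_addr[index + 1] = 0x10001000   (+ 1 << 11)
	 *   io_tlb_orig_addr[index + 2] = 0x10001800   (+ 2 << 11)
	 * so a later partial sync can locate its source from the slab
	 * index alone, without walking page structures as the old code did.
	 */
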
@@ -499,17 +444,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
+	phys_addr_t phys = io_tlb_orig_addr[index];
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		/*
 		 * bounce... copy the data back into the original buffer * and
 		 * delete the bounce buffer.
 		 */
-		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+		memcpy(phys_to_virt(phys), dma_addr, size);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
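
The new phys && guard exists because swiotlb_alloc_coherent(), in a hunk
further down, now calls map_single() with a physical address of 0: such a
mapping has no original buffer to copy back into, so the sync on unmap must
be skipped.
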
@@ -541,18 +486,21 @@ static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	    int dir, int target)
 {
-	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t phys = io_tlb_orig_addr[index];
+
+	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+			memcpy(phys_to_virt(phys), dma_addr, size);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+			memcpy(dma_addr, phys_to_virt(phys), size);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
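
A sync may start mid-slab, which is why the low IO_TLB_SHIFT bits of the
bounce address are folded back in. A sketch of the address math, assuming the
file-scope io_tlb_start and io_tlb_orig_addr from this patch;
sync_source_phys() is a hypothetical name for illustration:

	static phys_addr_t sync_source_phys(char *dma_addr)
	{
		/* Which slab does this bounce address fall into? */
		int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
		/* Offset of this sync within that slab. */
		unsigned long offset = (unsigned long)dma_addr &
				       ((1 << IO_TLB_SHIFT) - 1);

		/* Recorded slab base + intra-slab offset = exact source. */
		return io_tlb_orig_addr[index] + offset;
	}
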
@@ -591,10 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		struct swiotlb_phys_addr buffer;
-		buffer.page = virt_to_page(NULL);
-		buffer.offset = 0;
-		ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
+		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
 		if (!ret)
 			return NULL;
 	}
@@ -662,7 +607,6 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 {
 	dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
 	void *map;
-	struct swiotlb_phys_addr buffer;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -677,9 +621,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	buffer.page   = virt_to_page(ptr);
-	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-	map = map_single(hwdev, buffer, size, dir);
+	map = map_single(hwdev, virt_to_phys(ptr), size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
@@ -824,20 +766,18 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		    int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
-	struct swiotlb_phys_addr buffer;
-	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		dev_addr = swiotlb_sg_to_bus(hwdev, sg);
-		if (range_needs_mapping(sg_virt(sg), sg->length) ||
+		void *addr = sg_virt(sg);
+		dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr);
+
+		if (range_needs_mapping(addr, sg->length) ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
-			void *map;
-			buffer.page   = sg_page(sg);
-			buffer.offset = sg->offset;
-			map = map_single(hwdev, buffer, sg->length, dir);
+			void *map = map_single(hwdev, sg_phys(sg),
+					       sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
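
sg_phys(sg) folds the old buffer.page/buffer.offset pair into one argument;
per linux/scatterlist.h of this era it is equivalent to the following, shown
only to connect the old and new calling conventions (sg_phys_equiv() is an
illustrative name):

	static inline dma_addr_t sg_phys_equiv(struct scatterlist *sg)
	{
		/* base physical address of the page plus in-page offset,
		 * exactly what the removed assignments built by hand */
		return page_to_phys(sg_page(sg)) + sg->offset;
	}
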