author	Carlo Caione <carlo@caione.org>	2015-02-09 04:38:35 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-02-23 09:43:59 -0500
commit	6e8266e3333bd01700decf9866725e254d84f21a (patch)
tree	59968b8da6ae5cecbdb79dc7ffe90220ba3e53e5 /arch/arm
parent	415ae101caf9fbf6746a88126494eda333174e90 (diff)
ARM: 8304/1: Respect NO_KERNEL_MAPPING when we don't have an IOMMU
Even without an iommu, NO_KERNEL_MAPPING is still convenient to save on
kernel address space in places where we don't need a kernel mapping.
Implement support for it in the two places where we're creating an
expensive mapping.

__alloc_from_pool uses an internal pool from which we already have
virtual addresses, so it's not relevant, and __alloc_simple_buffer uses
alloc_pages, which will always return a lowmem page, which is already
mapped into kernel space, so we can't prevent a mapping for it in that
case.

Signed-off-by: Jasper St. Pierre <jstpierre@mecheye.net>
Signed-off-by: Carlo Caione <carlo@caione.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
Reviewed-by: Daniel Drake <dsd@endlessm.com>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
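For context, callers opt into this behaviour through the dma_attrs mechanism of this kernel generation. The sketch below is a hypothetical driver-side caller (the function name and surrounding driver are assumptions, not part of this patch); it illustrates that with DMA_ATTR_NO_KERNEL_MAPPING set, the value returned by dma_alloc_attrs() is an opaque cookie to hand back to dma_free_attrs(), not a usable kernel virtual address:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/*
 * Hypothetical example: allocate a buffer that only the device will
 * touch (e.g. a scanout buffer), so no kernel mapping is wanted.
 */
static void *alloc_unmapped_buffer(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

        /*
         * With the attribute set, the return value must not be
         * dereferenced; on the paths changed in this patch it is the
         * struct page pointer, kept only so it can be passed back to
         * dma_free_attrs() (with the same attrs) at teardown.
         */
        return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
}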
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/mm/dma-mapping.c	67
1 file changed, 41 insertions(+), 26 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 170a116d1b29..713761643e38 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                      pgprot_t prot, struct page **ret_page,
-                                     const void *caller);
+                                     const void *caller, bool want_vaddr);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   pgprot_t prot, struct page **ret_page,
-                                  const void *caller);
+                                  const void *caller, bool want_vaddr);
 
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
 
        if (dev_get_cma_area(NULL))
                ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-                                             &page, atomic_pool_init);
+                                             &page, atomic_pool_init, true);
        else
                ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
-                                          &page, atomic_pool_init);
+                                          &page, atomic_pool_init, true);
        if (ptr) {
                int ret;
 
@@ -467,13 +467,15 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   pgprot_t prot, struct page **ret_page,
-                                  const void *caller)
+                                  const void *caller, bool want_vaddr)
 {
        struct page *page;
-       void *ptr;
+       void *ptr = NULL;
        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                return NULL;
+       if (!want_vaddr)
+               goto out;
 
        ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
        if (!ptr) {
@@ -481,6 +483,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                return NULL;
        }
 
+ out:
        *ret_page = page;
        return ptr;
 }
@@ -523,12 +526,12 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                      pgprot_t prot, struct page **ret_page,
-                                     const void *caller)
+                                     const void *caller, bool want_vaddr)
 {
        unsigned long order = get_order(size);
        size_t count = size >> PAGE_SHIFT;
        struct page *page;
-       void *ptr;
+       void *ptr = NULL;
 
        page = dma_alloc_from_contiguous(dev, count, order);
        if (!page)
@@ -536,6 +539,9 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
        __dma_clear_buffer(page, size);
 
+       if (!want_vaddr)
+               goto out;
+
        if (PageHighMem(page)) {
                ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
                if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
                __dma_remap(page, size, prot);
                ptr = page_address(page);
        }
+
+ out:
        *ret_page = page;
        return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-                                  void *cpu_addr, size_t size)
+                                  void *cpu_addr, size_t size, bool want_vaddr)
 {
-       if (PageHighMem(page))
-               __dma_free_remap(cpu_addr, size);
-       else
-               __dma_remap(page, size, PAGE_KERNEL);
+       if (want_vaddr) {
+               if (PageHighMem(page))
+                       __dma_free_remap(cpu_addr, size);
+               else
+                       __dma_remap(page, size, PAGE_KERNEL);
+       }
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define nommu() 1
 
 #define __get_dma_pgprot(attrs, prot) __pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
 #define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
 #define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
 #define __dma_free_remap(cpu_addr, size) do { } while (0)
 
 #endif /* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-                        gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+                        gfp_t gfp, pgprot_t prot, bool is_coherent,
+                        struct dma_attrs *attrs, const void *caller)
 {
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page = NULL;
        void *addr;
+       bool want_vaddr;
 
 #ifdef CONFIG_DMA_API_DEBUG
        u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
+       want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
        if (is_coherent || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
        else if (!(gfp & __GFP_WAIT))
                addr = __alloc_from_pool(size, &page);
        else if (!dev_get_cma_area(dev))
-               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
        else
-               addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+               addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
 
-       if (addr)
+       if (page)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
 
-       return addr;
+       return want_vaddr ? addr : page;
 }
 
 /*
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, false,
-                          __builtin_return_address(0));
+                          attrs, __builtin_return_address(0));
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
                return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, true,
-                          __builtin_return_address(0));
+                          attrs, __builtin_return_address(0));
 }
 
 /*
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                          bool is_coherent)
 {
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+       bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
        } else if (__free_from_pool(cpu_addr, size)) {
                return;
        } else if (!dev_get_cma_area(dev)) {
-               __dma_free_remap(cpu_addr, size);
+               if (want_vaddr)
+                       __dma_free_remap(cpu_addr, size);
                __dma_free_buffer(page, size);
        } else {
                /*
                 * Non-atomic allocations cannot be freed with IRQs disabled
                 */
                WARN_ON(irqs_disabled());
-               __free_from_contiguous(dev, page, cpu_addr, size);
+               __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
        }
 }
 