author		Alexander Graf <agraf@suse.de>	2013-08-28 18:41:59 -0400
committer	Alexander Graf <agraf@suse.de>	2013-08-28 18:41:59 -0400
commit		bf550fc93d9855872a95e69e4002256110d89858 (patch)
tree		10876bb4304bffe54c4160a132e7b8de6577ac4e /arch/arm/mm
parent		7e48c101e0c53e6095c5f4f5e63d14df50aae8fc (diff)
parent		cc2df20c7c4ce594c3e17e9cc260c330646012c8 (diff)
Merge remote-tracking branch 'origin/next' into kvm-ppc-next
Conflicts:
	mm/Kconfig

CMA DMA split and ZSWAP introduction were conflicting, fix up manually.
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/dma-mapping.c	40
-rw-r--r--	arch/arm/mm/init.c		58
-rw-r--r--	arch/arm/mm/mmap.c		2
-rw-r--r--	arch/arm/mm/mmu.c		2
4 files changed, 40 insertions(+), 62 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 19a19bd44447..dbddc07a3bbd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1328,6 +1328,15 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
+	gfp &= ~(__GFP_COMP);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
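
The comment added in this hunk is the whole rationale: split_page() can only split a non-compound higher-order allocation into order-0 pages, so a caller-supplied __GFP_COMP must be masked off before __iommu_alloc_buffer(). As a minimal sketch of the same defensive-masking idea, with a hypothetical wrapper name (the patch itself open-codes the mask inside arm_iommu_alloc_attrs()):

	/*
	 * Illustrative sketch only -- iommu_alloc_pages_nocomp() is a
	 * hypothetical wrapper; the patch clears the flag inline.
	 */
	static struct page **iommu_alloc_pages_nocomp(struct device *dev,
			size_t size, gfp_t gfp, struct dma_attrs *attrs)
	{
		/*
		 * split_page() hands out the order-0 pieces of a
		 * higher-order allocation individually; it cannot do
		 * that for a compound page, whose tail pages share
		 * state with the head page.
		 */
		gfp &= ~__GFP_COMP;
		return __iommu_alloc_buffer(dev, size, gfp, attrs);
	}
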
@@ -1386,16 +1395,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			  dma_addr_t handle, struct dma_attrs *attrs)
 {
-	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	struct page **pages;
 	size = PAGE_ALIGN(size);
 
-	if (!pages) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, cpu_addr, handle, size);
 		return;
 	}
 
-	if (__in_atomic_pool(cpu_addr, size)) {
-		__iommu_free_atomic(dev, cpu_addr, handle, size);
+	pages = __iommu_get_pages(cpu_addr, attrs);
+	if (!pages) {
+		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
 
@@ -1650,13 +1660,27 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
-	int ret, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len = PAGE_ALIGN(size + offset);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
 		goto fail;
 
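
The switch added above replaces the hard-coded protection argument of 0: the DMA transfer direction is translated into IOMMU permission bits, so a buffer mapped DMA_TO_DEVICE (which the device only reads) is no longer writable through the IOMMU. A sketch of the same mapping as a standalone helper (the helper name is hypothetical; the patch open-codes the switch right before iommu_map()):

	/* Hypothetical helper: DMA direction -> IOMMU protection bits. */
	static int dma_dir_to_iommu_prot(enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_BIDIRECTIONAL:
			return IOMMU_READ | IOMMU_WRITE;
		case DMA_TO_DEVICE:
			return IOMMU_READ;	/* device reads the buffer */
		case DMA_FROM_DEVICE:
			return IOMMU_WRITE;	/* device writes the buffer */
		default:
			return 0;		/* DMA_NONE: no device access */
		}
	}
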
@@ -1921,7 +1945,7 @@ void arm_iommu_detach_device(struct device *dev)
 
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
-	mapping = NULL;
+	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2ffee02d1d5c..15225d829d71 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -583,9 +583,6 @@ static void __init free_highpages(void)
  */
 void __init mem_init(void)
 {
-	unsigned long reserved_pages, free_pages;
-	struct memblock_region *reg;
-	int i;
 #ifdef CONFIG_HAVE_TCM
 	/* These pointers are filled in on TCM detection */
 	extern u32 dtcm_end;
@@ -596,57 +593,16 @@ void __init mem_init(void)
 
 	/* this will put all unused low memory onto the freelists */
 	free_unused_memmap(&meminfo);
-
-	totalram_pages += free_all_bootmem();
+	free_all_bootmem();
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
 #endif
 
 	free_highpages();
 
-	reserved_pages = free_pages = 0;
-
-	for_each_bank(i, &meminfo) {
-		struct membank *bank = &meminfo.bank[i];
-		unsigned int pfn1, pfn2;
-		struct page *page, *end;
-
-		pfn1 = bank_pfn_start(bank);
-		pfn2 = bank_pfn_end(bank);
-
-		page = pfn_to_page(pfn1);
-		end = pfn_to_page(pfn2 - 1) + 1;
-
-		do {
-			if (PageReserved(page))
-				reserved_pages++;
-			else if (!page_count(page))
-				free_pages++;
-			page++;
-		} while (page < end);
-	}
-
-	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
-	 */
-	printk(KERN_INFO "Memory:");
-	num_physpages = 0;
-	for_each_memblock(memory, reg) {
-		unsigned long pages = memblock_region_memory_end_pfn(reg) -
-			memblock_region_memory_base_pfn(reg);
-		num_physpages += pages;
-		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
-	}
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-
-	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		free_pages << (PAGE_SHIFT-10),
-		reserved_pages << (PAGE_SHIFT-10),
-		totalhigh_pages << (PAGE_SHIFT-10));
-
+	mem_init_print_info(NULL);
 
 #define MLK(b, t)	b, t, ((t) - (b)) >> 10
 #define MLM(b, t)	b, t, ((t) - (b)) >> 20
@@ -712,7 +668,7 @@ void __init mem_init(void)
 	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
 #endif
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
 		 * On a machine this small we won't get
@@ -729,12 +685,12 @@ void free_initmem(void)
 	extern char __tcm_start, __tcm_end;
 
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
-	free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
+	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
-		free_initmem_default(0);
+		free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -745,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd) {
 		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
-		free_reserved_area(start, end, 0, "initrd");
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
 	}
 }
 
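
The repeated 0 -> -1 and pointer-cast changes throughout this file track a cross-tree rework of free_reserved_area() merged in the same cycle; to the best of my reading, the prototype the callers are being converted to is the one sketched below (the exact qualifiers may differ by tree). A poison value >= 0 memsets the range with that byte before freeing it; -1 skips poisoning, which is why these callers, having already run poison_init_mem() themselves, pass -1:

	/* Assumed v3.11-era prototype (mm/page_alloc.c): */
	unsigned long free_reserved_area(void *start, void *end,
					 int poison, char *s);
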
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 10062ceadd1c..0c6356255fe3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d7229d28c7f8..4f56617a2392 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -950,7 +950,7 @@ void __init debug_ll_io_init(void)
 	map.virtual &= PAGE_MASK;
 	map.length = PAGE_SIZE;
 	map.type = MT_DEVICE;
-	create_mapping(&map);
+	iotable_init(&map, 1);
 }
 #endif
 