author	David S. Miller <davem@davemloft.net>	2015-03-23 22:22:43 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-23 22:22:43 -0400
commit	d5c1d8c567781932e3ab2c62e8fcfee0283d9580 (patch)
tree	c04fca1719409426b42af6a7caa75cc26ba940a9 /arch/arm64
parent	ba7c95ea3870fe7b847466d39a049ab6f156aa2c (diff)
parent	90a5a895cc8b284ac522757a01de15e36710c2b9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/netfilter/nf_tables_core.c

The nf_tables_core.c conflict was resolved using a conflict resolution
from Stephen Rothwell as a guide.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/proc-fns.h	6
-rw-r--r--	arch/arm64/kernel/efi.c	6
-rw-r--r--	arch/arm64/mm/dma-mapping.c	12
3 files changed, 19 insertions, 5 deletions
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84f8fb2..941c375616e2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm)	cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm)				\
+do {							\
+	BUG_ON(pgd == swapper_pg_dir);			\
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
+} while (0)
 
 #define cpu_get_pgd()					\
 ({							\
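Note on the proc-fns.h hunk above: the one-line cpu_switch_mm() macro becomes a multi-statement block that refuses to install the kernel's own swapper_pg_dir in TTBR0. A rough sketch of what the new macro expands to for a given pgd/mm pair (illustrative expansion only, not part of the patch):

	do {
		BUG_ON(pgd == swapper_pg_dir);			/* never point TTBR0 at the kernel's idle page tables */
		cpu_do_switch_mm(virt_to_phys(pgd), mm);	/* program TTBR0 with the physical address of this pgd */
	} while (0)

The do { ... } while (0) wrapper keeps the multi-statement macro safe when it appears as the body of an unbraced if/else, which is why the expansion is not a bare pair of statements.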
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b8d70164428..ab21e0d58278 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	cpu_switch_mm(mm->pgd, mm);
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
+		cpu_switch_mm(mm->pgd, mm);
+
 	flush_tlb_all();
 	if (icache_is_aivivt())
 		__flush_icache_all();
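The efi.c hunk above is needed because init_mm's pgd is swapper_pg_dir, which the new cpu_switch_mm() now rejects; switching back to the kernel context therefore installs the reserved (empty) TTBR0 instead. A minimal, hypothetical sketch of how a caller brackets an EFI runtime service call around efi_set_pgd(); the wrapper name and the efi_mm argument are placeholders, not code from this patch:

	static void efi_call_example(struct mm_struct *efi_mm)
	{
		efi_set_pgd(efi_mm);			/* map the EFI runtime regions via TTBR0 */
		/* ... invoke an EFI runtime service here ... */
		efi_set_pgd(current->active_mm);	/* may be &init_mm, which now takes the reserved-TTBR0 path */
	}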
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2bdde04..ef7d112f5ce0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
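The dma-mapping.c hunks above propagate __GFP_ZERO into the CMA and atomic-pool allocation paths, so callers that request zeroed coherent memory actually receive zeroed pages. A minimal, hypothetical caller-side sketch (the function and variable names are placeholders; dma_zalloc_coherent() simply ORs __GFP_ZERO into the flags that eventually reach the code patched above):

	static int example_alloc_ring(struct device *dev, size_t ring_size,
				      void **ring, dma_addr_t *ring_dma)
	{
		/* With __GFP_ZERO honored on the CMA and atomic-pool paths,
		 * no manual memset() of the returned buffer is needed. */
		*ring = dma_zalloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
		return *ring ? 0 : -ENOMEM;
	}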