commit e1171aca7da6407c0e65ffb1f38509ee4486a3e7
tree   fdcb399884487631d112719669662dbf0293c425
parent c23a75759191e84f4ba15b85ea4f97bd544b5362
parent 6137e4166004e2ec383ac05d5ca15831f4668806
author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-25 20:02:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-25 20:02:24 -0500
Merge tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa
Pull Xtensa fixes from Max Filippov:
"Two fixes for reserved memory/DMA buffers allocation in high memory on
xtensa architecture
- fix memory accounting when reserved memory is in high memory region
- fix DMA allocation from high memory"
* tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa:
xtensa: support DMA buffers in high memory
xtensa: fix high memory/reserved memory collision
 arch/xtensa/kernel/pci-dma.c | 40 ++++++++++++++++++++++----------
 arch/xtensa/mm/init.c        | 70 +++++++++++++++++++++++++++++++++++++---
 2 files changed, 93 insertions(+), 17 deletions(-)
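
The pci-dma.c change revolves around xtensa's KSEG window: lowmem is
linearly mapped twice, once cached and once uncached ("bypass"), so the
old xtensa_dma_alloc() produced a coherent pointer simply by sliding a
cached KSEG address into the bypass alias. A high-memory page has no
KSEG alias at all, which is why the patched path must instead remap the
page into vmalloc space. Below is a minimal user-space sketch of the
alias arithmetic; the constant values are hypothetical stand-ins for
the XCHAL_KSEG_* macros, whose real values come from the core's variant
headers.

```c
/*
 * Model of the KSEG cached<->bypass aliasing that the pre-patch
 * xtensa_dma_alloc() relied on. The KSEG_* values below are assumed,
 * not taken from any real xtensa core.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define KSEG_CACHED_VADDR 0xd0000000u /* assumed cached KSEG base */
#define KSEG_BYPASS_VADDR 0xd8000000u /* assumed uncached alias base */
#define KSEG_SIZE         0x08000000u /* assumed 128 MiB window */

/* Slide a cached KSEG address onto its uncached (bypass) alias. */
static uint32_t kseg_cached_to_bypass(uint32_t cached)
{
	/* Only addresses inside the KSEG window have a bypass alias. */
	assert(cached >= KSEG_CACHED_VADDR &&
	       cached - KSEG_CACHED_VADDR < KSEG_SIZE);
	return cached + KSEG_BYPASS_VADDR - KSEG_CACHED_VADDR;
}

int main(void)
{
	uint32_t lowmem_buf = KSEG_CACHED_VADDR + 0x1000;

	/*
	 * Fine for lowmem: both aliases exist. A highmem page has no
	 * KSEG address, so the assert would fire -- hence the patch's
	 * dma_common_contiguous_remap() fallback.
	 */
	printf("cached %#x -> uncached %#x\n", lowmem_buf,
	       kseg_cached_to_bypass(lowmem_buf));
	return 0;
}
```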
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 623720a11143..732631ce250f 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 			    unsigned long attrs)
 {
 	unsigned long ret;
-	unsigned long uncached = 0;
+	unsigned long uncached;
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	ret = (unsigned long)page_address(page);
+	*handle = phys_to_dma(dev, page_to_phys(page));
 
-	/* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+	if (PageHighMem(page)) {
+		void *p;
 
+		p = dma_common_contiguous_remap(page, size, VM_MAP,
+						pgprot_noncached(PAGE_KERNEL),
+						__builtin_return_address(0));
+		if (!p) {
+			if (!dma_release_from_contiguous(dev, page, count))
+				__free_pages(page, get_order(size));
+		}
+		return p;
+	}
+#endif
+	ret = (unsigned long)page_address(page);
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
 	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-	*handle = virt_to_bus((void *)ret);
 	__invalidate_dcache_range(ret, size);
 
 	return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 			    dma_addr_t dma_handle, unsigned long attrs)
 {
-	unsigned long addr = (unsigned long)vaddr +
-		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-	struct page *page = virt_to_page(addr);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+	unsigned long addr = (unsigned long)vaddr;
+	struct page *page;
+
+	if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	    addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+		addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+		page = virt_to_page(addr);
+	} else {
+#ifdef CONFIG_MMU
+		dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+	}
 
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
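
With allocation now able to return either a KSEG bypass pointer or a
vmalloc-space remap, the free path can no longer assume one shape and
instead classifies the pointer by address range, as the hunk above
shows. Here is a sketch of that dispatch, reusing the same assumed
KSEG constants; the function names are illustrative, not kernel API.

```c
/*
 * Model of the post-patch xtensa_dma_free() dispatch. KSEG_* values
 * and the sample addresses are assumed for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KSEG_CACHED_VADDR 0xd0000000u /* assumed */
#define KSEG_BYPASS_VADDR 0xd8000000u /* assumed */
#define KSEG_SIZE         0x08000000u /* assumed */

static bool in_kseg_bypass(uint32_t addr)
{
	return addr >= KSEG_BYPASS_VADDR &&
	       addr - KSEG_BYPASS_VADDR < KSEG_SIZE;
}

static void classify_free(uint32_t vaddr)
{
	if (in_kseg_bypass(vaddr)) {
		/* Lowmem: recover the cached alias, then the page
		 * via virt_to_page()-style arithmetic. */
		uint32_t cached = vaddr +
			KSEG_CACHED_VADDR - KSEG_BYPASS_VADDR;
		printf("%#x: KSEG buffer, cached alias %#x\n",
		       vaddr, cached);
	} else {
		/* Highmem remap: tear down the mapping and recover
		 * the page from the DMA handle instead. */
		printf("%#x: remapped highmem buffer\n", vaddr);
	}
}

int main(void)
{
	classify_free(KSEG_BYPASS_VADDR + 0x2000); /* lowmem case */
	classify_free(0xc0001000u); /* assumed vmalloc-range pointer */
	return 0;
}
```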
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index d776ec0d7b22..34aead7dcb48 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -79,19 +79,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
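
The init.c half fixes the accounting bug: the old mem_init() handed
every pfn from max_low_pfn to max_pfn to the page allocator, so a
reserved buffer sitting in high memory would be freed and later reused
as an ordinary page. The new free_highpages() walks memblock.memory,
clips each region to the highmem range, and punches out any overlapping
memblock.reserved interval before freeing. Below is a self-contained
model of that interval subtraction; the pfn tables are made up for
illustration (memblock keeps its regions sorted, as assumed here).

```c
#include <stdio.h>

/* A [start, end) pfn range; stand-in for a memblock_region. */
struct region { unsigned long start, end; };

/* Stand-in for freeing highmem pages into the buddy allocator. */
static void free_area_high(unsigned long pfn, unsigned long end)
{
	printf("  free pfns [%lu, %lu)\n", pfn, end);
}

int main(void)
{
	const unsigned long max_low = 100; /* assumed lowmem boundary */
	struct region memory[] = { { 50, 200 } }; /* assumed memory map */
	struct region reserved[] = { { 40, 60 }, { 120, 130 } };

	for (unsigned int i = 0; i < sizeof(memory) / sizeof(*memory); i++) {
		unsigned long start = memory[i].start;
		unsigned long end = memory[i].end;

		if (end <= max_low) /* wholly lowmem: nothing to free */
			continue;
		if (start < max_low) /* clip a partial highmem entry */
			start = max_low;

		/* Subtract every reserved interval that overlaps. */
		for (unsigned int j = 0;
		     j < sizeof(reserved) / sizeof(*reserved); j++) {
			unsigned long rs = reserved[j].start;
			unsigned long re = reserved[j].end;

			if (re < start)
				continue;
			if (rs < start)
				rs = start;
			if (rs > end)
				rs = end;
			if (re > end)
				re = end;
			if (rs != start) /* free the gap before it */
				free_area_high(start, rs);
			start = re;
			if (start == end)
				break;
		}
		if (start < end) /* free whatever remains */
			free_area_high(start, end);
	}
	return 0;
}
```

Running this frees pfns [100, 120) and [130, 200): the reserved
carveout at [120, 130) stays out of the allocator, unlike under the
old blanket loop.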