author    David S. Miller <davem@davemloft.net>  2019-05-08 20:11:57 -0400
committer David S. Miller <davem@davemloft.net>  2019-05-08 20:12:19 -0400
commit    dac21527df5290edf54a40479c3e22fbff7ec14e
tree      315a42274d7c63be20da451771130d86585f3f62
parent    269fe56551c68cde57e477a6810ed57921dfe54f
parent    376b1371a9f29112ae000cc0cade174a9a670053
Merge branch 'sparc32-iommu-SG-list'
Christoph Hellwig says:

====================
fix SG list handling in the sparc32 iommu driver

this series fixes some long standing bugs in the sparc32 iommu driver,
mostly the lack of handling of large sglist offsets in the map_sg
method, but also a few other smaller bits.  These now show up all the
time due to some block layer changes in linux-next.
====================

Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
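As the diff below shows, the common __sbus_iommu_map_page() helper now works from
the physical address of page + offset: it takes the intra-page remainder of that
address, folds it into the length before rounding up to a page count, and returns
the bus address plus that remainder. The following is a minimal stand-alone sketch
of just the page-count part; the helper name and the 4 KiB page constants are
illustrative assumptions, not the kernel's definitions.

#include <stdio.h>

/* Illustrative 4 KiB page constants; the real values come from the kernel. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/*
 * Hypothetical user-space helper mirroring the calculation now done in
 * __sbus_iommu_map_page(): fold the intra-page offset into the length
 * before rounding up, so a scatterlist entry whose offset is larger than
 * PAGE_SIZE still gets enough IOPTEs to cover the whole buffer.
 */
static unsigned long iommu_pages_needed(unsigned long offset, size_t len)
{
	unsigned long off = offset & ~PAGE_MASK;	/* offset within the first page */

	return (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* An 8 KiB buffer starting 5 KiB into a multi-page allocation: */
	printf("%lu pages\n", iommu_pages_needed(5120, 8192));	/* prints "3 pages" */
	return 0;
}

For example, an 8 KiB buffer whose offset lands 1 KiB into a page occupies three
IOMMU pages, not two, because the first page contributes only 3 KiB of payload;
rounding up from the length alone would reserve one page too few.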
-rw-r--r--	arch/sparc/mm/iommu.c	142
1 file changed, 58 insertions, 84 deletions
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index e8d5d73ca40d..71ac353032b6 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -175,16 +175,37 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
 	}
 }
 
-static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t len, bool per_page_flush)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	iopte_t *iopte, *iopte0;
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	unsigned long off = paddr & ~PAGE_MASK;
+	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(paddr);
 	unsigned int busa, busa0;
-	int i;
+	iopte_t *iopte, *iopte0;
+	int ioptex, i;
+
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return DMA_MAPPING_ERROR;
+
+	/*
+	 * We expect unmapped highmem pages to be not in the cache.
+	 * XXX Is this a good assumption?
+	 * XXX What if someone else unmaps it here and races us?
+	 */
+	if (per_page_flush && !PageHighMem(page)) {
+		unsigned long vaddr, p;
+
+		vaddr = (unsigned long)page_address(page) + offset;
+		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
+			flush_page_for_dma(p);
+	}
 
 	/* page color = pfn of page */
-	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
 	if (ioptex < 0)
 		panic("iommu out");
 	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
@@ -193,29 +214,15 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
 	busa = busa0;
 	iopte = iopte0;
 	for (i = 0; i < npages; i++) {
-		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
 		iommu_invalidate_page(iommu->regs, busa);
 		busa += PAGE_SIZE;
 		iopte++;
-		page++;
+		pfn++;
 	}
 
 	iommu_flush_iotlb(iopte0, npages);
-
-	return busa0;
-}
-
-static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t len)
-{
-	void *vaddr = page_address(page) + offset;
-	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
-	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-	/* XXX So what is maxphys for us and how do drivers know it? */
-	if (!len || len > 256 * 1024)
-		return DMA_MAPPING_ERROR;
-	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
+	return busa0 + off;
 }
 
 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
@@ -223,81 +230,58 @@ static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
 		enum dma_data_direction dir, unsigned long attrs)
 {
 	flush_page_for_dma(0);
-	return __sbus_iommu_map_page(dev, page, offset, len);
+	return __sbus_iommu_map_page(dev, page, offset, len, false);
 }
 
 static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
 		struct page *page, unsigned long offset, size_t len,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	void *vaddr = page_address(page) + offset;
-	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
-
-	while (p < (unsigned long)vaddr + len) {
-		flush_page_for_dma(p);
-		p += PAGE_SIZE;
-	}
-
-	return __sbus_iommu_map_page(dev, page, offset, len);
+	return __sbus_iommu_map_page(dev, page, offset, len, true);
 }
 
-static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs,
+		bool per_page_flush)
 {
 	struct scatterlist *sg;
-	int i, n;
-
-	flush_page_for_dma(0);
+	int j;
 
-	for_each_sg(sgl, sg, nents, i) {
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+	for_each_sg(sgl, sg, nents, j) {
+		sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
+				sg->offset, sg->length, per_page_flush);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return 0;
 		sg->dma_length = sg->length;
 	}
 
 	return nents;
 }
 
-static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page, oldpage = 0;
-	struct scatterlist *sg;
-	int i, j, n;
-
-	for_each_sg(sgl, sg, nents, j) {
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-
-		/*
-		 * We expect unmapped highmem pages to be not in the cache.
-		 * XXX Is this a good assumption?
-		 * XXX What if someone else unmaps it here and races us?
-		 */
-		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
-			for (i = 0; i < n; i++) {
-				if (page != oldpage) {	/* Already flushed? */
-					flush_page_for_dma(page);
-					oldpage = page;
-				}
-				page += PAGE_SIZE;
-			}
-		}
-
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-	}
+	flush_page_for_dma(0);
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
+}
 
-	return nents;
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
 }
 
-static void iommu_release_one(struct device *dev, u32 busa, int npages)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	int i;
+	unsigned int busa = dma_addr & PAGE_MASK;
+	unsigned long off = dma_addr & ~PAGE_MASK;
+	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned int i;
 
 	BUG_ON(busa < iommu->start);
-	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		iopte_val(iommu->page_table[ioptex + i]) = 0;
 		iommu_invalidate_page(iommu->regs, busa);
@@ -306,25 +290,15 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
 	bit_map_clear(&iommu->usemap, ioptex, npages);
 }
 
-static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
-		size_t len, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long off = dma_addr & ~PAGE_MASK;
-	int npages;
-
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
-}
-
 static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
-	int i, n;
+	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+				attrs);
 		sg->dma_address = 0x21212121;
 	}
 }