author		Sebastian Ott <sebott@linux.vnet.ibm.com>	2016-08-17 07:51:11 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-09-22 07:42:31 -0400
commit		8cb63b78791eef67ea95831c6ef5e6039c572b14 (patch)
tree		e6f92315c264ace747e66ce950b5127ceeedfc89 /arch/s390/pci
parent		3b13f1fea1be44f29be4150246624502a0227ebd (diff)
s390/pci_dma: simplify dma address calculation
Simplify the code we use to calculate dma addresses by putting everything
related in a dma_alloc_address function. Also provide a dma_free_address
counterpart.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
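For illustration only (not part of the patch): a minimal userspace sketch of the
offset-to-address round trip that the two new helpers pair up. The aperture base,
page size, and bitmap offset are stand-in local values here; zdev->start_dma and
the IOMMU bitmap in pci_dma.c (shown in the diff below) are the real thing.

/* Hypothetical standalone sketch of the address arithmetic the patch
 * centralizes: alloc converts a bitmap page offset into a bus address,
 * free inverts that conversion before clearing the bitmap. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

/* alloc side: bitmap page offset -> dma (bus) address */
static uint64_t offset_to_dma(uint64_t start_dma, unsigned long offset)
{
	return start_dma + offset * SKETCH_PAGE_SIZE;
}

/* free side: dma (bus) address -> bitmap page offset */
static unsigned long dma_to_offset(uint64_t start_dma, uint64_t dma_addr)
{
	return (dma_addr - start_dma) >> SKETCH_PAGE_SHIFT;
}

int main(void)
{
	uint64_t start_dma = 0x100000000ULL;	/* made-up aperture base */
	unsigned long offset = 42;		/* made-up bitmap offset */
	uint64_t dma_addr = offset_to_dma(start_dma, offset);

	/* the free-side conversion must recover the allocated offset */
	assert(dma_to_offset(start_dma, dma_addr) == offset);
	printf("offset %lu -> dma 0x%llx -> offset %lu\n", offset,
	       (unsigned long long)dma_addr, dma_to_offset(start_dma, dma_addr));
	return 0;
}

The point of the pairing is that the conversion and its inverse now live next to
each other, so the map/unmap paths only ever deal in dma addresses.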
Diffstat (limited to 'arch/s390/pci')
-rw-r--r--	arch/s390/pci/pci_dma.c	37
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 658123961f42..12b58b6b7f79 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -230,34 +230,36 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
 				boundary_size, 0);
 }
 
-static unsigned long dma_alloc_iommu(struct device *dev, int size)
+static dma_addr_t dma_alloc_address(struct device *dev, int size)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long offset, flags;
-	int wrap = 0;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
 		/* wrap-around */
 		offset = __dma_alloc_iommu(dev, 0, size);
-		wrap = 1;
-	}
-
-	if (offset != -1) {
-		zdev->next_bit = offset + size;
-		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
+		if (offset == -1) {
+			spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+			return DMA_ERROR_CODE;
+		}
+		if (!zdev->tlb_refresh && !s390_iommu_strict)
 			/* global flush after wrap-around with lazy unmap */
 			zpci_refresh_global(zdev);
 	}
+	zdev->next_bit = offset + size;
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-	return offset;
+
+	return zdev->start_dma + offset * PAGE_SIZE;
 }
 
-static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
+static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long flags;
+	unsigned long flags, offset;
+
+	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	if (!zdev->iommu_bitmap)
@@ -289,23 +291,22 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     unsigned long attrs)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long nr_pages, iommu_page_index;
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
+	unsigned long nr_pages;
 	dma_addr_t dma_addr;
 	int ret;
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
-	iommu_page_index = dma_alloc_iommu(dev, nr_pages);
-	if (iommu_page_index == -1) {
+	dma_addr = dma_alloc_address(dev, nr_pages);
+	if (dma_addr == DMA_ERROR_CODE) {
 		ret = -ENOSPC;
 		goto out_err;
 	}
 
 	/* Use rounded up size */
 	size = nr_pages * PAGE_SIZE;
-	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
 
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
@@ -318,7 +319,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
-	dma_free_iommu(dev, iommu_page_index, nr_pages);
+	dma_free_address(dev, dma_addr, nr_pages);
 out_err:
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
@@ -330,7 +331,6 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 				 unsigned long attrs)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long iommu_page_index;
 	int npages, ret;
 
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -344,8 +344,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 	}
 
 	atomic64_add(npages, &zdev->unmapped_pages);
-	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
-	dma_free_iommu(dev, iommu_page_index, npages);
+	dma_free_address(dev, dma_addr, npages);
 }
 
 static void *s390_dma_alloc(struct device *dev, size_t size,