aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Ott <sebott@linux.vnet.ibm.com>2015-10-26 06:20:44 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2015-11-09 03:10:49 -0500
commit52d43d8184b1840c7cf6136724223585f51a1074 (patch)
tree35b734de0905dc0849a4d65daa5fd450ed3c1fb2
parent66728eeea6d80060e4b9df55c7845c838ff2799f (diff)
s390/pci_dma: improve debugging of errors during dma map
Improve debugging to find out what went wrong during a failed dma map/unmap operation.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r-- arch/s390/pci/pci_dma.c | 41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f137949c9abf..d348f2c09a1e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -269,6 +269,16 @@ out:
269 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); 269 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
270} 270}
271 271
272static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
273{
274 struct {
275 unsigned long rc;
276 unsigned long addr;
277 } __packed data = {rc, addr};
278
279 zpci_err_hex(&data, sizeof(data));
280}
281
272static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, 282static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
273 unsigned long offset, size_t size, 283 unsigned long offset, size_t size,
274 enum dma_data_direction direction, 284 enum dma_data_direction direction,
@@ -279,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
279 unsigned long pa = page_to_phys(page) + offset; 289 unsigned long pa = page_to_phys(page) + offset;
280 int flags = ZPCI_PTE_VALID; 290 int flags = ZPCI_PTE_VALID;
281 dma_addr_t dma_addr; 291 dma_addr_t dma_addr;
292 int ret;
282 293
283 /* This rounds up number of pages based on size and offset */ 294 /* This rounds up number of pages based on size and offset */
284 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); 295 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
285 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); 296 iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
286 if (iommu_page_index == -1) 297 if (iommu_page_index == -1) {
298 ret = -ENOSPC;
287 goto out_err; 299 goto out_err;
300 }
288 301
289 /* Use rounded up size */ 302 /* Use rounded up size */
290 size = nr_pages * PAGE_SIZE; 303 size = nr_pages * PAGE_SIZE;
291 304
292 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; 305 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
293 if (dma_addr + size > zdev->end_dma) 306 if (dma_addr + size > zdev->end_dma) {
307 ret = -ERANGE;
294 goto out_free; 308 goto out_free;
309 }
295 310
296 if (direction == DMA_NONE || direction == DMA_TO_DEVICE) 311 if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
297 flags |= ZPCI_TABLE_PROTECTED; 312 flags |= ZPCI_TABLE_PROTECTED;
298 313
299 if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { 314 ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
300 atomic64_add(nr_pages, &zdev->mapped_pages); 315 if (ret)
301 return dma_addr + (offset & ~PAGE_MASK); 316 goto out_free;
302 } 317
318 atomic64_add(nr_pages, &zdev->mapped_pages);
319 return dma_addr + (offset & ~PAGE_MASK);
303 320
304out_free: 321out_free:
305 dma_free_iommu(zdev, iommu_page_index, nr_pages); 322 dma_free_iommu(zdev, iommu_page_index, nr_pages);
306out_err: 323out_err:
307 zpci_err("map error:\n"); 324 zpci_err("map error:\n");
308 zpci_err_hex(&pa, sizeof(pa)); 325 zpci_err_dma(ret, pa);
309 return DMA_ERROR_CODE; 326 return DMA_ERROR_CODE;
310} 327}
311 328
@@ -315,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
315{ 332{
316 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 333 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
317 unsigned long iommu_page_index; 334 unsigned long iommu_page_index;
318 int npages; 335 int npages, ret;
319 336
320 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 337 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
321 dma_addr = dma_addr & PAGE_MASK; 338 dma_addr = dma_addr & PAGE_MASK;
322 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, 339 ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
323 ZPCI_PTE_INVALID)) { 340 ZPCI_PTE_INVALID);
341 if (ret) {
324 zpci_err("unmap error:\n"); 342 zpci_err("unmap error:\n");
325 zpci_err_hex(&dma_addr, sizeof(dma_addr)); 343 zpci_err_dma(ret, dma_addr);
344 return;
326 } 345 }
327 346
328 atomic64_add(npages, &zdev->unmapped_pages); 347 atomic64_add(npages, &zdev->unmapped_pages);