about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-11-04 14:30:57 -0500
committerDavid S. Miller <davem@davemloft.net>2015-11-04 14:30:57 -0500
commitd618382ba5f1a4905db63f4980bf7b0a5826de9d (patch)
tree5bc612d222a70276b0e4d9df476b2548c1286d9e
parent73958c651fbf70d8d8bf2a60b871af5f7a2e3199 (diff)
iommu-common: Fix error code used in iommu_tbl_range_{alloc,free}().
The value returned from iommu_tbl_range_alloc() (and the one passed in as a fourth argument to iommu_tbl_range_free) is not a DMA address, it is rather an index into the IOMMU page table. Therefore using DMA_ERROR_CODE is not appropriate. Use a more type matching error code define, IOMMU_ERROR_CODE, and update all users of this interface. Reported-by: Andre Przywara <andre.przywara@arm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc/kernel/iommu.c      | 12
-rw-r--r--  arch/sparc/kernel/ldc.c        |  2
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c  | 18
-rw-r--r--  include/linux/iommu-common.h   |  1
-rw-r--r--  lib/iommu-common.c             | 10
5 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5320689c06e9..37686828c3d9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -161,7 +161,7 @@ static inline iopte_t *alloc_npages(struct device *dev,
161 161
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
163 (unsigned long)(-1), 0); 163 (unsigned long)(-1), 0);
164 if (unlikely(entry == DMA_ERROR_CODE)) 164 if (unlikely(entry == IOMMU_ERROR_CODE))
165 return NULL; 165 return NULL;
166 166
167 return iommu->page_table + entry; 167 return iommu->page_table + entry;
@@ -253,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
254 iommu = dev->archdata.iommu; 254 iommu = dev->archdata.iommu;
255 255
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); 256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
257 257
258 order = get_order(size); 258 order = get_order(size);
259 if (order < 10) 259 if (order < 10)
@@ -426,7 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
426 iommu_free_ctx(iommu, ctx); 426 iommu_free_ctx(iommu, ctx);
427 spin_unlock_irqrestore(&iommu->lock, flags); 427 spin_unlock_irqrestore(&iommu->lock, flags);
428 428
429 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); 429 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
430} 430}
431 431
432static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 432static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -492,7 +492,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
492 &handle, (unsigned long)(-1), 0); 492 &handle, (unsigned long)(-1), 0);
493 493
494 /* Handle failure */ 494 /* Handle failure */
495 if (unlikely(entry == DMA_ERROR_CODE)) { 495 if (unlikely(entry == IOMMU_ERROR_CODE)) {
496 if (printk_ratelimit()) 496 if (printk_ratelimit())
497 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" 497 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
498 " npages %lx\n", iommu, paddr, npages); 498 " npages %lx\n", iommu, paddr, npages);
@@ -571,7 +571,7 @@ iommu_map_failed:
571 iopte_make_dummy(iommu, base + j); 571 iopte_make_dummy(iommu, base + j);
572 572
573 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, 573 iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
574 DMA_ERROR_CODE); 574 IOMMU_ERROR_CODE);
575 575
576 s->dma_address = DMA_ERROR_CODE; 576 s->dma_address = DMA_ERROR_CODE;
577 s->dma_length = 0; 577 s->dma_length = 0;
@@ -648,7 +648,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
648 iopte_make_dummy(iommu, base + i); 648 iopte_make_dummy(iommu, base + i);
649 649
650 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, 650 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
651 DMA_ERROR_CODE); 651 IOMMU_ERROR_CODE);
652 sg = sg_next(sg); 652 sg = sg_next(sg);
653 } 653 }
654 654
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 1ae5eb1bb045..59d503866431 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1953,7 +1953,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
1953 1953
1954 entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table, 1954 entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
1955 npages, NULL, (unsigned long)-1, 0); 1955 npages, NULL, (unsigned long)-1, 0);
1956 if (unlikely(entry < 0)) 1956 if (unlikely(entry == IOMMU_ERROR_CODE))
1957 return NULL; 1957 return NULL;
1958 1958
1959 return iommu->page_table + entry; 1959 return iommu->page_table + entry;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d2fe57dad433..836e8cef47e2 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -159,7 +159,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
160 (unsigned long)(-1), 0); 160 (unsigned long)(-1), 0);
161 161
162 if (unlikely(entry == DMA_ERROR_CODE)) 162 if (unlikely(entry == IOMMU_ERROR_CODE))
163 goto range_alloc_fail; 163 goto range_alloc_fail;
164 164
165 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 165 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -187,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
187 return ret; 187 return ret;
188 188
189iommu_map_fail: 189iommu_map_fail:
190 iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE); 190 iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
191 191
192range_alloc_fail: 192range_alloc_fail:
193 free_pages(first_page, order); 193 free_pages(first_page, order);
@@ -226,7 +226,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
226 devhandle = pbm->devhandle; 226 devhandle = pbm->devhandle;
227 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); 227 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
228 dma_4v_iommu_demap(&devhandle, entry, npages); 228 dma_4v_iommu_demap(&devhandle, entry, npages);
229 iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); 229 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
230 order = get_order(size); 230 order = get_order(size);
231 if (order < 10) 231 if (order < 10)
232 free_pages((unsigned long)cpu, order); 232 free_pages((unsigned long)cpu, order);
@@ -256,7 +256,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
257 (unsigned long)(-1), 0); 257 (unsigned long)(-1), 0);
258 258
259 if (unlikely(entry == DMA_ERROR_CODE)) 259 if (unlikely(entry == IOMMU_ERROR_CODE))
260 goto bad; 260 goto bad;
261 261
262 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 262 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -288,7 +288,7 @@ bad:
288 return DMA_ERROR_CODE; 288 return DMA_ERROR_CODE;
289 289
290iommu_map_fail: 290iommu_map_fail:
291 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); 291 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
292 return DMA_ERROR_CODE; 292 return DMA_ERROR_CODE;
293} 293}
294 294
@@ -317,7 +317,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
317 bus_addr &= IO_PAGE_MASK; 317 bus_addr &= IO_PAGE_MASK;
318 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; 318 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
319 dma_4v_iommu_demap(&devhandle, entry, npages); 319 dma_4v_iommu_demap(&devhandle, entry, npages);
320 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); 320 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
321} 321}
322 322
323static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, 323static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -376,7 +376,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
376 &handle, (unsigned long)(-1), 0); 376 &handle, (unsigned long)(-1), 0);
377 377
378 /* Handle failure */ 378 /* Handle failure */
379 if (unlikely(entry == DMA_ERROR_CODE)) { 379 if (unlikely(entry == IOMMU_ERROR_CODE)) {
380 if (printk_ratelimit()) 380 if (printk_ratelimit())
381 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" 381 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
382 " npages %lx\n", iommu, paddr, npages); 382 " npages %lx\n", iommu, paddr, npages);
@@ -451,7 +451,7 @@ iommu_map_failed:
451 npages = iommu_num_pages(s->dma_address, s->dma_length, 451 npages = iommu_num_pages(s->dma_address, s->dma_length,
452 IO_PAGE_SIZE); 452 IO_PAGE_SIZE);
453 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, 453 iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
454 DMA_ERROR_CODE); 454 IOMMU_ERROR_CODE);
455 /* XXX demap? XXX */ 455 /* XXX demap? XXX */
456 s->dma_address = DMA_ERROR_CODE; 456 s->dma_address = DMA_ERROR_CODE;
457 s->dma_length = 0; 457 s->dma_length = 0;
@@ -496,7 +496,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
496 entry = ((dma_handle - tbl->table_map_base) >> shift); 496 entry = ((dma_handle - tbl->table_map_base) >> shift);
497 dma_4v_iommu_demap(&devhandle, entry, npages); 497 dma_4v_iommu_demap(&devhandle, entry, npages);
498 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, 498 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
499 DMA_ERROR_CODE); 499 IOMMU_ERROR_CODE);
500 sg = sg_next(sg); 500 sg = sg_next(sg);
501 } 501 }
502 502
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
index bbced83b32ee..376a27c9cc6a 100644
--- a/include/linux/iommu-common.h
+++ b/include/linux/iommu-common.h
@@ -7,6 +7,7 @@
7 7
8#define IOMMU_POOL_HASHBITS 4 8#define IOMMU_POOL_HASHBITS 4
9#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) 9#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
10#define IOMMU_ERROR_CODE (~(unsigned long) 0)
10 11
11struct iommu_pool { 12struct iommu_pool {
12 unsigned long start; 13 unsigned long start;
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index ff19f66d3f7f..0f2c887be770 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -11,10 +11,6 @@
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/hash.h> 12#include <linux/hash.h>
13 13
14#ifndef DMA_ERROR_CODE
15#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
16#endif
17
18static unsigned long iommu_large_alloc = 15; 14static unsigned long iommu_large_alloc = 15;
19 15
20static DEFINE_PER_CPU(unsigned int, iommu_hash_common); 16static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
@@ -124,7 +120,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
124 /* Sanity check */ 120 /* Sanity check */
125 if (unlikely(npages == 0)) { 121 if (unlikely(npages == 0)) {
126 WARN_ON_ONCE(1); 122 WARN_ON_ONCE(1);
127 return DMA_ERROR_CODE; 123 return IOMMU_ERROR_CODE;
128 } 124 }
129 125
130 if (largealloc) { 126 if (largealloc) {
@@ -207,7 +203,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
207 goto again; 203 goto again;
208 } else { 204 } else {
209 /* give up */ 205 /* give up */
210 n = DMA_ERROR_CODE; 206 n = IOMMU_ERROR_CODE;
211 goto bail; 207 goto bail;
212 } 208 }
213 } 209 }
@@ -259,7 +255,7 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
259 unsigned long flags; 255 unsigned long flags;
260 unsigned long shift = iommu->table_shift; 256 unsigned long shift = iommu->table_shift;
261 257
262 if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */ 258 if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
263 entry = (dma_addr - iommu->table_map_base) >> shift; 259 entry = (dma_addr - iommu->table_map_base) >> shift;
264 pool = get_pool(iommu, entry); 260 pool = get_pool(iommu, entry);
265 261