| author | Robert Jennings <rcj@linux.vnet.ibm.com> | 2008-07-23 14:31:16 -0400 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-25 01:44:43 -0400 |
| commit | 6490c4903d12f242bec4454301f76f6a7520e399 | |
| tree | 8b4bc5fb45618ed4af993da51985be2e42a39475 | /arch/powerpc/kernel/iommu.c |
| parent | ffa5abbd0c399b32fc13a1b4718d87ee7a716999 | |
powerpc/pseries: iommu enablement for CMO
To support Cooperative Memory Overcommitment (CMO), we need to check
for failure from some of the tce hcalls.
The pseries changes require a change to the common powerpc architecture code, so
the matching updates for the other affected platforms are included in this patch as well.
pSeries platform IOMMU code changes:
* platform TCE functions must check for H_NOT_ENOUGH_RESOURCES from the TCE
hcalls and return an error to the caller.
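
As an illustration of that check, here is a minimal sketch of a pseries tce_build implementation. It is not the verbatim pseries hunk (which lives outside this file); the simplified TCE construction and the exact cleanup path are assumptions, although plpar_tce_put(), tce_free_pSeriesLP() and H_NOT_ENOUGH_RESOURCES are the names used by the pseries IOMMU code:

```c
/* Sketch only: a pseries tce_build that propagates transient hcall
 * failures instead of ignoring them (kernel-internal includes elided).
 */
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	long tcenum_start = tcenum, npages_start = npages;
	u64 proto_tce = TCE_PCI_READ;		/* read access always allowed */
	u64 rpn = __pa(uaddr) >> TCE_SHIFT;	/* real page number for uaddr */
	int ret = 0;

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		u64 tce = proto_tce | (rpn << TCE_RPN_SHIFT);
		long rc = plpar_tce_put((u64)tbl->it_index,
					(u64)tcenum << TCE_SHIFT, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			/* Transient CMO failure: undo whatever part of the
			 * range was already mapped and report the error so
			 * the generic code can return DMA_ERROR_CODE.
			 */
			tce_free_pSeriesLP(tbl, tcenum_start,
					   npages_start - (npages + 1));
			ret = (int)rc;
			break;
		}
		tcenum++;
		rpn++;
	}
	return ret;
}
```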
Architecture IOMMU code changes:
* Calls to ppc_md.tce_build need to check the return value and return
DMA_ERROR_CODE for transient errors, as in the iommu.c hunks below.
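
From a driver's point of view nothing new is required; the transient failure simply surfaces through the existing DMA API error reporting. A generic sketch (the helper name submit_buffers() and its arguments are placeholders, not part of this patch):

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver helper: map a scatterlist and notice when the
 * IOMMU layer failed, e.g. because a CMO hcall transiently ran out of
 * resources and iommu_map_sg() mapped nothing.
 */
static int submit_buffers(struct device *dev, struct scatterlist *sg, int nents)
{
	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (mapped == 0)
		return -ENOMEM;	/* nothing was mapped; the caller may retry */

	/* ... hand the 'mapped' segments to the hardware ... */
	return 0;
}
```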
Architecture changes:
* the tce_build function pointer in struct machdep_calls needs a return value so
that the tce_build*_pSeriesLP implementations can indicate failure (a sketch of
the new prototype follows this list).
* all other platforms need their iommu functions updated to match the new
calling semantics and return 0 on success. The other platforms' default
configs have been built, but no further testing was performed.
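
For reference, the new calling convention amounts to roughly the following change in struct machdep_calls; this is a sketch of the resulting prototype, not a verbatim hunk from the patch:

```c
/* Sketch: ppc_md.tce_build now returns int.  0 means success; a
 * non-zero value means a transient failure that the generic IOMMU
 * code translates into DMA_ERROR_CODE.
 */
struct machdep_calls {
	/* ... */
	int	(*tce_build)(struct iommu_table *tbl,
			     long index,
			     long npages,
			     unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs);
	/* ... other members unchanged ... */
};
```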
Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
```
 arch/powerpc/kernel/iommu.c | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)
```
```diff
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2385f68c1751..550a19399bfa 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -49,6 +49,8 @@ static int novmerge = 1;
 
 static int protect4gb = 1;
 
+static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
@@ -191,6 +193,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
+	int build_fail;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
@@ -205,9 +208,21 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction, attrs);
+	build_fail = ppc_md.tce_build(tbl, entry, npages,
+				      (unsigned long)page & IOMMU_PAGE_MASK,
+				      direction, attrs);
+
+	/* ppc_md.tce_build() only returns non-zero for transient errors.
+	 * Clean up the table bitmap in this case and return
+	 * DMA_ERROR_CODE. For all other errors the functionality is
+	 * not altered.
+	 */
+	if (unlikely(build_fail)) {
+		__iommu_free(tbl, ret, npages);
 
+		spin_unlock_irqrestore(&(tbl->it_lock), flags);
+		return DMA_ERROR_CODE;
+	}
 
 	/* Flush/invalidate TLB caches if necessary */
 	if (ppc_md.tce_flush)
@@ -276,7 +291,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
-	int outcount, incount, i;
+	int outcount, incount, i, build_fail = 0;
 	unsigned int align;
 	unsigned long handle;
 	unsigned int max_seg_size;
@@ -337,8 +352,11 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
-				 direction, attrs);
+		build_fail = ppc_md.tce_build(tbl, entry, npages,
+					      vaddr & IOMMU_PAGE_MASK,
+					      direction, attrs);
+		if(unlikely(build_fail))
+			goto failure;
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
```