Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/iommu.c  |  15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 359f078571c7..9c8967fa1e63 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -170,13 +170,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	int build_fail;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
-
 	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
+	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 
-	if (unlikely(entry == DMA_ERROR_CODE)) {
-		spin_unlock_irqrestore(&(tbl->it_lock), flags);
+	if (unlikely(entry == DMA_ERROR_CODE))
 		return DMA_ERROR_CODE;
-	}
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
 	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
@@ -192,9 +190,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	 * not altered.
 	 */
 	if (unlikely(build_fail)) {
+		spin_lock_irqsave(&(tbl->it_lock), flags);
 		__iommu_free(tbl, ret, npages);
-
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
+
 		return DMA_ERROR_CODE;
 	}
 
@@ -202,8 +201,6 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	if (ppc_md.tce_flush)
 		ppc_md.tce_flush(tbl);
 
-	spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
 	/* Make sure updates are seen by hardware */
 	mb();
 
@@ -244,8 +241,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
-
 	__iommu_free(tbl, dma_addr, npages);
+	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 
 	/* Make sure TLB cache is flushed if the HW needs it. We do
 	 * not do an mb() here on purpose, it is not needed on any of
@@ -253,8 +250,6 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	 */
 	if (ppc_md.tce_flush)
 		ppc_md.tce_flush(tbl);
-
-	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
 int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
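The net effect of the patch is to shrink the it_lock critical sections: the lock now covers only the shared range allocator (and the range free), while the TCE build, tce_flush and memory barrier run outside it. The following is a minimal user-space sketch of that pattern, not kernel code: a pthread mutex stands in for tbl->it_lock, and table_alloc(), range_alloc(), TABLE_PAGES and ALLOC_FAIL are illustrative names invented for the example.

/* Sketch of the narrowed locking pattern; pthread mutex stands in for
 * tbl->it_lock, and range_alloc()/TABLE_PAGES are illustrative only. */
#include <pthread.h>
#include <stdio.h>

#define TABLE_PAGES	1024UL
#define ALLOC_FAIL	(~0UL)		/* stand-in for DMA_ERROR_CODE */

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_free;		/* shared allocator state */

/* Caller must hold table_lock, mirroring iommu_range_alloc(). */
static unsigned long range_alloc(unsigned long npages)
{
	unsigned long entry;

	if (next_free + npages > TABLE_PAGES)
		return ALLOC_FAIL;

	entry = next_free;
	next_free += npages;
	return entry;
}

static unsigned long table_alloc(unsigned long npages)
{
	unsigned long entry;

	/* Lock only around the shared allocator, as in the patched code. */
	pthread_mutex_lock(&table_lock);
	entry = range_alloc(npages);
	pthread_mutex_unlock(&table_lock);

	if (entry == ALLOC_FAIL)
		return ALLOC_FAIL;

	/* Per-entry setup happens outside the critical section now. */
	printf("building %lu entries starting at %lu\n", npages, entry);
	return entry;
}

int main(void)
{
	table_alloc(4);
	return 0;
}

On the error path the patched kernel code re-takes the lock only for the cleanup itself, as the second hunk shows: spin_lock_irqsave() is added immediately around __iommu_free() inside the build_fail branch.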