aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel/iommu.c
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2012-06-03 15:43:44 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2012-07-03 00:14:47 -0400
commit67ca141567519a6b0ec81850a7b6569b6d8c2b52 (patch)
tree204f16ef14591930529216357f39a4ed88ced14e /arch/powerpc/kernel/iommu.c
parent0e4bc95d87394364f408627067238453830bdbf3 (diff)
powerpc/iommu: Reduce spinlock coverage in iommu_free
This patch moves tce_free outside of the lock in iommu_free. Some performance numbers were obtained with a Chelsio T3 adapter on two POWER7 boxes, running a 100 session TCP round robin test. Performance improved 25% with this patch applied. Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--arch/powerpc/kernel/iommu.c51
1 files changed, 40 insertions, 11 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9c8967fa1e63..d855cfc0732d 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -190,10 +190,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	 * not altered.
 	 */
 	if (unlikely(build_fail)) {
-		spin_lock_irqsave(&(tbl->it_lock), flags);
 		__iommu_free(tbl, ret, npages);
-		spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
 		return DMA_ERROR_CODE;
 	}
 
@@ -207,8 +204,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	return ret;
 }
 
-static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
-			 unsigned int npages)
+static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
+			     unsigned int npages)
 {
 	unsigned long entry, free_entry;
 
@@ -228,21 +225,53 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 		printk(KERN_INFO "\tindex       = 0x%llx\n", (u64)tbl->it_index);
 		WARN_ON(1);
 	}
-		return;
+
+		return false;
 	}
 
+	return true;
+}
+
+static void __iommu_free_locked(struct iommu_table *tbl, dma_addr_t dma_addr,
+				unsigned int npages)
+{
+	unsigned long entry, free_entry;
+
+	BUG_ON(!spin_is_locked(&tbl->it_lock));
+
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	free_entry = entry - tbl->it_offset;
+
+	if (!iommu_free_check(tbl, dma_addr, npages))
+		return;
+
 	ppc_md.tce_free(tbl, entry, npages);
 	bitmap_clear(tbl->it_map, free_entry, npages);
 }
 
-static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 		unsigned int npages)
 {
+	unsigned long entry, free_entry;
 	unsigned long flags;
 
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	free_entry = entry - tbl->it_offset;
+
+	if (!iommu_free_check(tbl, dma_addr, npages))
+		return;
+
+	ppc_md.tce_free(tbl, entry, npages);
+
 	spin_lock_irqsave(&(tbl->it_lock), flags);
-	__iommu_free(tbl, dma_addr, npages);
+	bitmap_clear(tbl->it_map, free_entry, npages);
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
+}
+
+static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+		unsigned int npages)
+{
+	__iommu_free(tbl, dma_addr, npages);
 
 	/* Make sure TLB cache is flushed if the HW needs it. We do
 	 * not do an mb() here on purpose, it is not needed on any of
@@ -390,7 +419,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			vaddr = s->dma_address & IOMMU_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE);
-			__iommu_free(tbl, vaddr, npages);
+			__iommu_free_locked(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
 		}
@@ -425,7 +454,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 			break;
 		npages = iommu_num_pages(dma_handle, sg->dma_length,
 					 IOMMU_PAGE_SIZE);
-		__iommu_free(tbl, dma_handle, npages);
+		__iommu_free_locked(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
 