Diffstat (limited to 'arch/sparc64/kernel/pci_sun4v.c')
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c  206
1 file changed, 132 insertions(+), 74 deletions(-)
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index c8b6199a5dc4..ddca6c6c0b49 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -89,6 +89,17 @@ static long iommu_batch_flush(struct iommu_batch *p)
         return 0;
 }
 
+static inline void iommu_batch_new_entry(unsigned long entry)
+{
+        struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+        if (p->entry + p->npages == entry)
+                return;
+        if (p->entry != ~0UL)
+                iommu_batch_flush(p);
+        p->entry = entry;
+}
+
 /* Interrupts must be disabled. */
 static inline long iommu_batch_add(u64 phys_page)
 {
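
A side note on the hunk above: iommu_batch_new_entry() only forces a flush of the per-CPU mapping batch when a freshly allocated IOMMU entry does not continue the run already queued, so contiguous allocations can still be pushed to the hypervisor in one batch. The following is a rough, self-contained user-space analogue of that check, not part of the patch; struct batch, batch_flush() and the printf stand-in for the hypervisor call are invented for illustration.

/* Standalone sketch (not kernel code): flush the queued run only when the
 * next entry is not contiguous with it, mirroring iommu_batch_new_entry().
 */
#include <stdio.h>

struct batch {
	unsigned long entry;	/* first queued entry, ~0UL when empty */
	unsigned long npages;	/* pages queued so far */
};

static void batch_flush(struct batch *b)
{
	if (b->npages)
		printf("flush: entry %lu, %lu pages\n", b->entry, b->npages);
	b->npages = 0;
}

/* Only flush when the new entry does not extend the queued run. */
static void batch_new_entry(struct batch *b, unsigned long entry)
{
	if (b->entry + b->npages == entry)
		return;
	if (b->entry != ~0UL)
		batch_flush(b);
	b->entry = entry;
}

int main(void)
{
	struct batch b = { .entry = ~0UL, .npages = 0 };
	unsigned long entries[] = { 10, 11, 12, 40, 41 };

	for (int i = 0; i < 5; i++) {
		batch_new_entry(&b, entries[i]);
		b.npages++;		/* pretend one page queued per entry */
	}
	batch_flush(&b);		/* 10..12 flushed earlier, 40..41 here */
	return 0;
}
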
@@ -320,88 +331,131 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nelems, enum dma_data_direction direction)
 {
-        unsigned long flags, npages, i, prot;
-        u32 dma_base, orig_dma_base;
-        struct scatterlist *sg;
+        struct scatterlist *s, *outs, *segstart;
+        unsigned long flags, handle, prot;
+        dma_addr_t dma_next = 0, dma_addr;
+        unsigned int max_seg_size;
+        int outcount, incount, i;
         struct iommu *iommu;
-        long entry, err;
+        long err;
 
-        /* Fast path single entry scatterlists. */
-        if (nelems == 1) {
-                sglist->dma_address =
-                        dma_4v_map_single(dev, sg_virt(sglist),
                                           sglist->length, direction);
-                if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
-                        return 0;
-                sglist->dma_length = sglist->length;
-                return 1;
-        }
+        BUG_ON(direction == DMA_NONE);
 
         iommu = dev->archdata.iommu;
+        if (nelems == 0 || !iommu)
+                return 0;
 
-        if (unlikely(direction == DMA_NONE))
-                goto bad;
-
-        npages = calc_npages(sglist, nelems);
+        prot = HV_PCI_MAP_ATTR_READ;
+        if (direction != DMA_TO_DEVICE)
+                prot |= HV_PCI_MAP_ATTR_WRITE;
 
-        spin_lock_irqsave(&iommu->lock, flags);
-        entry = iommu_range_alloc(dev, iommu, npages, NULL);
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        outs = s = segstart = &sglist[0];
+        outcount = 1;
+        incount = nelems;
+        handle = 0;
 
-        if (unlikely(entry == DMA_ERROR_CODE))
-                goto bad;
+        /* Init first segment length for backout at failure */
+        outs->dma_length = 0;
 
-        orig_dma_base = dma_base = iommu->page_table_map_base +
-                (entry << IO_PAGE_SHIFT);
+        spin_lock_irqsave(&iommu->lock, flags);
 
-        prot = HV_PCI_MAP_ATTR_READ;
-        if (direction != DMA_TO_DEVICE)
-                prot |= HV_PCI_MAP_ATTR_WRITE;
+        iommu_batch_start(dev, prot, ~0UL);
 
-        local_irq_save(flags);
+        max_seg_size = dma_get_max_seg_size(dev);
+        for_each_sg(sglist, s, nelems, i) {
+                unsigned long paddr, npages, entry, slen;
 
-        iommu_batch_start(dev, prot, entry);
+                slen = s->length;
+                /* Sanity check */
+                if (slen == 0) {
+                        dma_next = 0;
+                        continue;
+                }
+                /* Allocate iommu entries for that segment */
+                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
+                npages = iommu_num_pages(paddr, slen);
+                entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
-        for_each_sg(sglist, sg, nelems, i) {
-                unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-                unsigned long slen = sg->length;
-                unsigned long this_npages;
+                /* Handle failure */
+                if (unlikely(entry == DMA_ERROR_CODE)) {
+                        if (printk_ratelimit())
+                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
+                                       " npages %lx\n", iommu, paddr, npages);
+                        goto iommu_map_failed;
+                }
 
-                this_npages = iommu_num_pages(paddr, slen);
+                iommu_batch_new_entry(entry);
 
-                sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
-                sg->dma_length = slen;
+                /* Convert entry to a dma_addr_t */
+                dma_addr = iommu->page_table_map_base +
+                        (entry << IO_PAGE_SHIFT);
+                dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
+                /* Insert into HW table */
                 paddr &= IO_PAGE_MASK;
-                while (this_npages--) {
+                while (npages--) {
                         err = iommu_batch_add(paddr);
-                        if (unlikely(err < 0L)) {
-                                local_irq_restore(flags);
+                        if (unlikely(err < 0L))
                                 goto iommu_map_failed;
+                        paddr += IO_PAGE_SIZE;
+                }
+
+                /* If we are in an open segment, try merging */
+                if (segstart != s) {
+                        /* We cannot merge if:
+                         * - allocated dma_addr isn't contiguous to previous allocation
+                         */
+                        if ((dma_addr != dma_next) ||
+                            (outs->dma_length + s->length > max_seg_size)) {
+                                /* Can't merge: create a new segment */
+                                segstart = s;
+                                outcount++;
+                                outs = sg_next(outs);
+                        } else {
+                                outs->dma_length += s->length;
                         }
+                }
 
-                        paddr += IO_PAGE_SIZE;
-                        dma_base += IO_PAGE_SIZE;
+                if (segstart == s) {
+                        /* This is a new segment, fill entries */
+                        outs->dma_address = dma_addr;
+                        outs->dma_length = slen;
                 }
+
+                /* Calculate next page pointer for contiguous check */
+                dma_next = dma_addr + slen;
         }
 
         err = iommu_batch_end();
 
-        local_irq_restore(flags);
-
         if (unlikely(err < 0L))
                 goto iommu_map_failed;
 
-        return nelems;
+        spin_unlock_irqrestore(&iommu->lock, flags);
 
-bad:
-        if (printk_ratelimit())
-                WARN_ON(1);
-        return 0;
+        if (outcount < incount) {
+                outs = sg_next(outs);
+                outs->dma_address = DMA_ERROR_CODE;
+                outs->dma_length = 0;
+        }
+
+        return outcount;
 
 iommu_map_failed:
-        spin_lock_irqsave(&iommu->lock, flags);
-        iommu_range_free(iommu, orig_dma_base, npages);
+        for_each_sg(sglist, s, nelems, i) {
+                if (s->dma_length != 0) {
+                        unsigned long vaddr, npages;
+
+                        vaddr = s->dma_address & IO_PAGE_MASK;
+                        npages = iommu_num_pages(s->dma_address, s->dma_length);
+                        iommu_range_free(iommu, vaddr, npages);
+                        /* XXX demap? XXX */
+                        s->dma_address = DMA_ERROR_CODE;
+                        s->dma_length = 0;
+                }
+                if (s == outs)
+                        break;
+        }
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         return 0;
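
For orientation, the rewritten dma_4v_map_sg() above allocates IOMMU pages per scatterlist segment and then folds a segment into the previous output segment only when its DMA address starts exactly where the previous mapping ended and the combined length stays within dma_get_max_seg_size(). Below is a minimal stand-alone sketch of just that merge test; struct seg and merge_segs() are made-up names for illustration, not kernel APIs.

/* Standalone sketch (not from the patch): coalesce consecutive segments
 * while the new mapping is contiguous with the previous one and the merged
 * segment stays within the device's maximum segment size.
 */
#include <stdio.h>

struct seg {
	unsigned long dma_address;
	unsigned long dma_length;
};

/* Merge src[] into dst[]; returns the number of output segments. */
static int merge_segs(const struct seg *src, int n, struct seg *dst,
		      unsigned long max_seg_size)
{
	unsigned long dma_next = 0;
	int out = 0;

	for (int i = 0; i < n; i++) {
		if (out &&
		    src[i].dma_address == dma_next &&
		    dst[out - 1].dma_length + src[i].dma_length <= max_seg_size) {
			dst[out - 1].dma_length += src[i].dma_length;	/* merge */
		} else {
			dst[out++] = src[i];				/* new segment */
		}
		dma_next = src[i].dma_address + src[i].dma_length;
	}
	return out;
}

int main(void)
{
	struct seg in[] = {
		{ 0x10000, 0x1000 }, { 0x11000, 0x1000 },	/* contiguous: merge */
		{ 0x20000, 0x2000 },				/* gap: new segment */
	};
	struct seg out[3];
	int n = merge_segs(in, 3, out, 0x10000);

	for (int i = 0; i < n; i++)
		printf("seg %d: addr %#lx len %#lx\n", i,
		       out[i].dma_address, out[i].dma_length);
	return 0;
}
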
@@ -410,39 +464,43 @@ iommu_map_failed:
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                             int nelems, enum dma_data_direction direction)
 {
-        unsigned long flags, npages;
         struct pci_pbm_info *pbm;
-        u32 devhandle, bus_addr;
+        struct scatterlist *sg;
         struct iommu *iommu;
-        long entry;
+        unsigned long flags;
+        u32 devhandle;
 
-        if (unlikely(direction == DMA_NONE)) {
-                if (printk_ratelimit())
-                        WARN_ON(1);
-        }
+        BUG_ON(direction == DMA_NONE);
 
         iommu = dev->archdata.iommu;
         pbm = dev->archdata.host_controller;
         devhandle = pbm->devhandle;
 
-        bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-        npages = calc_npages(sglist, nelems);
-
-        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
         spin_lock_irqsave(&iommu->lock, flags);
 
-        iommu_range_free(iommu, bus_addr, npages);
-
-        do {
-                unsigned long num;
+        sg = sglist;
+        while (nelems--) {
+                dma_addr_t dma_handle = sg->dma_address;
+                unsigned int len = sg->dma_length;
+                unsigned long npages, entry;
+
+                if (!len)
+                        break;
+                npages = iommu_num_pages(dma_handle, len);
+                iommu_range_free(iommu, dma_handle, npages);
+
+                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+                while (npages) {
+                        unsigned long num;
+
+                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                     npages);
+                        entry += num;
+                        npages -= num;
+                }
 
-                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                             npages);
-                entry += num;
-                npages -= num;
-        } while (npages != 0);
+                sg = sg_next(sg);
+        }
 
         spin_unlock_irqrestore(&iommu->lock, flags);
 }
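
One detail worth calling out in the hunk above: pci_sun4v_iommu_demap() may tear down fewer pages than requested, so each segment is demapped in a loop that advances the entry index by however many pages the call actually handled. A small self-contained sketch of that retry pattern follows; fake_demap() is an invented stand-in for the hypervisor call, handling at most 4 pages per invocation.

/* Standalone sketch (not kernel code): keep retrying until every requested
 * page has been processed, as the real demap call may do partial work.
 */
#include <stdio.h>

static unsigned long fake_demap(unsigned long entry, unsigned long npages)
{
	unsigned long done = npages < 4 ? npages : 4;

	printf("demap entries %lu..%lu\n", entry, entry + done - 1);
	return done;		/* number of pages actually demapped */
}

static void demap_range(unsigned long entry, unsigned long npages)
{
	while (npages) {
		unsigned long num = fake_demap(entry, npages);

		entry += num;
		npages -= num;
	}
}

int main(void)
{
	demap_range(100, 10);	/* issued as 4 + 4 + 2 */
	return 0;
}
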