Diffstat (limited to 'arch/sparc/kernel/pci_sun4v.c')
-rw-r--r--	arch/sparc/kernel/pci_sun4v.c	183 ++++++++++++++++++----------------------
1 file changed, 82 insertions(+), 101 deletions(-)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 47ddbd496a1e..d2fe57dad433 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
+#include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -155,15 +156,13 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
 	iommu = dev->archdata.iommu;
 
-	spin_lock_irqsave(&iommu->lock, flags);
-	entry = iommu_range_alloc(dev, iommu, npages, NULL);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->page_table_map_base +
-		      (entry << IO_PAGE_SHIFT));
+	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
 
@@ -188,45 +187,46 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	/* Interrupts are disabled. */
-	spin_lock(&iommu->lock);
-	iommu_range_free(iommu, *dma_addrp, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
 
+static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
+			       unsigned long npages)
+{
+	u32 devhandle = *(u32 *)demap_arg;
+	unsigned long num, flags;
+
+	local_irq_save(flags);
+	do {
+		num = pci_sun4v_iommu_demap(devhandle,
+					    HV_PCI_TSBID(0, entry),
+					    npages);
+
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+	local_irq_restore(flags);
+}
+
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 				 dma_addr_t dvma, struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
-	unsigned long flags, order, npages, entry;
+	unsigned long order, npages, entry;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
-	iommu_range_free(iommu, dvma, npages);
-
-	do {
-		unsigned long num;
-
-		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-					    npages);
-		entry += num;
-		npages -= num;
-	} while (npages != 0);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
+	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
+	dma_4v_iommu_demap(&devhandle, entry, npages);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -253,15 +253,13 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	spin_lock_irqsave(&iommu->lock, flags);
-	entry = iommu_range_alloc(dev, iommu, npages, NULL);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->page_table_map_base +
-		    (entry << IO_PAGE_SHIFT));
+	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -290,11 +288,7 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	/* Interrupts are disabled. */
-	spin_lock(&iommu->lock);
-	iommu_range_free(iommu, bus_addr, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -304,7 +298,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
-	unsigned long flags, npages;
+	unsigned long npages;
 	long entry;
 	u32 devhandle;
 
@@ -321,22 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
-	iommu_range_free(iommu, bus_addr, npages);
-
-	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-	do {
-		unsigned long num;
-
-		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-					    npages);
-		entry += num;
-		npages -= num;
-	} while (npages != 0);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
+	dma_4v_iommu_demap(&devhandle, entry, npages);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -371,14 +352,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Init first segment length for backout at failure */
 	outs->dma_length = 0;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	local_irq_save(flags);
 
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -391,7 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_range_alloc(dev, iommu, npages, &handle);
+		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -404,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		iommu_batch_new_entry(entry);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->page_table_map_base +
-			(entry << IO_PAGE_SHIFT);
+		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
 		/* Insert into HW table */
@@ -451,7 +432,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	local_irq_restore(flags);
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
@@ -469,7 +450,8 @@ iommu_map_failed:
 			vaddr = s->dma_address & IO_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
-			iommu_range_free(iommu, vaddr, npages);
+			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+					     DMA_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -477,7 +459,7 @@ iommu_map_failed:
 		if (s == outs)
 			break;
 	}
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -489,7 +471,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
 	struct iommu *iommu;
-	unsigned long flags;
+	unsigned long flags, entry;
 	u32 devhandle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -498,33 +480,27 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	local_irq_save(flags);
 
 	sg = sglist;
 	while (nelems--) {
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
-		unsigned long npages, entry;
+		unsigned long npages;
+		struct iommu_map_table *tbl = &iommu->tbl;
+		unsigned long shift = IO_PAGE_SHIFT;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-		iommu_range_free(iommu, dma_handle, npages);
-
-		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-		while (npages) {
-			unsigned long num;
-
-			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-						    npages);
-			entry += num;
-			npages -= num;
-		}
-
+		entry = ((dma_handle - tbl->table_map_base) >> shift);
+		dma_4v_iommu_demap(&devhandle, entry, npages);
+		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+				     DMA_ERROR_CODE);
 		sg = sg_next(sg);
 	}
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	local_irq_restore(flags);
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
@@ -550,30 +526,33 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-					    struct iommu *iommu)
+					    struct iommu_map_table *iommu)
 {
-	struct iommu_arena *arena = &iommu->arena;
-	unsigned long i, cnt = 0;
+	struct iommu_pool *pool;
+	unsigned long i, pool_nr, cnt = 0;
 	u32 devhandle;
 
 	devhandle = pbm->devhandle;
-	for (i = 0; i < arena->limit; i++) {
-		unsigned long ret, io_attrs, ra;
-
-		ret = pci_sun4v_iommu_getmap(devhandle,
-					     HV_PCI_TSBID(0, i),
-					     &io_attrs, &ra);
-		if (ret == HV_EOK) {
-			if (page_in_phys_avail(ra)) {
-				pci_sun4v_iommu_demap(devhandle,
-						      HV_PCI_TSBID(0, i), 1);
-			} else {
-				cnt++;
-				__set_bit(i, arena->map);
+	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
+		pool = &(iommu->pools[pool_nr]);
+		for (i = pool->start; i <= pool->end; i++) {
+			unsigned long ret, io_attrs, ra;
+
+			ret = pci_sun4v_iommu_getmap(devhandle,
+						     HV_PCI_TSBID(0, i),
+						     &io_attrs, &ra);
+			if (ret == HV_EOK) {
+				if (page_in_phys_avail(ra)) {
+					pci_sun4v_iommu_demap(devhandle,
+							      HV_PCI_TSBID(0,
+							      i), 1);
+				} else {
+					cnt++;
+					__set_bit(i, iommu->map);
+				}
 			}
 		}
 	}
-
 	return cnt;
 }
 
@@ -603,20 +582,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
 	iommu->ctx_lowest_free = 1;
-	iommu->page_table_map_base = dma_offset;
+	iommu->tbl.table_map_base = dma_offset;
 	iommu->dma_addr_mask = dma_mask;
 
 	/* Allocate and initialize the free area map. */
 	sz = (num_tsb_entries + 7) / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
-	if (!iommu->arena.map) {
+	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
+	if (!iommu->tbl.map) {
 		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 		return -ENOMEM;
 	}
-	iommu->arena.limit = num_tsb_entries;
-
-	sz = probe_existing_entries(pbm, iommu);
+	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+			    NULL, false /* no large_pool */,
+			    0 /* default npools */,
+			    false /* want span boundary checking */);
+	sz = probe_existing_entries(pbm, &iommu->tbl);
 	if (sz)
 		printk("%s: Imported %lu TSB entries from OBP\n",
 		       pbm->name, sz);
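
Note on the pattern this diff applies: the old code serialized every DMA map and unmap on the single iommu->lock around one global arena bitmap, while iommu_tbl_pool_init()/iommu_tbl_range_alloc()/iommu_tbl_range_free() from lib/iommu-common.c split the map into several pools, each guarded by its own lock, so mappings issued concurrently can land in different pools without contending. The user-space program below is a minimal sketch of that pooled-bitmap idea only; every name in it is hypothetical, and it is not the kernel API.

/* Toy model of a pooled bitmap range allocator, in the spirit of
 * lib/iommu-common.c.  Hypothetical names; not the kernel API.
 * Build: gcc pool_demo.c -o pool_demo -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOTAL_ENTRIES	1024
#define NR_POOLS	4
#define POOL_SZ		(TOTAL_ENTRIES / NR_POOLS)

struct pool {
	pthread_mutex_t lock;		/* per-pool lock, not one global */
	unsigned char map[POOL_SZ];	/* one byte per entry, 1 = in use */
};

static struct pool pools[NR_POOLS];

/* Allocate npages contiguous entries; return a global index or -1. */
static long pool_alloc(unsigned long npages)
{
	/* The kernel spreads callers across pools by CPU; rand() here. */
	unsigned int first = (unsigned int)rand() % NR_POOLS;
	unsigned int n;

	for (n = 0; n < NR_POOLS; n++) {
		unsigned int pn = (first + n) % NR_POOLS;
		struct pool *p = &pools[pn];
		unsigned long i;

		pthread_mutex_lock(&p->lock);
		for (i = 0; i + npages <= POOL_SZ; i++) {
			unsigned long run = 0;

			while (run < npages && !p->map[i + run])
				run++;
			if (run == npages) {
				memset(&p->map[i], 1, npages);
				pthread_mutex_unlock(&p->lock);
				return (long)(pn * POOL_SZ + i);
			}
			i += run;	/* skip past the busy entry */
		}
		pthread_mutex_unlock(&p->lock);
	}
	return -1;			/* every pool is exhausted */
}

static void pool_free(long entry, unsigned long npages)
{
	struct pool *p = &pools[entry / POOL_SZ];

	pthread_mutex_lock(&p->lock);
	memset(&p->map[entry % POOL_SZ], 0, npages);
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	long a, b;
	int i;

	for (i = 0; i < NR_POOLS; i++)
		pthread_mutex_init(&pools[i].lock, NULL);

	a = pool_alloc(8);
	b = pool_alloc(8);
	printf("a=%ld b=%ld\n", a, b);
	pool_free(a, 8);
	pool_free(b, 8);
	return 0;
}

In this sketch an allocation never straddles a pool boundary; the real allocator additionally supports a large pool for big requests, segment-boundary masks, and a per-pool next-fit hint, none of which are modeled here.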