path: root/arch/sparc/kernel/pci_sun4v.c
author		David S. Miller <davem@davemloft.net>	2015-04-18 15:31:25 -0400
committer	David S. Miller <davem@davemloft.net>	2015-04-18 15:31:25 -0400
commit		c12f048ffdf3a5802239426dc290290929268dc9 (patch)
tree		89fa401142c58061744653492ca8b720d5b2d7b5 /arch/sparc/kernel/pci_sun4v.c
parent		04b7fe6a4a231871ef681bc95e08fe66992f7b1f (diff)
sparc: Revert generic IOMMU allocator.
I applied the wrong version of this patch series, V4 instead of V10, due to
a patchwork bundling snafu.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/pci_sun4v.c')
-rw-r--r--	arch/sparc/kernel/pci_sun4v.c	193
1 file changed, 99 insertions(+), 94 deletions(-)
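The change below is mechanically uniform: in dma_4v_alloc_coherent(),
dma_4v_map_page() and dma_4v_map_sg(), every call into the generic
iommu-common pool allocator is reverted to the old arena allocator
serialized by iommu->lock, and the unmap paths move the hypervisor demap
loop back under that same lock. A condensed view of the allocation-side
pattern, lifted from the hunks that follow (illustrative only, not a
standalone compilable unit):

	/* V4 generic allocator (being reverted): a per-CPU hash selects
	 * a pool inside the shared table code; no driver-side lock is
	 * taken around the call. */
	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      __this_cpu_read(iommu_pool_hash));

	/* Old arena allocator (being restored): one coarse per-IOMMU
	 * spinlock serializes the bitmap search. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);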
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 9b76b9d639e1..47ddbd496a1e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,8 +15,6 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
-#include <linux/hash.h>
-#include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -30,7 +28,6 @@
 
 #define DRIVER_NAME	"pci_sun4v"
 #define PFX		DRIVER_NAME ": "
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
 
 static unsigned long vpci_major = 1;
 static unsigned long vpci_minor = 1;
@@ -158,13 +155,14 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
 	iommu = dev->archdata.iommu;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-				      __this_cpu_read(iommu_pool_hash));
+	spin_lock_irqsave(&iommu->lock, flags);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->tbl.page_table_map_base +
+	*dma_addrp = (iommu->page_table_map_base +
 		      (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
@@ -190,46 +188,45 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, false, NULL);
+	/* Interrupts are disabled.  */
+	spin_lock(&iommu->lock);
+	iommu_range_free(iommu, *dma_addrp, npages);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
 range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
 
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
-			       unsigned long npages)
-{
-	u32 devhandle = *(u32 *)demap_arg;
-	unsigned long num, flags;
-
-	local_irq_save(flags);
-	do {
-		num = pci_sun4v_iommu_demap(devhandle,
-					    HV_PCI_TSBID(0, entry),
-					    npages);
-
-		entry += num;
-		npages -= num;
-	} while (npages != 0);
-	local_irq_restore(flags);
-}
-
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 				 dma_addr_t dvma, struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
-	unsigned long order, npages, entry;
+	unsigned long flags, order, npages, entry;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	iommu_range_free(iommu, dvma, npages);
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+					    npages);
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -256,13 +253,14 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-				      __this_cpu_read(iommu_pool_hash));
+	spin_lock_irqsave(&iommu->lock, flags);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->tbl.page_table_map_base +
+	bus_addr = (iommu->page_table_map_base +
 		    (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -292,7 +290,11 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+	/* Interrupts are disabled.  */
+	spin_lock(&iommu->lock);
+	iommu_range_free(iommu, bus_addr, npages);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
 	return DMA_ERROR_CODE;
 }
 
@@ -302,7 +304,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
-	unsigned long npages;
+	unsigned long flags, npages;
 	long entry;
 	u32 devhandle;
 
@@ -319,9 +321,22 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-	entry = (bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT;
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	iommu_range_free(iommu, bus_addr, npages);
+
+	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+					    npages);
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -356,14 +371,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Init first segment length for backout at failure */
 	outs->dma_length = 0;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&iommu->lock, flags);
 
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -376,8 +391,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
-					      __this_cpu_read(iommu_pool_hash));
+		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -390,7 +404,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		iommu_batch_new_entry(entry);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->tbl.page_table_map_base +
+		dma_addr = iommu->page_table_map_base +
 			   (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
@@ -437,7 +451,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
 
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
@@ -455,8 +469,7 @@ iommu_map_failed:
 			vaddr = s->dma_address & IO_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
-			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     false, NULL);
+			iommu_range_free(iommu, vaddr, npages);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -464,7 +477,7 @@ iommu_map_failed:
 		if (s == outs)
 			break;
 	}
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
 }
@@ -476,7 +489,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
 	struct iommu *iommu;
-	unsigned long flags, entry;
+	unsigned long flags;
 	u32 devhandle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -485,27 +498,33 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&iommu->lock, flags);
 
 	sg = sglist;
 	while (nelems--) {
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
-		unsigned long npages;
-		struct iommu_table *tbl = &iommu->tbl;
-		unsigned long shift = IO_PAGE_SHIFT;
+		unsigned long npages, entry;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-		entry = ((dma_handle - tbl->page_table_map_base) >> shift);
-		dma_4v_iommu_demap(&devhandle, entry, npages);
-		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-				     false, NULL);
+		iommu_range_free(iommu, dma_handle, npages);
+
+		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+		while (npages) {
+			unsigned long num;
+
+			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+						    npages);
+			entry += num;
+			npages -= num;
+		}
+
 		sg = sg_next(sg);
 	}
 
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
@@ -517,8 +536,6 @@ static struct dma_map_ops sun4v_dma_ops = {
 	.unmap_sg	= dma_4v_unmap_sg,
 };
 
-static struct iommu_tbl_ops dma_4v_iommu_ops;
-
 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 {
 	struct property *prop;
@@ -533,33 +550,30 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-					    struct iommu_table *iommu)
+					    struct iommu *iommu)
 {
-	struct iommu_pool *pool;
-	unsigned long i, pool_nr, cnt = 0;
+	struct iommu_arena *arena = &iommu->arena;
+	unsigned long i, cnt = 0;
 	u32 devhandle;
 
 	devhandle = pbm->devhandle;
-	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
-		pool = &(iommu->arena_pool[pool_nr]);
-		for (i = pool->start; i <= pool->end; i++) {
-			unsigned long ret, io_attrs, ra;
-
-			ret = pci_sun4v_iommu_getmap(devhandle,
-						     HV_PCI_TSBID(0, i),
-						     &io_attrs, &ra);
-			if (ret == HV_EOK) {
-				if (page_in_phys_avail(ra)) {
-					pci_sun4v_iommu_demap(devhandle,
-							      HV_PCI_TSBID(0,
-							      i), 1);
-				} else {
-					cnt++;
-					__set_bit(i, iommu->map);
-				}
+	for (i = 0; i < arena->limit; i++) {
+		unsigned long ret, io_attrs, ra;
+
+		ret = pci_sun4v_iommu_getmap(devhandle,
+					     HV_PCI_TSBID(0, i),
+					     &io_attrs, &ra);
+		if (ret == HV_EOK) {
+			if (page_in_phys_avail(ra)) {
+				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
+			} else {
+				cnt++;
+				__set_bit(i, arena->map);
 			}
 		}
 	}
+
 	return cnt;
 }
 
@@ -587,22 +601,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 	dma_offset = vdma[0];
 
 	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
 	iommu->ctx_lowest_free = 1;
-	iommu->tbl.page_table_map_base = dma_offset;
+	iommu->page_table_map_base = dma_offset;
 	iommu->dma_addr_mask = dma_mask;
 
 	/* Allocate and initialize the free area map. */
 	sz = (num_tsb_entries + 7) / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
-	if (!iommu->tbl.map) {
+	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+	if (!iommu->arena.map) {
 		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 		return -ENOMEM;
 	}
-	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
-			    &dma_4v_iommu_ops, false /* no large_pool */,
-			    0 /* default npools */);
-	sz = probe_existing_entries(pbm, &iommu->tbl);
+	iommu->arena.limit = num_tsb_entries;
+
+	sz = probe_existing_entries(pbm, iommu);
 	if (sz)
 		printk("%s: Imported %lu TSB entries from OBP\n",
 		       pbm->name, sz);
@@ -1001,17 +1015,8 @@ static struct platform_driver pci_sun4v_driver = {
 	.probe		= pci_sun4v_probe,
 };
 
-static void setup_iommu_pool_hash(void)
-{
-	unsigned int i;
-
-	for_each_possible_cpu(i)
-		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
 static int __init pci_sun4v_init(void)
 {
-	setup_iommu_pool_hash();
 	return platform_driver_register(&pci_sun4v_driver);
 }
 