summaryrefslogtreecommitdiffstats
path: root/drivers/vfio
diff options
context:
space:
mode:
authorAlexey Kardashevskiy <aik@ozlabs.ru>2015-06-05 02:35:09 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2015-06-11 01:16:15 -0400
commit0eaf4defc7c44ed5dd33a03cab12a5f88c9b4b86 (patch)
treeb95470bf0da461090ac3f8aa840110cdb0abb1ec /drivers/vfio
parentb348aa65297659c310943221ac1d3f4b4491ea44 (diff)
powerpc/spapr: vfio: Switch from iommu_table to new iommu_table_group
So far one TCE table could only be used by one IOMMU group. However IODA2 hardware allows programming the same TCE table address to multiple PE allowing sharing tables. This replaces a single pointer to a group in a iommu_table struct with a linked list of groups which provides the way of invalidating TCE cache for every PE when an actual TCE table is updated. This adds pnv_pci_link_table_and_group() and pnv_pci_unlink_table_and_group() helpers to manage the list. However without VFIO, it is still going to be a single IOMMU group per iommu_table. This changes iommu_add_device() to add a device to a first group from the group list of a table as it is only called from the platform init code or PCI bus notifier and at these moments there is only one group per table. This does not change TCE invalidation code to loop through all attached groups in order to simplify this patch and because it is not really needed in most cases. IODA2 is fixed in a later patch. This should cause no behavioural change. Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> [aw: for the vfio related changes] Acked-by: Alex Williamson <alex.williamson@redhat.com> Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com> Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'drivers/vfio')
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c122
1 file changed, 86 insertions, 36 deletions
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c4bc345d64d7..ffc634a75dba 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -88,7 +88,7 @@ static void decrement_locked_vm(long npages)
88 */ 88 */
89struct tce_container { 89struct tce_container {
90 struct mutex lock; 90 struct mutex lock;
91 struct iommu_table *tbl; 91 struct iommu_group *grp;
92 bool enabled; 92 bool enabled;
93 unsigned long locked_pages; 93 unsigned long locked_pages;
94}; 94};
@@ -103,13 +103,42 @@ static bool tce_page_is_contained(struct page *page, unsigned page_shift)
103 return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift; 103 return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
104} 104}
105 105
106static long tce_iommu_find_table(struct tce_container *container,
107 phys_addr_t ioba, struct iommu_table **ptbl)
108{
109 long i;
110 struct iommu_table_group *table_group;
111
112 table_group = iommu_group_get_iommudata(container->grp);
113 if (!table_group)
114 return -1;
115
116 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
117 struct iommu_table *tbl = table_group->tables[i];
118
119 if (tbl) {
120 unsigned long entry = ioba >> tbl->it_page_shift;
121 unsigned long start = tbl->it_offset;
122 unsigned long end = start + tbl->it_size;
123
124 if ((start <= entry) && (entry < end)) {
125 *ptbl = tbl;
126 return i;
127 }
128 }
129 }
130
131 return -1;
132}
133
106static int tce_iommu_enable(struct tce_container *container) 134static int tce_iommu_enable(struct tce_container *container)
107{ 135{
108 int ret = 0; 136 int ret = 0;
109 unsigned long locked; 137 unsigned long locked;
110 struct iommu_table *tbl = container->tbl; 138 struct iommu_table *tbl;
139 struct iommu_table_group *table_group;
111 140
112 if (!container->tbl) 141 if (!container->grp)
113 return -ENXIO; 142 return -ENXIO;
114 143
115 if (!current->mm) 144 if (!current->mm)
@@ -143,6 +172,11 @@ static int tce_iommu_enable(struct tce_container *container)
143 * as this information is only available from KVM and VFIO is 172 * as this information is only available from KVM and VFIO is
144 * KVM agnostic. 173 * KVM agnostic.
145 */ 174 */
175 table_group = iommu_group_get_iommudata(container->grp);
176 if (!table_group)
177 return -ENODEV;
178
179 tbl = table_group->tables[0];
146 locked = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT; 180 locked = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
147 ret = try_increment_locked_vm(locked); 181 ret = try_increment_locked_vm(locked);
148 if (ret) 182 if (ret)
@@ -190,11 +224,10 @@ static void tce_iommu_release(void *iommu_data)
190{ 224{
191 struct tce_container *container = iommu_data; 225 struct tce_container *container = iommu_data;
192 226
193 WARN_ON(container->tbl && !container->tbl->it_table_group->group); 227 WARN_ON(container->grp);
194 228
195 if (container->tbl && container->tbl->it_table_group->group) 229 if (container->grp)
196 tce_iommu_detach_group(iommu_data, 230 tce_iommu_detach_group(iommu_data, container->grp);
197 container->tbl->it_table_group->group);
198 231
199 tce_iommu_disable(container); 232 tce_iommu_disable(container);
200 mutex_destroy(&container->lock); 233 mutex_destroy(&container->lock);
@@ -312,9 +345,16 @@ static long tce_iommu_ioctl(void *iommu_data,
312 345
313 case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { 346 case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
314 struct vfio_iommu_spapr_tce_info info; 347 struct vfio_iommu_spapr_tce_info info;
315 struct iommu_table *tbl = container->tbl; 348 struct iommu_table *tbl;
349 struct iommu_table_group *table_group;
350
351 if (WARN_ON(!container->grp))
352 return -ENXIO;
353
354 table_group = iommu_group_get_iommudata(container->grp);
316 355
317 if (WARN_ON(!tbl)) 356 tbl = table_group->tables[0];
357 if (WARN_ON_ONCE(!tbl))
318 return -ENXIO; 358 return -ENXIO;
319 359
320 minsz = offsetofend(struct vfio_iommu_spapr_tce_info, 360 minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
@@ -337,17 +377,13 @@ static long tce_iommu_ioctl(void *iommu_data,
337 } 377 }
338 case VFIO_IOMMU_MAP_DMA: { 378 case VFIO_IOMMU_MAP_DMA: {
339 struct vfio_iommu_type1_dma_map param; 379 struct vfio_iommu_type1_dma_map param;
340 struct iommu_table *tbl = container->tbl; 380 struct iommu_table *tbl = NULL;
341 unsigned long tce; 381 unsigned long tce;
382 long num;
342 383
343 if (!container->enabled) 384 if (!container->enabled)
344 return -EPERM; 385 return -EPERM;
345 386
346 if (!tbl)
347 return -ENXIO;
348
349 BUG_ON(!tbl->it_table_group->group);
350
351 minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); 387 minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
352 388
353 if (copy_from_user(&param, (void __user *)arg, minsz)) 389 if (copy_from_user(&param, (void __user *)arg, minsz))
@@ -360,6 +396,10 @@ static long tce_iommu_ioctl(void *iommu_data,
360 VFIO_DMA_MAP_FLAG_WRITE)) 396 VFIO_DMA_MAP_FLAG_WRITE))
361 return -EINVAL; 397 return -EINVAL;
362 398
399 num = tce_iommu_find_table(container, param.iova, &tbl);
400 if (num < 0)
401 return -ENXIO;
402
363 if ((param.size & ~IOMMU_PAGE_MASK(tbl)) || 403 if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
364 (param.vaddr & ~IOMMU_PAGE_MASK(tbl))) 404 (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
365 return -EINVAL; 405 return -EINVAL;
@@ -385,14 +425,12 @@ static long tce_iommu_ioctl(void *iommu_data,
385 } 425 }
386 case VFIO_IOMMU_UNMAP_DMA: { 426 case VFIO_IOMMU_UNMAP_DMA: {
387 struct vfio_iommu_type1_dma_unmap param; 427 struct vfio_iommu_type1_dma_unmap param;
388 struct iommu_table *tbl = container->tbl; 428 struct iommu_table *tbl = NULL;
429 long num;
389 430
390 if (!container->enabled) 431 if (!container->enabled)
391 return -EPERM; 432 return -EPERM;
392 433
393 if (WARN_ON(!tbl))
394 return -ENXIO;
395
396 minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, 434 minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
397 size); 435 size);
398 436
@@ -406,6 +444,10 @@ static long tce_iommu_ioctl(void *iommu_data,
406 if (param.flags) 444 if (param.flags)
407 return -EINVAL; 445 return -EINVAL;
408 446
447 num = tce_iommu_find_table(container, param.iova, &tbl);
448 if (num < 0)
449 return -ENXIO;
450
409 if (param.size & ~IOMMU_PAGE_MASK(tbl)) 451 if (param.size & ~IOMMU_PAGE_MASK(tbl))
410 return -EINVAL; 452 return -EINVAL;
411 453
@@ -434,12 +476,11 @@ static long tce_iommu_ioctl(void *iommu_data,
434 mutex_unlock(&container->lock); 476 mutex_unlock(&container->lock);
435 return 0; 477 return 0;
436 case VFIO_EEH_PE_OP: 478 case VFIO_EEH_PE_OP:
437 if (!container->tbl || !container->tbl->it_table_group->group) 479 if (!container->grp)
438 return -ENODEV; 480 return -ENODEV;
439 481
440 return vfio_spapr_iommu_eeh_ioctl( 482 return vfio_spapr_iommu_eeh_ioctl(container->grp,
441 container->tbl->it_table_group->group, 483 cmd, arg);
442 cmd, arg);
443 } 484 }
444 485
445 return -ENOTTY; 486 return -ENOTTY;
@@ -450,17 +491,15 @@ static int tce_iommu_attach_group(void *iommu_data,
450{ 491{
451 int ret; 492 int ret;
452 struct tce_container *container = iommu_data; 493 struct tce_container *container = iommu_data;
453 struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group); 494 struct iommu_table_group *table_group;
454 495
455 BUG_ON(!tbl);
456 mutex_lock(&container->lock); 496 mutex_lock(&container->lock);
457 497
458 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", 498 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
459 iommu_group_id(iommu_group), iommu_group); */ 499 iommu_group_id(iommu_group), iommu_group); */
460 if (container->tbl) { 500 if (container->grp) {
461 pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n", 501 pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n",
462 iommu_group_id(container->tbl-> 502 iommu_group_id(container->grp),
463 it_table_group->group),
464 iommu_group_id(iommu_group)); 503 iommu_group_id(iommu_group));
465 ret = -EBUSY; 504 ret = -EBUSY;
466 goto unlock_exit; 505 goto unlock_exit;
@@ -473,9 +512,15 @@ static int tce_iommu_attach_group(void *iommu_data,
473 goto unlock_exit; 512 goto unlock_exit;
474 } 513 }
475 514
476 ret = iommu_take_ownership(tbl); 515 table_group = iommu_group_get_iommudata(iommu_group);
516 if (!table_group) {
517 ret = -ENXIO;
518 goto unlock_exit;
519 }
520
521 ret = iommu_take_ownership(table_group->tables[0]);
477 if (!ret) 522 if (!ret)
478 container->tbl = tbl; 523 container->grp = iommu_group;
479 524
480unlock_exit: 525unlock_exit:
481 mutex_unlock(&container->lock); 526 mutex_unlock(&container->lock);
@@ -487,26 +532,31 @@ static void tce_iommu_detach_group(void *iommu_data,
487 struct iommu_group *iommu_group) 532 struct iommu_group *iommu_group)
488{ 533{
489 struct tce_container *container = iommu_data; 534 struct tce_container *container = iommu_data;
490 struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group); 535 struct iommu_table_group *table_group;
536 struct iommu_table *tbl;
491 537
492 BUG_ON(!tbl);
493 mutex_lock(&container->lock); 538 mutex_lock(&container->lock);
494 if (tbl != container->tbl) { 539 if (iommu_group != container->grp) {
495 pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n", 540 pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n",
496 iommu_group_id(iommu_group), 541 iommu_group_id(iommu_group),
497 iommu_group_id(tbl->it_table_group->group)); 542 iommu_group_id(container->grp));
498 goto unlock_exit; 543 goto unlock_exit;
499 } 544 }
500 545
501 if (container->enabled) { 546 if (container->enabled) {
502 pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n", 547 pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n",
503 iommu_group_id(tbl->it_table_group->group)); 548 iommu_group_id(container->grp));
504 tce_iommu_disable(container); 549 tce_iommu_disable(container);
505 } 550 }
506 551
507 /* pr_debug("tce_vfio: detaching group #%u from iommu %p\n", 552 /* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
508 iommu_group_id(iommu_group), iommu_group); */ 553 iommu_group_id(iommu_group), iommu_group); */
509 container->tbl = NULL; 554 container->grp = NULL;
555
556 table_group = iommu_group_get_iommudata(iommu_group);
557 BUG_ON(!table_group);
558
559 tbl = table_group->tables[0];
510 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); 560 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
511 iommu_release_ownership(tbl); 561 iommu_release_ownership(tbl);
512 562