author		ZhengShunQian <zhengsq@rock-chips.com>	2016-01-19 02:03:00 -0500
committer	Joerg Roedel <jroedel@suse.de>	2016-01-29 06:26:06 -0500
commit		cd6438c5f8446691afa4829fe1a9d7b656204f11 (patch)
tree		6285d2c90cca63a6a6b2d68a766ce6bb20510567 /drivers/iommu/rockchip-iommu.c
parent		92e963f50fc74041b5e9e744c330dca48e04f08d (diff)
iommu/rockchip: Reconstruct to support multi slaves
Some IPs, such as the video encoder/decoder, contain two slave iommus, one for reading and the other for writing. They share the same irq and clock as their master. This patch reconstructs the driver to support this case by making the slaves share the same Page Directory, Page Tables and even the register operations, so every operation on the registers of the read MMU is duplicated to the write MMU and vice versa.

Signed-off-by: ZhengShunQian <zhengsq@rock-chips.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
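The duplication scheme is easy to picture with a minimal standalone sketch (plain C; the struct and helper names below, and the stubbed register accessors, are illustrative only and not the driver's API): keep an array of per-MMU register windows and replay every command on each of them.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for MMIO accessors; the real driver uses readl()/writel(). */
static void stub_writel(uint32_t value, volatile uint32_t *addr) { *addr = value; }
static uint32_t stub_readl(volatile uint32_t *addr) { return *addr; }

#define CMD_REG_WORD 2			/* word index of a made-up command register */

/* Hypothetical model of one master device with several slave MMUs. */
struct mmu_group {
	volatile uint32_t **bases;	/* one register window per slave MMU */
	int num_mmu;
};

/* Mirror one command to every slave, the way the reworked driver loops over its bases. */
static void command_all(const struct mmu_group *grp, uint32_t cmd)
{
	for (int i = 0; i < grp->num_mmu; i++)
		stub_writel(cmd, grp->bases[i] + CMD_REG_WORD);
}

int main(void)
{
	/* Two fake register windows standing in for the read MMU and the write MMU. */
	static uint32_t win_rd[16], win_wr[16];
	volatile uint32_t *bases[] = { win_rd, win_wr };
	struct mmu_group grp = { .bases = bases, .num_mmu = 2 };

	command_all(&grp, 0x4);		/* arbitrary command value for the demo */

	printf("read-mmu cmd=%u, write-mmu cmd=%u\n",
	       (unsigned)stub_readl(bases[0] + CMD_REG_WORD),
	       (unsigned)stub_readl(bases[1] + CMD_REG_WORD));
	return 0;
}

The patch below applies the same idea in the kernel: struct rk_iommu gains an array of MMIO bases plus num_mmu, the low-level read/write helpers take a single base, and the higher-level operations loop over all bases (or target one of them, as the interrupt handler does).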
Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 214
1 file changed, 135 insertions(+), 79 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ebf0adb8e7ea..a6f593a0a29e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -86,7 +86,8 @@ struct rk_iommu_domain {
 
 struct rk_iommu {
 	struct device *dev;
-	void __iomem *base;
+	void __iomem **bases;
+	int num_mmu;
 	int irq;
 	struct list_head node; /* entry in rk_iommu_domain.iommus */
 	struct iommu_domain *domain; /* domain to which iommu is attached */
@@ -271,47 +272,70 @@ static u32 rk_iova_page_offset(dma_addr_t iova)
 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
 }
 
-static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+static u32 rk_iommu_read(void __iomem *base, u32 offset)
 {
-	return readl(iommu->base + offset);
+	return readl(base + offset);
 }
 
-static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
 {
-	writel(value, iommu->base + offset);
+	writel(value, base + offset);
 }
 
 static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
 {
-	writel(command, iommu->base + RK_MMU_COMMAND);
+	int i;
+
+	for (i = 0; i < iommu->num_mmu; i++)
+		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
 }
 
+static void rk_iommu_base_command(void __iomem *base, u32 command)
+{
+	writel(command, base + RK_MMU_COMMAND);
+}
 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
 			       size_t size)
 {
+	int i;
+
 	dma_addr_t iova_end = iova + size;
 	/*
 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
 	 * entire iotlb rather than iterate over individual iovas.
 	 */
-	for (; iova < iova_end; iova += SPAGE_SIZE)
-		rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+	for (i = 0; i < iommu->num_mmu; i++)
+		for (; iova < iova_end; iova += SPAGE_SIZE)
+			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
 }
 
 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
 {
-	return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+	bool active = true;
+	int i;
+
+	for (i = 0; i < iommu->num_mmu; i++)
+		active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+					RK_MMU_STATUS_STALL_ACTIVE;
+
+	return active;
 }
 
 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
 {
-	return rk_iommu_read(iommu, RK_MMU_STATUS) &
-			     RK_MMU_STATUS_PAGING_ENABLED;
+	bool enable = true;
+	int i;
+
+	for (i = 0; i < iommu->num_mmu; i++)
+		enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+					RK_MMU_STATUS_PAGING_ENABLED;
+
+	return enable;
 }
 
 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (rk_iommu_is_stall_active(iommu))
 		return 0;
@@ -324,15 +348,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (!rk_iommu_is_stall_active(iommu))
 		return 0;
@@ -341,15 +366,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (rk_iommu_is_paging_enabled(iommu))
 		return 0;
@@ -358,15 +384,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (!rk_iommu_is_paging_enabled(iommu))
 		return 0;
@@ -375,41 +402,49 @@ static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_force_reset(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 	u32 dte_addr;
 
 	/*
 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
 	 * and verifying that upper 5 nybbles are read back.
 	 */
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
 
-	dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
-	if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
-		dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
-		return -EFAULT;
+		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
+		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+			return -EFAULT;
+		}
 	}
 
 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
 
-	ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
-			  FORCE_RESET_TIMEOUT);
-	if (ret)
-		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+	for (i = 0; i < iommu->num_mmu; i++) {
+		ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
+				  FORCE_RESET_TIMEOUT);
+		if (ret) {
+			dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+			return ret;
+		}
+	}
 
-	return ret;
+	return 0;
 }
 
-static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
 {
+	void __iomem *base = iommu->bases[index];
 	u32 dte_index, pte_index, page_offset;
 	u32 mmu_dte_addr;
 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
@@ -425,7 +460,7 @@ static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
 	pte_index = rk_iova_pte_index(iova);
 	page_offset = rk_iova_page_offset(iova);
 
-	mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
 	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
 
 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
@@ -460,51 +495,56 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 	u32 status;
 	u32 int_status;
 	dma_addr_t iova;
+	irqreturn_t ret = IRQ_NONE;
+	int i;
 
-	int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
-	if (int_status == 0)
-		return IRQ_NONE;
+	for (i = 0; i < iommu->num_mmu; i++) {
+		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
+		if (int_status == 0)
+			continue;
 
-	iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+		ret = IRQ_HANDLED;
+		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
 
-	if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
-		int flags;
+		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+			int flags;
 
-		status = rk_iommu_read(iommu, RK_MMU_STATUS);
-		flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
-				IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
+			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
 
-		dev_err(iommu->dev, "Page fault at %pad of type %s\n",
-			&iova,
-			(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+				&iova,
+				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
 
-		log_iova(iommu, iova);
+			log_iova(iommu, i, iova);
 
-		/*
-		 * Report page fault to any installed handlers.
-		 * Ignore the return code, though, since we always zap cache
-		 * and clear the page fault anyway.
-		 */
-		if (iommu->domain)
-			report_iommu_fault(iommu->domain, iommu->dev, iova,
-					   flags);
-		else
-			dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+			/*
+			 * Report page fault to any installed handlers.
+			 * Ignore the return code, though, since we always zap cache
+			 * and clear the page fault anyway.
+			 */
+			if (iommu->domain)
+				report_iommu_fault(iommu->domain, iommu->dev, iova,
+						   flags);
+			else
+				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
 
-		rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-		rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
-	}
+			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
+		}
 
-	if (int_status & RK_MMU_IRQ_BUS_ERROR)
-		dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+		if (int_status & RK_MMU_IRQ_BUS_ERROR)
+			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
 
-	if (int_status & ~RK_MMU_IRQ_MASK)
-		dev_err(iommu->dev, "unexpected int_status: %#08x\n",
-			int_status);
+		if (int_status & ~RK_MMU_IRQ_MASK)
+			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+				int_status);
 
-	rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+	}
 
-	return IRQ_HANDLED;
+	return ret;
 }
 
 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -746,7 +786,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
-	int ret;
+	int ret, i;
 	phys_addr_t dte_addr;
 
 	/*
@@ -773,9 +813,11 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 		return ret;
 
 	dte_addr = virt_to_phys(rk_domain->dt);
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
-	rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-	rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+		rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+	}
 
 	ret = rk_iommu_enable_paging(iommu);
 	if (ret)
@@ -798,6 +840,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
+	int i;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
 	iommu = rk_iommu_from_dev(dev);
@@ -811,8 +854,10 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	/* Ignore error while disabling, just keep going */
 	rk_iommu_enable_stall(iommu);
 	rk_iommu_disable_paging(iommu);
-	rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+	}
 	rk_iommu_disable_stall(iommu);
 
 	devm_free_irq(dev, iommu->irq, iommu);
@@ -988,6 +1033,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct rk_iommu *iommu;
 	struct resource *res;
+	int i;
 
 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -995,11 +1041,21 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, iommu);
 	iommu->dev = dev;
+	iommu->num_mmu = 0;
+	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * iommu->num_mmu,
+				    GFP_KERNEL);
+	if (!iommu->bases)
+		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iommu->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(iommu->base))
-		return PTR_ERR(iommu->base);
+	for (i = 0; i < pdev->num_resources; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(iommu->bases[i]))
+			continue;
+		iommu->num_mmu++;
+	}
+	if (iommu->num_mmu == 0)
+		return PTR_ERR(iommu->bases[0]);
 
 	iommu->irq = platform_get_irq(pdev, 0);
 	if (iommu->irq < 0) {