about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2011-04-06 11:26:49 -0400
committerJoerg Roedel <joerg.roedel@amd.com>2011-04-07 04:46:00 -0400
commit815b33fdc279d34ab40a8bfe1866623a4cc5669b (patch)
tree8639065da6160d51b75c2a9cde2d7d0574d4024b
parent11b6402c6673b530fac9920c5640c75e99fee956 (diff)
x86/amd-iommu: Cleanup completion-wait handling
This patch cleans up the implementation of completion-wait command sending. It also switches the completion indicator from the MMIO bit to a memory store which can be checked without IOMMU locking. As a side effect this patch makes the __iommu_queue_command function obsolete and so it is removed too.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r-- arch/x86/kernel/amd_iommu.c | 107
1 file changed, 28 insertions(+), 79 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f8ec28ea3314..073c64b1994b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/iommu-helper.h> 26#include <linux/iommu-helper.h>
27#include <linux/iommu.h> 27#include <linux/iommu.h>
28#include <linux/delay.h>
28#include <asm/proto.h> 29#include <asm/proto.h>
29#include <asm/iommu.h> 30#include <asm/iommu.h>
30#include <asm/gart.h> 31#include <asm/gart.h>
@@ -34,7 +35,7 @@
34 35
35#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) 36#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
36 37
37#define EXIT_LOOP_COUNT 10000000 38#define LOOP_TIMEOUT 100000
38 39
39static DEFINE_RWLOCK(amd_iommu_devtable_lock); 40static DEFINE_RWLOCK(amd_iommu_devtable_lock);
40 41
@@ -383,10 +384,14 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
383 * 384 *
384 ****************************************************************************/ 385 ****************************************************************************/
385 386
386static void build_completion_wait(struct iommu_cmd *cmd) 387static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
387{ 388{
389 WARN_ON(address & 0x7ULL);
390
388 memset(cmd, 0, sizeof(*cmd)); 391 memset(cmd, 0, sizeof(*cmd));
389 cmd->data[0] = CMD_COMPL_WAIT_INT_MASK; 392 cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
393 cmd->data[1] = upper_32_bits(__pa(address));
394 cmd->data[2] = 1;
390 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); 395 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
391} 396}
392 397
@@ -432,12 +437,14 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
432 * Writes the command to the IOMMUs command buffer and informs the 437 * Writes the command to the IOMMUs command buffer and informs the
433 * hardware about the new command. Must be called with iommu->lock held. 438 * hardware about the new command. Must be called with iommu->lock held.
434 */ 439 */
435static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 440static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
436{ 441{
442 unsigned long flags;
437 u32 tail, head; 443 u32 tail, head;
438 u8 *target; 444 u8 *target;
439 445
440 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); 446 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
447 spin_lock_irqsave(&iommu->lock, flags);
441 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 448 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
442 target = iommu->cmd_buf + tail; 449 target = iommu->cmd_buf + tail;
443 memcpy_toio(target, cmd, sizeof(*cmd)); 450 memcpy_toio(target, cmd, sizeof(*cmd));
@@ -446,99 +453,41 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
446 if (tail == head) 453 if (tail == head)
447 return -ENOMEM; 454 return -ENOMEM;
448 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 455 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
449 456 iommu->need_sync = true;
450 return 0;
451}
452
453/*
454 * General queuing function for commands. Takes iommu->lock and calls
455 * __iommu_queue_command().
456 */
457static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
458{
459 unsigned long flags;
460 int ret;
461
462 spin_lock_irqsave(&iommu->lock, flags);
463 ret = __iommu_queue_command(iommu, cmd);
464 if (!ret)
465 iommu->need_sync = true;
466 spin_unlock_irqrestore(&iommu->lock, flags); 457 spin_unlock_irqrestore(&iommu->lock, flags);
467 458
468 return ret; 459 return 0;
469}
470
471/*
472 * This function waits until an IOMMU has completed a completion
473 * wait command
474 */
475static void __iommu_wait_for_completion(struct amd_iommu *iommu)
476{
477 int ready = 0;
478 unsigned status = 0;
479 unsigned long i = 0;
480
481 INC_STATS_COUNTER(compl_wait);
482
483 while (!ready && (i < EXIT_LOOP_COUNT)) {
484 ++i;
485 /* wait for the bit to become one */
486 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
487 ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
488 }
489
490 /* set bit back to zero */
491 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
492 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
493
494 if (unlikely(i == EXIT_LOOP_COUNT))
495 iommu->reset_in_progress = true;
496} 460}
497 461
498/* 462/*
499 * This function queues a completion wait command into the command 463 * This function queues a completion wait command into the command
500 * buffer of an IOMMU 464 * buffer of an IOMMU
501 */ 465 */
502static int __iommu_completion_wait(struct amd_iommu *iommu)
503{
504 struct iommu_cmd cmd;
505
506 build_completion_wait(&cmd);
507
508 return __iommu_queue_command(iommu, &cmd);
509}
510
511/*
512 * This function is called whenever we need to ensure that the IOMMU has
513 * completed execution of all commands we sent. It sends a
514 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
515 * us about that by writing a value to a physical address we pass with
516 * the command.
517 */
518static int iommu_completion_wait(struct amd_iommu *iommu) 466static int iommu_completion_wait(struct amd_iommu *iommu)
519{ 467{
520 int ret = 0; 468 struct iommu_cmd cmd;
521 unsigned long flags; 469 volatile u64 sem = 0;
522 470 int ret, i = 0;
523 spin_lock_irqsave(&iommu->lock, flags);
524 471
525 if (!iommu->need_sync) 472 if (!iommu->need_sync)
526 goto out; 473 return 0;
527
528 ret = __iommu_completion_wait(iommu);
529 474
530 iommu->need_sync = false; 475 build_completion_wait(&cmd, (u64)&sem);
531 476
477 ret = iommu_queue_command(iommu, &cmd);
532 if (ret) 478 if (ret)
533 goto out; 479 return ret;
534
535 __iommu_wait_for_completion(iommu);
536 480
537out: 481 while (sem == 0 && i < LOOP_TIMEOUT) {
538 spin_unlock_irqrestore(&iommu->lock, flags); 482 udelay(1);
483 i += 1;
484 }
539 485
540 if (iommu->reset_in_progress) 486 if (i == LOOP_TIMEOUT) {
487 pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
488 iommu->reset_in_progress = true;
541 reset_iommu_command_buffer(iommu); 489 reset_iommu_command_buffer(iommu);
490 }
542 491
543 return 0; 492 return 0;
544} 493}