aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/amd_iommu.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2011-04-06 12:38:20 -0400
committerJoerg Roedel <joerg.roedel@amd.com>2011-04-07 04:46:07 -0400
commitac0ea6e92b2227c86fe4f7f9eb429071d617a25d (patch)
tree7094f983ef784cf050f1f5d192112bab67028ae9 /arch/x86/kernel/amd_iommu.c
parent17b124bf1463582005d662d4dd95f037ad863c57 (diff)
x86/amd-iommu: Improve handling of full command buffer
This patch improves the handling of commands when the IOMMU command buffer is nearly full. In this case it issues a completion wait command and waits until the IOMMU has processed it before continuing to queue new commands. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--arch/x86/kernel/amd_iommu.c88
1 file changed, 65 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 9d66b2092ae1..75c7f8c3fe12 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -381,6 +381,39 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
381 * 381 *
382 ****************************************************************************/ 382 ****************************************************************************/
383 383
384static int wait_on_sem(volatile u64 *sem)
385{
386 int i = 0;
387
388 while (*sem == 0 && i < LOOP_TIMEOUT) {
389 udelay(1);
390 i += 1;
391 }
392
393 if (i == LOOP_TIMEOUT) {
394 pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
395 return -EIO;
396 }
397
398 return 0;
399}
400
401static void copy_cmd_to_buffer(struct amd_iommu *iommu,
402 struct iommu_cmd *cmd,
403 u32 tail)
404{
405 u8 *target;
406
407 target = iommu->cmd_buf + tail;
408 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
409
410 /* Copy command to buffer */
411 memcpy(target, cmd, sizeof(*cmd));
412
413 /* Tell the IOMMU about it */
414 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
415}
416
384static void build_completion_wait(struct iommu_cmd *cmd, u64 address) 417static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
385{ 418{
386 WARN_ON(address & 0x7ULL); 419 WARN_ON(address & 0x7ULL);
@@ -432,25 +465,44 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
432 465
433/* 466/*
434 * Writes the command to the IOMMUs command buffer and informs the 467 * Writes the command to the IOMMUs command buffer and informs the
435 * hardware about the new command. Must be called with iommu->lock held. 468 * hardware about the new command.
436 */ 469 */
437static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 470static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
438{ 471{
472 u32 left, tail, head, next_tail;
439 unsigned long flags; 473 unsigned long flags;
440 u32 tail, head;
441 u8 *target;
442 474
443 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); 475 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
476
477again:
444 spin_lock_irqsave(&iommu->lock, flags); 478 spin_lock_irqsave(&iommu->lock, flags);
445 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 479
446 target = iommu->cmd_buf + tail; 480 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
447 memcpy_toio(target, cmd, sizeof(*cmd)); 481 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
448 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; 482 next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
449 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); 483 left = (head - next_tail) % iommu->cmd_buf_size;
450 if (tail == head) 484
451 return -ENOMEM; 485 if (left <= 2) {
452 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 486 struct iommu_cmd sync_cmd;
487 volatile u64 sem = 0;
488 int ret;
489
490 build_completion_wait(&sync_cmd, (u64)&sem);
491 copy_cmd_to_buffer(iommu, &sync_cmd, tail);
492
493 spin_unlock_irqrestore(&iommu->lock, flags);
494
495 if ((ret = wait_on_sem(&sem)) != 0)
496 return ret;
497
498 goto again;
499 }
500
501 copy_cmd_to_buffer(iommu, cmd, tail);
502
503 /* We need to sync now to make sure all commands are processed */
453 iommu->need_sync = true; 504 iommu->need_sync = true;
505
454 spin_unlock_irqrestore(&iommu->lock, flags); 506 spin_unlock_irqrestore(&iommu->lock, flags);
455 507
456 return 0; 508 return 0;
@@ -464,7 +516,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
464{ 516{
465 struct iommu_cmd cmd; 517 struct iommu_cmd cmd;
466 volatile u64 sem = 0; 518 volatile u64 sem = 0;
467 int ret, i = 0; 519 int ret;
468 520
469 if (!iommu->need_sync) 521 if (!iommu->need_sync)
470 return 0; 522 return 0;
@@ -475,17 +527,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
475 if (ret) 527 if (ret)
476 return ret; 528 return ret;
477 529
478 while (sem == 0 && i < LOOP_TIMEOUT) { 530 return wait_on_sem(&sem);
479 udelay(1);
480 i += 1;
481 }
482
483 if (i == LOOP_TIMEOUT) {
484 pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
485 ret = -EIO;
486 }
487
488 return 0;
489} 531}
490 532
491/* 533/*