author		Will Deacon <will.deacon@arm.com>	2017-10-19 11:41:53 -0400
committer	Will Deacon <will.deacon@arm.com>	2017-10-20 11:55:09 -0400
commit		49806599c31d77b1050022aeb3da3051cd9f85f6 (patch)
tree		362113ce69be0dfde02281a7be143243ed4edd77 /drivers
parent		37de98f8f1cf330918b242cd3ce13751857243a6 (diff)
iommu/arm-smmu-v3: Split arm_smmu_cmdq_issue_sync in half
arm_smmu_cmdq_issue_sync is a little unwieldy now that it supports both MSI and event-based polling, so split it into two functions to make things easier to follow.

Signed-off-by: Will Deacon <will.deacon@arm.com>
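The shape of the change is a common flag-to-dispatch split: one function that branched on a capability flag becomes two single-purpose helpers plus a thin wrapper that picks between them, so callers keep the old entry point. Below is a minimal, standalone sketch of that pattern with hypothetical names and stubbed-out bodies; it is not the driver's real API, only the refactoring shape under those assumptions.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical capability flags standing in for the smmu->features bits. */
	struct fake_dev {
		bool has_msi;
		bool coherent;
	};

	/* MSI flavour: post a sync command that carries an MSI address, then
	 * wait for the write-back (stubbed here).
	 */
	static int issue_sync_msi(struct fake_dev *dev)
	{
		(void)dev;
		return 0;
	}

	/* Polling flavour: post a plain sync command and poll the queue for
	 * consumption (stubbed here).
	 */
	static int issue_sync_poll(struct fake_dev *dev)
	{
		(void)dev;
		return 0;
	}

	/* Thin dispatcher: the only caller-visible entry point, as before. */
	static void issue_sync(struct fake_dev *dev)
	{
		bool msi = dev->has_msi && dev->coherent;
		int ret = msi ? issue_sync_msi(dev) : issue_sync_poll(dev);

		if (ret)
			fprintf(stderr, "sync timeout\n");
	}

	int main(void)
	{
		struct fake_dev dev = { .has_msi = true, .coherent = true };

		issue_sync(&dev);
		return 0;
	}

One payoff visible in the diff below: each helper owns exactly one completion scheme, so the MSI path can poll its memory location after dropping the command-queue lock, while the event-based path keeps its queue poll inside the locked region without any if (!msi) branching.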
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/iommu/arm-smmu-v3.c	47
1 file changed, 35 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ceb8f9ef4bad..3876a0328589 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -984,7 +984,7 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
  * The difference between val and sync_idx is bounded by the maximum size of
  * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
  */
-static int arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
 {
 	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_SYNC_TIMEOUT_US);
 	u32 val = smp_cond_load_acquire(&smmu->sync_count,
@@ -994,30 +994,53 @@ static int arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
 	return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
 }
 
-static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+{
+	u64 cmd[CMDQ_ENT_DWORDS];
+	unsigned long flags;
+	struct arm_smmu_cmdq_ent ent = {
+		.opcode = CMDQ_OP_CMD_SYNC,
+		.sync	= {
+			.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
+			.msiaddr = virt_to_phys(&smmu->sync_count),
+		},
+	};
+
+	arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+	spin_lock_irqsave(&smmu->cmdq.lock, flags);
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+	return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+}
+
+static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 {
 	u64 cmd[CMDQ_ENT_DWORDS];
 	unsigned long flags;
 	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
-	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
-		   (smmu->features & ARM_SMMU_FEAT_COHERENCY);
 	struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
 	int ret;
 
-	if (msi) {
-		ent.sync.msidata = atomic_inc_return_relaxed(&smmu->sync_nr);
-		ent.sync.msiaddr = virt_to_phys(&smmu->sync_count);
-	}
 	arm_smmu_cmdq_build_cmd(cmd, &ent);
 
 	spin_lock_irqsave(&smmu->cmdq.lock, flags);
 	arm_smmu_cmdq_insert_cmd(smmu, cmd);
-	if (!msi)
-		ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
+	ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
 	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
 
-	if (msi)
-		ret = arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+	return ret;
+}
+
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+	int ret;
+	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
+		   (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+
+	ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
+		  : __arm_smmu_cmdq_issue_sync(smmu);
 	if (ret)
 		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
 }