about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2017-08-31 09:44:25 -0400
committerWill Deacon <will.deacon@arm.com>2017-10-20 11:55:06 -0400
commit2f657add07a8f758e41076820157eeca1df22b79 (patch)
tree42415eaa81275d4456b0aa4eae434d3166777a25
parent2a22baa2d17f37f9bd86febcb69f10dbe6792b58 (diff)
iommu/arm-smmu-v3: Specialise CMD_SYNC handling
CMD_SYNC already has a bit of special treatment here and there, but as we're about to extend it with more functionality for completing outside the CMDQ lock, things are going to get rather messy if we keep trying to cram everything into a single generic command interface. Instead, let's break out the issuing of CMD_SYNC into its own specific helper where upcoming changes will have room to breathe.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	drivers/iommu/arm-smmu-v3.c	52
1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index d81d5dbffbe2..18a0fa7dd72d 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -929,13 +929,22 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
+static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+{
+	struct arm_smmu_queue *q = &smmu->cmdq.q;
+	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+
+	while (queue_insert_raw(q, cmd) == -ENOSPC) {
+		if (queue_poll_cons(q, false, wfe))
+			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+	}
+}
+
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 				    struct arm_smmu_cmdq_ent *ent)
 {
 	u64 cmd[CMDQ_ENT_DWORDS];
 	unsigned long flags;
-	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
-	struct arm_smmu_queue *q = &smmu->cmdq.q;
 
 	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
 		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
@@ -944,14 +953,27 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 	}
 
 	spin_lock_irqsave(&smmu->cmdq.lock, flags);
-	while (queue_insert_raw(q, cmd) == -ENOSPC) {
-		if (queue_poll_cons(q, false, wfe))
-			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
-	}
-
-	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
-		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+}
+
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+	u64 cmd[CMDQ_ENT_DWORDS];
+	unsigned long flags;
+	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+	struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
+	int ret;
+
+	arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+	spin_lock_irqsave(&smmu->cmdq.lock, flags);
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+	ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
 	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+	if (ret)
+		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
 }
 
 /* Context descriptor manipulation functions */
@@ -1027,8 +1049,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
 	};
 
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 }
 
 static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
@@ -1355,10 +1376,7 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
 /* IO_PGTABLE API */
 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
-	struct arm_smmu_cmdq_ent cmd;
-
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 }
 
 static void arm_smmu_tlb_sync(void *cookie)
@@ -2402,8 +2420,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	/* Invalidate any cached configuration */
 	cmd.opcode = CMDQ_OP_CFGI_ALL;
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 
 	/* Invalidate any stale TLB entries */
 	if (smmu->features & ARM_SMMU_FEAT_HYP) {
@@ -2413,8 +2430,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 
 	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 
 	/* Event queue */
 	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);