aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJean-Philippe Brucker <jean-philippe.brucker@arm.com>2016-08-22 09:42:24 -0400
committerWill Deacon <will.deacon@arm.com>2016-09-16 04:34:12 -0400
commitb4163fb3333cf2279f5bfa2bb4d2d93aa66a3eac (patch)
tree6161c98a707476d341af78ddfb29cfd5ea385802
parente2d42311ffc9a5014eb129d02eb0e7bc791430f0 (diff)
iommu/arm-smmu: Fix event queues synchronization
SMMUv3 only sends interrupts for event queues (EVTQ and PRIQ) when they transition from empty to non-empty. At the moment, if the SMMU adds new items to a queue before the event thread finished consuming a previous batch, the driver ignores any new item. The queue is then stuck in non-empty state and all subsequent events will be lost. As an example, consider the following flow, where (P, C) is the SMMU view of producer/consumer indices, and (p, c) the driver view:

                                               P C | p c
   1. SMMU appends a PPR to the PRI queue,     1 0 | 0 0
      sends an MSI
   2. PRIQ handler is called.                  1 0 | 1 0
   3. SMMU appends a PPR to the PRI queue.     2 0 | 1 0
   4. PRIQ thread removes the first element.   2 1 | 1 1
   5. PRIQ thread believes that the queue is
      empty, goes into idle indefinitely.

To avoid this, always synchronize the producer index and drain the queue once before leaving an event handler. In order to prevent races on the local producer index, move all event queue handling into the threads. Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--drivers/iommu/arm-smmu-v3.c150
1 file changed, 66 insertions, 84 deletions
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 641e88761319..d156c1e610d6 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1161,36 +1161,66 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1161 struct arm_smmu_queue *q = &smmu->evtq.q; 1161 struct arm_smmu_queue *q = &smmu->evtq.q;
1162 u64 evt[EVTQ_ENT_DWORDS]; 1162 u64 evt[EVTQ_ENT_DWORDS];
1163 1163
1164 while (!queue_remove_raw(q, evt)) { 1164 do {
1165 u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK; 1165 while (!queue_remove_raw(q, evt)) {
1166 u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
1166 1167
1167 dev_info(smmu->dev, "event 0x%02x received:\n", id); 1168 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1168 for (i = 0; i < ARRAY_SIZE(evt); ++i) 1169 for (i = 0; i < ARRAY_SIZE(evt); ++i)
1169 dev_info(smmu->dev, "\t0x%016llx\n", 1170 dev_info(smmu->dev, "\t0x%016llx\n",
1170 (unsigned long long)evt[i]); 1171 (unsigned long long)evt[i]);
1171 } 1172
1173 }
1174
1175 /*
1176 * Not much we can do on overflow, so scream and pretend we're
1177 * trying harder.
1178 */
1179 if (queue_sync_prod(q) == -EOVERFLOW)
1180 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1181 } while (!queue_empty(q));
1172 1182
1173 /* Sync our overflow flag, as we believe we're up to speed */ 1183 /* Sync our overflow flag, as we believe we're up to speed */
1174 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); 1184 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1175 return IRQ_HANDLED; 1185 return IRQ_HANDLED;
1176} 1186}
1177 1187
1178static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev) 1188static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1179{ 1189{
1180 irqreturn_t ret = IRQ_WAKE_THREAD; 1190 u32 sid, ssid;
1181 struct arm_smmu_device *smmu = dev; 1191 u16 grpid;
1182 struct arm_smmu_queue *q = &smmu->evtq.q; 1192 bool ssv, last;
1183 1193
1184 /* 1194 sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
1185 * Not much we can do on overflow, so scream and pretend we're 1195 ssv = evt[0] & PRIQ_0_SSID_V;
1186 * trying harder. 1196 ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
1187 */ 1197 last = evt[0] & PRIQ_0_PRG_LAST;
1188 if (queue_sync_prod(q) == -EOVERFLOW) 1198 grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
1189 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); 1199
1190 else if (queue_empty(q)) 1200 dev_info(smmu->dev, "unexpected PRI request received:\n");
1191 ret = IRQ_NONE; 1201 dev_info(smmu->dev,
1202 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1203 sid, ssid, grpid, last ? "L" : "",
1204 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
1205 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
1206 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
1207 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
1208 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
1209
1210 if (last) {
1211 struct arm_smmu_cmdq_ent cmd = {
1212 .opcode = CMDQ_OP_PRI_RESP,
1213 .substream_valid = ssv,
1214 .pri = {
1215 .sid = sid,
1216 .ssid = ssid,
1217 .grpid = grpid,
1218 .resp = PRI_RESP_DENY,
1219 },
1220 };
1192 1221
1193 return ret; 1222 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1223 }
1194} 1224}
1195 1225
1196static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) 1226static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
@@ -1199,63 +1229,19 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
1199 struct arm_smmu_queue *q = &smmu->priq.q; 1229 struct arm_smmu_queue *q = &smmu->priq.q;
1200 u64 evt[PRIQ_ENT_DWORDS]; 1230 u64 evt[PRIQ_ENT_DWORDS];
1201 1231
1202 while (!queue_remove_raw(q, evt)) { 1232 do {
1203 u32 sid, ssid; 1233 while (!queue_remove_raw(q, evt))
1204 u16 grpid; 1234 arm_smmu_handle_ppr(smmu, evt);
1205 bool ssv, last;
1206
1207 sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
1208 ssv = evt[0] & PRIQ_0_SSID_V;
1209 ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
1210 last = evt[0] & PRIQ_0_PRG_LAST;
1211 grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
1212
1213 dev_info(smmu->dev, "unexpected PRI request received:\n");
1214 dev_info(smmu->dev,
1215 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1216 sid, ssid, grpid, last ? "L" : "",
1217 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
1218 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
1219 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
1220 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
1221 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
1222
1223 if (last) {
1224 struct arm_smmu_cmdq_ent cmd = {
1225 .opcode = CMDQ_OP_PRI_RESP,
1226 .substream_valid = ssv,
1227 .pri = {
1228 .sid = sid,
1229 .ssid = ssid,
1230 .grpid = grpid,
1231 .resp = PRI_RESP_DENY,
1232 },
1233 };
1234 1235
1235 arm_smmu_cmdq_issue_cmd(smmu, &cmd); 1236 if (queue_sync_prod(q) == -EOVERFLOW)
1236 } 1237 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1237 } 1238 } while (!queue_empty(q));
1238 1239
1239 /* Sync our overflow flag, as we believe we're up to speed */ 1240 /* Sync our overflow flag, as we believe we're up to speed */
1240 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); 1241 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1241 return IRQ_HANDLED; 1242 return IRQ_HANDLED;
1242} 1243}
1243 1244
1244static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
1245{
1246 irqreturn_t ret = IRQ_WAKE_THREAD;
1247 struct arm_smmu_device *smmu = dev;
1248 struct arm_smmu_queue *q = &smmu->priq.q;
1249
1250 /* PRIQ overflow indicates a programming error */
1251 if (queue_sync_prod(q) == -EOVERFLOW)
1252 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1253 else if (queue_empty(q))
1254 ret = IRQ_NONE;
1255
1256 return ret;
1257}
1258
1259static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev) 1245static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
1260{ 1246{
1261 /* We don't actually use CMD_SYNC interrupts for anything */ 1247 /* We don't actually use CMD_SYNC interrupts for anything */
@@ -1288,15 +1274,11 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
1288 if (active & GERROR_MSI_GERROR_ABT_ERR) 1274 if (active & GERROR_MSI_GERROR_ABT_ERR)
1289 dev_warn(smmu->dev, "GERROR MSI write aborted\n"); 1275 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1290 1276
1291 if (active & GERROR_MSI_PRIQ_ABT_ERR) { 1277 if (active & GERROR_MSI_PRIQ_ABT_ERR)
1292 dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); 1278 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1293 arm_smmu_priq_handler(irq, smmu->dev);
1294 }
1295 1279
1296 if (active & GERROR_MSI_EVTQ_ABT_ERR) { 1280 if (active & GERROR_MSI_EVTQ_ABT_ERR)
1297 dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); 1281 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1298 arm_smmu_evtq_handler(irq, smmu->dev);
1299 }
1300 1282
1301 if (active & GERROR_MSI_CMDQ_ABT_ERR) { 1283 if (active & GERROR_MSI_CMDQ_ABT_ERR) {
1302 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); 1284 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
@@ -2235,10 +2217,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2235 /* Request interrupt lines */ 2217 /* Request interrupt lines */
2236 irq = smmu->evtq.q.irq; 2218 irq = smmu->evtq.q.irq;
2237 if (irq) { 2219 if (irq) {
2238 ret = devm_request_threaded_irq(smmu->dev, irq, 2220 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
2239 arm_smmu_evtq_handler,
2240 arm_smmu_evtq_thread, 2221 arm_smmu_evtq_thread,
2241 0, "arm-smmu-v3-evtq", smmu); 2222 IRQF_ONESHOT,
2223 "arm-smmu-v3-evtq", smmu);
2242 if (ret < 0) 2224 if (ret < 0)
2243 dev_warn(smmu->dev, "failed to enable evtq irq\n"); 2225 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2244 } 2226 }
@@ -2263,10 +2245,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2263 if (smmu->features & ARM_SMMU_FEAT_PRI) { 2245 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2264 irq = smmu->priq.q.irq; 2246 irq = smmu->priq.q.irq;
2265 if (irq) { 2247 if (irq) {
2266 ret = devm_request_threaded_irq(smmu->dev, irq, 2248 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
2267 arm_smmu_priq_handler,
2268 arm_smmu_priq_thread, 2249 arm_smmu_priq_thread,
2269 0, "arm-smmu-v3-priq", 2250 IRQF_ONESHOT,
2251 "arm-smmu-v3-priq",
2270 smmu); 2252 smmu);
2271 if (ret < 0) 2253 if (ret < 0)
2272 dev_warn(smmu->dev, 2254 dev_warn(smmu->dev,