about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2018-03-26 08:35:12 -0400
committerWill Deacon <will.deacon@arm.com>2018-03-27 09:12:05 -0400
commit7417b99c49e5bb77e04d64c915da2ee4bfcbf8a8 (patch)
tree4ce783032d9c68be4ae11b67e61767a4dcc5d9e6
parentba08bdcbf7fd6c3b3a94496ca15b5b66dbea5034 (diff)
iommu/arm-smmu-v3: Clean up queue definitions
As with registers and tables, use GENMASK and the bitfield accessors consistently for queue fields, to save some lines and ease maintenance a little. This now leaves everything in a nice state where all named field definitions expect to be used with bitfield accessors (although since single-bit fields can still be used directly we leave some of those uses as-is to avoid unnecessary churn), while the few remaining *_MASK definitions apply exclusively to in-place values. Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--drivers/iommu/arm-smmu-v3.c128
1 file changed, 55 insertions(+), 73 deletions(-)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4539a6cde9f5..ae6049bddd73 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -184,6 +184,7 @@
184#define ARM_SMMU_SH_OSH 2 184#define ARM_SMMU_SH_OSH 2
185#define ARM_SMMU_SH_ISH 3 185#define ARM_SMMU_SH_ISH 3
186#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1 186#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
187#define ARM_SMMU_MEMATTR_OIWB 0xf
187 188
188#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) 189#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
189#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) 190#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
@@ -301,64 +302,49 @@
301#define CMDQ_ERR_CERROR_ILL_IDX 1 302#define CMDQ_ERR_CERROR_ILL_IDX 1
302#define CMDQ_ERR_CERROR_ABT_IDX 2 303#define CMDQ_ERR_CERROR_ABT_IDX 2
303 304
304#define CMDQ_0_OP_SHIFT 0 305#define CMDQ_0_OP GENMASK_ULL(7, 0)
305#define CMDQ_0_OP_MASK 0xffUL
306#define CMDQ_0_SSV (1UL << 11) 306#define CMDQ_0_SSV (1UL << 11)
307 307
308#define CMDQ_PREFETCH_0_SID_SHIFT 32 308#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
309#define CMDQ_PREFETCH_1_SIZE_SHIFT 0 309#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
310#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12) 310#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
311 311
312#define CMDQ_CFGI_0_SID_SHIFT 32 312#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
313#define CMDQ_CFGI_0_SID_MASK 0xffffffffUL
314#define CMDQ_CFGI_1_LEAF (1UL << 0) 313#define CMDQ_CFGI_1_LEAF (1UL << 0)
315#define CMDQ_CFGI_1_RANGE_SHIFT 0 314#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
316#define CMDQ_CFGI_1_RANGE_MASK 0x1fUL
317 315
318#define CMDQ_TLBI_0_VMID_SHIFT 32 316#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
319#define CMDQ_TLBI_0_ASID_SHIFT 48 317#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
320#define CMDQ_TLBI_1_LEAF (1UL << 0) 318#define CMDQ_TLBI_1_LEAF (1UL << 0)
321#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12) 319#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
322#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(47, 12) 320#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(47, 12)
323 321
324#define CMDQ_PRI_0_SSID_SHIFT 12 322#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
325#define CMDQ_PRI_0_SSID_MASK 0xfffffUL 323#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
326#define CMDQ_PRI_0_SID_SHIFT 32 324#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
327#define CMDQ_PRI_0_SID_MASK 0xffffffffUL 325#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
328#define CMDQ_PRI_1_GRPID_SHIFT 0 326
329#define CMDQ_PRI_1_GRPID_MASK 0x1ffUL 327#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
330#define CMDQ_PRI_1_RESP_SHIFT 12 328#define CMDQ_SYNC_0_CS_NONE 0
331#define CMDQ_PRI_1_RESP_DENY (0UL << CMDQ_PRI_1_RESP_SHIFT) 329#define CMDQ_SYNC_0_CS_IRQ 1
332#define CMDQ_PRI_1_RESP_FAIL (1UL << CMDQ_PRI_1_RESP_SHIFT) 330#define CMDQ_SYNC_0_CS_SEV 2
333#define CMDQ_PRI_1_RESP_SUCC (2UL << CMDQ_PRI_1_RESP_SHIFT) 331#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
334 332#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
335#define CMDQ_SYNC_0_CS_SHIFT 12 333#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
336#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
337#define CMDQ_SYNC_0_CS_IRQ (1UL << CMDQ_SYNC_0_CS_SHIFT)
338#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)
339#define CMDQ_SYNC_0_MSH_SHIFT 22
340#define CMDQ_SYNC_0_MSH_ISH (3UL << CMDQ_SYNC_0_MSH_SHIFT)
341#define CMDQ_SYNC_0_MSIATTR_SHIFT 24
342#define CMDQ_SYNC_0_MSIATTR_OIWB (0xfUL << CMDQ_SYNC_0_MSIATTR_SHIFT)
343#define CMDQ_SYNC_0_MSIDATA_SHIFT 32
344#define CMDQ_SYNC_0_MSIDATA_MASK 0xffffffffUL
345#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(47, 2) 334#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(47, 2)
346 335
347/* Event queue */ 336/* Event queue */
348#define EVTQ_ENT_DWORDS 4 337#define EVTQ_ENT_DWORDS 4
349#define EVTQ_MAX_SZ_SHIFT 7 338#define EVTQ_MAX_SZ_SHIFT 7
350 339
351#define EVTQ_0_ID_SHIFT 0 340#define EVTQ_0_ID GENMASK_ULL(7, 0)
352#define EVTQ_0_ID_MASK 0xffUL
353 341
354/* PRI queue */ 342/* PRI queue */
355#define PRIQ_ENT_DWORDS 2 343#define PRIQ_ENT_DWORDS 2
356#define PRIQ_MAX_SZ_SHIFT 8 344#define PRIQ_MAX_SZ_SHIFT 8
357 345
358#define PRIQ_0_SID_SHIFT 0 346#define PRIQ_0_SID GENMASK_ULL(31, 0)
359#define PRIQ_0_SID_MASK 0xffffffffUL 347#define PRIQ_0_SSID GENMASK_ULL(51, 32)
360#define PRIQ_0_SSID_SHIFT 32
361#define PRIQ_0_SSID_MASK 0xfffffUL
362#define PRIQ_0_PERM_PRIV (1UL << 58) 348#define PRIQ_0_PERM_PRIV (1UL << 58)
363#define PRIQ_0_PERM_EXEC (1UL << 59) 349#define PRIQ_0_PERM_EXEC (1UL << 59)
364#define PRIQ_0_PERM_READ (1UL << 60) 350#define PRIQ_0_PERM_READ (1UL << 60)
@@ -366,8 +352,7 @@
366#define PRIQ_0_PRG_LAST (1UL << 62) 352#define PRIQ_0_PRG_LAST (1UL << 62)
367#define PRIQ_0_SSID_V (1UL << 63) 353#define PRIQ_0_SSID_V (1UL << 63)
368 354
369#define PRIQ_1_PRG_IDX_SHIFT 0 355#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
370#define PRIQ_1_PRG_IDX_MASK 0x1ffUL
371#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12) 356#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
372 357
373/* High-level queue structures */ 358/* High-level queue structures */
@@ -384,9 +369,9 @@ MODULE_PARM_DESC(disable_bypass,
384 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); 369 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
385 370
386enum pri_resp { 371enum pri_resp {
387 PRI_RESP_DENY, 372 PRI_RESP_DENY = 0,
388 PRI_RESP_FAIL, 373 PRI_RESP_FAIL = 1,
389 PRI_RESP_SUCC, 374 PRI_RESP_SUCC = 2,
390}; 375};
391 376
392enum arm_smmu_msi_index { 377enum arm_smmu_msi_index {
@@ -790,67 +775,64 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
790static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) 775static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
791{ 776{
792 memset(cmd, 0, CMDQ_ENT_DWORDS << 3); 777 memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
793 cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT; 778 cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
794 779
795 switch (ent->opcode) { 780 switch (ent->opcode) {
796 case CMDQ_OP_TLBI_EL2_ALL: 781 case CMDQ_OP_TLBI_EL2_ALL:
797 case CMDQ_OP_TLBI_NSNH_ALL: 782 case CMDQ_OP_TLBI_NSNH_ALL:
798 break; 783 break;
799 case CMDQ_OP_PREFETCH_CFG: 784 case CMDQ_OP_PREFETCH_CFG:
800 cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT; 785 cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
801 cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT; 786 cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
802 cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK; 787 cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
803 break; 788 break;
804 case CMDQ_OP_CFGI_STE: 789 case CMDQ_OP_CFGI_STE:
805 cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT; 790 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
806 cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0; 791 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
807 break; 792 break;
808 case CMDQ_OP_CFGI_ALL: 793 case CMDQ_OP_CFGI_ALL:
809 /* Cover the entire SID range */ 794 /* Cover the entire SID range */
810 cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT; 795 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
811 break; 796 break;
812 case CMDQ_OP_TLBI_NH_VA: 797 case CMDQ_OP_TLBI_NH_VA:
813 cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; 798 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
814 cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; 799 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
815 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; 800 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
816 break; 801 break;
817 case CMDQ_OP_TLBI_S2_IPA: 802 case CMDQ_OP_TLBI_S2_IPA:
818 cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; 803 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
819 cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; 804 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
820 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; 805 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
821 break; 806 break;
822 case CMDQ_OP_TLBI_NH_ASID: 807 case CMDQ_OP_TLBI_NH_ASID:
823 cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; 808 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
824 /* Fallthrough */ 809 /* Fallthrough */
825 case CMDQ_OP_TLBI_S12_VMALL: 810 case CMDQ_OP_TLBI_S12_VMALL:
826 cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; 811 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
827 break; 812 break;
828 case CMDQ_OP_PRI_RESP: 813 case CMDQ_OP_PRI_RESP:
829 cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0; 814 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
830 cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT; 815 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
831 cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT; 816 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
832 cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT; 817 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
833 switch (ent->pri.resp) { 818 switch (ent->pri.resp) {
834 case PRI_RESP_DENY: 819 case PRI_RESP_DENY:
835 cmd[1] |= CMDQ_PRI_1_RESP_DENY;
836 break;
837 case PRI_RESP_FAIL: 820 case PRI_RESP_FAIL:
838 cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
839 break;
840 case PRI_RESP_SUCC: 821 case PRI_RESP_SUCC:
841 cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
842 break; 822 break;
843 default: 823 default:
844 return -EINVAL; 824 return -EINVAL;
845 } 825 }
826 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
846 break; 827 break;
847 case CMDQ_OP_CMD_SYNC: 828 case CMDQ_OP_CMD_SYNC:
848 if (ent->sync.msiaddr) 829 if (ent->sync.msiaddr)
849 cmd[0] |= CMDQ_SYNC_0_CS_IRQ; 830 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
850 else 831 else
851 cmd[0] |= CMDQ_SYNC_0_CS_SEV; 832 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
852 cmd[0] |= CMDQ_SYNC_0_MSH_ISH | CMDQ_SYNC_0_MSIATTR_OIWB; 833 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
853 cmd[0] |= (u64)ent->sync.msidata << CMDQ_SYNC_0_MSIDATA_SHIFT; 834 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
835 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
854 cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; 836 cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
855 break; 837 break;
856 default: 838 default:
@@ -1239,7 +1221,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1239 1221
1240 do { 1222 do {
1241 while (!queue_remove_raw(q, evt)) { 1223 while (!queue_remove_raw(q, evt)) {
1242 u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK; 1224 u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
1243 1225
1244 dev_info(smmu->dev, "event 0x%02x received:\n", id); 1226 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1245 for (i = 0; i < ARRAY_SIZE(evt); ++i) 1227 for (i = 0; i < ARRAY_SIZE(evt); ++i)
@@ -1267,11 +1249,11 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1267 u16 grpid; 1249 u16 grpid;
1268 bool ssv, last; 1250 bool ssv, last;
1269 1251
1270 sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK; 1252 sid = FIELD_GET(PRIQ_0_SID, evt[0]);
1271 ssv = evt[0] & PRIQ_0_SSID_V; 1253 ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
1272 ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0; 1254 ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
1273 last = evt[0] & PRIQ_0_PRG_LAST; 1255 last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
1274 grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK; 1256 grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
1275 1257
1276 dev_info(smmu->dev, "unexpected PRI request received:\n"); 1258 dev_info(smmu->dev, "unexpected PRI request received:\n");
1277 dev_info(smmu->dev, 1259 dev_info(smmu->dev,