Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/dmar.c            48
-rw-r--r--  drivers/iommu/intel-iommu.c     36
-rw-r--r--  drivers/iommu/intr_remapping.c  40
3 files changed, 62 insertions(+), 62 deletions(-)
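
This diff converts the locks that the VT-d code takes in non-preemptible, interrupt-disabled paths (iommu->register_lock, qi->q_lock, and irq_2_ir_lock) from spinlock_t to raw_spinlock_t. On PREEMPT_RT kernels a spinlock_t is a sleeping lock and must not be acquired in such contexts; raw_spinlock_t keeps true busy-wait semantics on every preemption model. The conversion is mechanical, as the minimal sketch below shows (the lock and function names in the sketch are illustrative, not taken from the patch):

	#include <linux/spinlock.h>

	/* before: on PREEMPT_RT, spinlock_t is backed by a sleeping rtmutex */
	static DEFINE_SPINLOCK(demo_lock);

	/* after: raw_spinlock_t always busy-waits, on every preemption model */
	static DEFINE_RAW_SPINLOCK(demo_raw_lock);

	static void demo_critical_section(void)
	{
		unsigned long flags;

		/* keep the section short and bounded: register pokes, table updates */
		raw_spin_lock_irqsave(&demo_raw_lock, flags);
		/* ... MMIO or table access ... */
		raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
	}
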
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 587e8f2d38d8..35c1e17fce1d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -652,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 		(unsigned long long)iommu->cap,
 		(unsigned long long)iommu->ecap);
 
-	spin_lock_init(&iommu->register_lock);
+	raw_spin_lock_init(&iommu->register_lock);
 
 	drhd->iommu = iommu;
 	return 0;
@@ -771,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 restart:
 	rc = 0;
 
-	spin_lock_irqsave(&qi->q_lock, flags);
+	raw_spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock_irqrestore(&qi->q_lock, flags);
+		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock_irqsave(&qi->q_lock, flags);
+		raw_spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
 	index = qi->free_head;
@@ -815,15 +815,15 @@ restart:
 		if (rc)
 			break;
 
-		spin_unlock(&qi->q_lock);
+		raw_spin_unlock(&qi->q_lock);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		raw_spin_lock(&qi->q_lock);
 	}
 
 	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
-	spin_unlock_irqrestore(&qi->q_lock, flags);
+	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 
 	if (rc == -EAGAIN)
 		goto restart;
@@ -912,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	if (!ecap_qis(iommu->ecap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
@@ -932,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 		      !(sts & DMA_GSTS_QIES), sts);
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -947,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* write zero to the tail reg */
 	writel(0, iommu->reg + DMAR_IQT_REG);
@@ -960,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1009,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_init(&qi->q_lock);
+	raw_spin_lock_init(&qi->q_lock);
 
 	__dmar_enable_qi(iommu);
 
@@ -1075,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data)
 	unsigned long flag;
 
 	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(0, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_mask(struct irq_data *data)
@@ -1088,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data)
 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 
 	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1100,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
 	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
 	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1112,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
 	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
 	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1153,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	u32 fault_status;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	if (fault_status)
 		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1192,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		writel(DMA_FRCD_F, iommu->reg + reg +
 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 		dmar_fault_do_one(iommu, type, fault_reason,
 				source_id, guest_addr);
@@ -1200,14 +1200,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		fault_index++;
 		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	}
 clear_rest:
 	/* clear all the other faults */
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return IRQ_HANDLED;
 }
 
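Note on the qi_submit_sync() hunks above: the queued-invalidation path polls for free descriptors and for hardware completion, dropping q_lock around each cpu_relax() so that other CPUs can submit and reclaim descriptors in the meantime. A condensed sketch of that polling idiom, lifted from the diff (demo_wait_for_free_slots is an illustrative name, not a function in the patch):

	static void demo_wait_for_free_slots(struct q_inval *qi)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&qi->q_lock, flags);
		while (qi->free_cnt < 3) {
			/* drop the lock so other CPUs can reclaim descriptors */
			raw_spin_unlock_irqrestore(&qi->q_lock, flags);
			cpu_relax();
			raw_spin_lock_irqsave(&qi->q_lock, flags);
		}
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
	}

With a raw lock this stays a genuine busy-wait even on PREEMPT_RT, which is what a bounded hardware-completion poll wants.
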
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f28d933c7927..be1953c239b0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -939,7 +939,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 
 	addr = iommu->root_entry;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
 
 	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -948,7 +948,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_RTPS), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -959,14 +959,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (!(val & DMA_GSTS_WBFS)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -993,14 +993,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
 	}
 	val |= DMA_CCMD_ICC;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
 		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -1039,7 +1039,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	if (cap_write_drain(iommu->cap))
 		val |= DMA_TLB_WRITE_DRAIN;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	/* Note: Only uses first TLB reg currently */
 	if (val_iva)
 		dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1049,7 +1049,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
 		      dmar_readq, (!(val & DMA_TLB_IVT)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 	/* check IOTLB invalidation granularity */
 	if (DMA_TLB_IAIG(val) == 0)
@@ -1165,7 +1165,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
 	writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1174,7 +1174,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
 		      readl, !(pmen & DMA_PMEN_PRS), pmen);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1182,7 +1182,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	iommu->gcmd |= DMA_GCMD_TE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1190,7 +1190,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_TES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 	return 0;
 }
 
@@ -1199,7 +1199,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	iommu->gcmd &= ~DMA_GCMD_TE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1207,7 +1207,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (!(sts & DMA_GSTS_TES)), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return 0;
 }
 
@@ -3329,7 +3329,7 @@ static int iommu_suspend(void)
 	for_each_active_iommu(iommu, drhd) {
 		iommu_disable_translation(iommu);
 
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
 		iommu->iommu_state[SR_DMAR_FECTL_REG] =
 			readl(iommu->reg + DMAR_FECTL_REG);
@@ -3340,7 +3340,7 @@ static int iommu_suspend(void)
 		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
 			readl(iommu->reg + DMAR_FEUADDR_REG);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 	return 0;
 
@@ -3367,7 +3367,7 @@ static void iommu_resume(void)
 
 	for_each_active_iommu(iommu, drhd) {
 
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
 		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
 			iommu->reg + DMAR_FECTL_REG);
@@ -3378,7 +3378,7 @@ static void iommu_resume(void)
 		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
 			iommu->reg + DMAR_FEUADDR_REG);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 
 	for_each_active_iommu(iommu, drhd)
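Every register_lock section in this file follows one shape: write a command bit to a DMAR register, then spin via IOMMU_WAIT_OP() until the status register acknowledges it, all with interrupts off. A simplified rendering of iommu_enable_translation() with the wait macro unrolled (demo_enable_translation is an illustrative name; the real IOMMU_WAIT_OP also panics after a timeout, omitted here):

	static int demo_enable_translation(struct intel_iommu *iommu)
	{
		unsigned long flags;
		u32 sts;

		raw_spin_lock_irqsave(&iommu->register_lock, flags);
		iommu->gcmd |= DMA_GCMD_TE;
		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

		/* IOMMU_WAIT_OP(), unrolled: poll until hardware acks */
		do {
			sts = readl(iommu->reg + DMAR_GSTS_REG);
			cpu_relax();
		} while (!(sts & DMA_GSTS_TES));

		raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
		return 0;
	}

Because the wait happens under the lock with interrupts disabled, the lock can never be allowed to sleep, hence raw_spinlock_t.
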
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index cfb0dd4bf0b6..07c9f189f314 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -54,7 +54,7 @@ static __init int setup_intremap(char *str)
 }
 early_param("intremap", setup_intremap);
 
-static DEFINE_SPINLOCK(irq_2_ir_lock);
+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
@@ -71,12 +71,12 @@ int get_irte(int irq, struct irte *entry)
 	if (!entry || !irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
@@ -110,7 +110,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		return -1;
 	}
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 			if (table->base[i].present)
@@ -122,7 +122,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -136,7 +136,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -161,10 +161,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
@@ -176,14 +176,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = subhandle;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -199,7 +199,7 @@ int modify_irte(int irq, struct irte *irte_modified)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	iommu = irq_iommu->iommu;
 
@@ -211,7 +211,7 @@ int modify_irte(int irq, struct irte *irte_modified)
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -279,7 +279,7 @@ int free_irte(int irq)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	rc = clear_entries(irq_iommu);
 
@@ -288,7 +288,7 @@ int free_irte(int irq)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -418,7 +418,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	addr = virt_to_phys((void *)iommu->ir_table->base);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
 		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRTPS), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 
 	/*
 	 * global invalidation of interrupt entry cache before enabling
@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* Enable interrupt-remapping */
 	iommu->gcmd |= DMA_GCMD_IRE;
@@ -446,7 +446,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 
@@ -494,7 +494,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
@@ -507,7 +507,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 		      readl, !(sts & DMA_GSTS_IRES), sts);
 
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int __init dmar_x2apic_optout(void)
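
The irq_2_ir_lock conversion follows from lock nesting: the IRTE accessors (modify_irte() and friends) are reached from irq-chip callbacks invoked while the irq core already holds the descriptor lock, which is a raw_spinlock_t, and a sleeping lock must never nest inside a raw one. A condensed sketch of that nesting (demo_affinity_update is illustrative; the real path runs through the irq chip's set_affinity callback):

	static void demo_affinity_update(struct irq_desc *desc, int irq,
					 struct irte *irte)
	{
		/*
		 * The irq core holds desc->lock (a raw_spinlock_t) with
		 * interrupts off; modify_irte() then takes irq_2_ir_lock
		 * underneath it, so irq_2_ir_lock must be raw as well.
		 */
		raw_spin_lock(&desc->lock);
		modify_irte(irq, irte);		/* takes irq_2_ir_lock internally */
		raw_spin_unlock(&desc->lock);
	}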