author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:17:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:17:32 -0400
commit     3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (patch)
tree       88647d9dc50d634dee9cfeb7f354d620977a2f33 /drivers/iommu/dmar.c
parent     982653009b883ef1529089e3e6f1ae2fee41cbe2 (diff)
parent     68cc3990a545dc0da221b4844dd8b9c06623a6c5 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making cputimer->cputime a raw lock conflicted with the ABBA fix in commit bcd5cff7216f ("cputimer: Cure lock inversion").
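Every hunk in this file applies the same mechanical annotation: the lock's declaration, initializer, and lock/unlock calls move from the spinlock_t API to the raw_spinlock_t API, so the lock keeps busy-waiting even on PREEMPT_RT kernels where ordinary spinlocks become sleeping locks. A minimal sketch of that pattern follows; the demo_dev structure, field names, and functions are hypothetical and not taken from dmar.c.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device state, used only to illustrate the annotation. */
struct demo_dev {
        raw_spinlock_t reg_lock;        /* was: spinlock_t reg_lock; */
        u32 ctrl;
};

static void demo_dev_init(struct demo_dev *dev)
{
        /* was: spin_lock_init(&dev->reg_lock); */
        raw_spin_lock_init(&dev->reg_lock);
}

static void demo_dev_write_ctrl(struct demo_dev *dev, u32 val)
{
        unsigned long flags;

        /*
         * was: spin_lock_irqsave()/spin_unlock_irqrestore().
         * A raw_spinlock_t always spins, even on PREEMPT_RT, so it stays
         * usable for short critical sections entered from interrupt context.
         */
        raw_spin_lock_irqsave(&dev->reg_lock, flags);
        dev->ctrl = val;
        raw_spin_unlock_irqrestore(&dev->reg_lock, flags);
}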
Diffstat (limited to 'drivers/iommu/dmar.c')
-rw-r--r--  drivers/iommu/dmar.c  48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 587e8f2d38d8..35c1e17fce1d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -652,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 		(unsigned long long)iommu->cap,
 		(unsigned long long)iommu->ecap);
 
-	spin_lock_init(&iommu->register_lock);
+	raw_spin_lock_init(&iommu->register_lock);
 
 	drhd->iommu = iommu;
 	return 0;
@@ -771,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 restart:
 	rc = 0;
 
-	spin_lock_irqsave(&qi->q_lock, flags);
+	raw_spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock_irqrestore(&qi->q_lock, flags);
+		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock_irqsave(&qi->q_lock, flags);
+		raw_spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
 	index = qi->free_head;
@@ -815,15 +815,15 @@ restart:
 		if (rc)
 			break;
 
-		spin_unlock(&qi->q_lock);
+		raw_spin_unlock(&qi->q_lock);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		raw_spin_lock(&qi->q_lock);
 	}
 
 	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
-	spin_unlock_irqrestore(&qi->q_lock, flags);
+	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 
 	if (rc == -EAGAIN)
 		goto restart;
@@ -912,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	if (!ecap_qis(iommu->ecap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
@@ -932,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 		      !(sts & DMA_GSTS_QIES), sts);
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -947,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* write zero to the tail reg */
 	writel(0, iommu->reg + DMAR_IQT_REG);
@@ -960,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1009,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_init(&qi->q_lock);
+	raw_spin_lock_init(&qi->q_lock);
 
 	__dmar_enable_qi(iommu);
 
@@ -1075,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data)
 	unsigned long flag;
 
 	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(0, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_mask(struct irq_data *data)
@@ -1088,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data)
 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 
 	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1100,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
 	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
 	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1112,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
 	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
 	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1153,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	u32 fault_status;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	if (fault_status)
 		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1192,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		writel(DMA_FRCD_F, iommu->reg + reg +
 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 		dmar_fault_do_one(iommu, type, fault_reason,
 				source_id, guest_addr);
@@ -1200,14 +1200,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		fault_index++;
 		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	}
 clear_rest:
 	/* clear all the other faults */
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return IRQ_HANDLED;
 }
 