 arch/x86/kernel/amd_iommu.c | 22 ++++++++++++++++------
 arch/x86/kernel/vsmp_64.c   |  2 +-
 arch/x86/oprofile/nmi_int.c |  4 +++-
 3 files changed, 20 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 69b4d060b21c..042fdc27bc92 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -101,10 +101,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret, ready = 0;
+	int ret = 0, ready = 0;
 	unsigned status = 0;
 	struct iommu_cmd cmd;
-	unsigned long i = 0;
+	unsigned long flags, i = 0;
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
@@ -112,10 +112,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
 	iommu->need_sync = 0;
 
-	ret = iommu_queue_command(iommu, &cmd);
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
-		return ret;
+		goto out;
 
 	while (!ready && (i < EXIT_LOOP_COUNT)) {
 		++i;
@@ -130,6 +132,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
 	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
 		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+out:
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
 }
@@ -140,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 {
 	struct iommu_cmd cmd;
+	int ret;
 
 	BUG_ON(iommu == NULL);
 
@@ -147,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
 	cmd.data[0] = devid;
 
+	ret = iommu_queue_command(iommu, &cmd);
+
 	iommu->need_sync = 1;
 
-	return iommu_queue_command(iommu, &cmd);
+	return ret;
 }
 
 /*
@@ -159,6 +166,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 		u64 address, u16 domid, int pde, int s)
 {
 	struct iommu_cmd cmd;
+	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
 	address &= PAGE_MASK;
@@ -171,9 +179,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 	if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
 		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 
+	ret = iommu_queue_command(iommu, &cmd);
+
 	iommu->need_sync = 1;
 
-	return iommu_queue_command(iommu, &cmd);
+	return ret;
 }
 
 /*
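
Taken together, the amd_iommu.c hunks make iommu_completion_wait() queue the completion-wait command and poll for its completion with iommu->lock held, so no other command can slip in between. A condensed sketch of the resulting flow; the command setup and the status-register poll body fall outside the hunks above and are elided into comments:

static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0, ready = 0;
	struct iommu_cmd cmd;
	unsigned long flags, i = 0;

	/* ... build the COMPLETION_WAIT command in cmd ... */

	iommu->need_sync = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command(iommu, &cmd);	/* lock-free variant, lock already held */

	if (ret)
		goto out;

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* ... read the IOMMU status register and test the wait bit ... */
	}

	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
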
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 0c029e8959c7..7766d36983fc 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -61,7 +61,7 @@ static void vsmp_irq_enable(void)
 	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
 
-static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 				  unsigned long addr, unsigned len)
 {
 	switch (type) {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 0227694f7dab..8a5f1614a3d5 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -295,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy)
 
 static void nmi_shutdown(void)
 {
-	struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
+	struct op_msrs *msrs;
+
 	nmi_enabled = 0;
 	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
+	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
 	free_msrs();
 	put_cpu_var(cpu_msrs);
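
For reference, nmi_shutdown() as it reads after this hunk: the per-CPU msrs pointer is now taken only after the cross-CPU shutdown call, immediately before it is used. The function's closing brace lies outside the hunk context shown above.

static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);	/* disables preemption until put_cpu_var() */
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}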