Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_idle.c    | 10
-rw-r--r--  drivers/dca/dca-core.c           | 78
-rw-r--r--  drivers/dma/ipu/ipu_irq.c        | 48
-rw-r--r--  drivers/iommu/dmar.c             | 48
-rw-r--r--  drivers/iommu/intel-iommu.c      | 36
-rw-r--r--  drivers/iommu/intr_remapping.c   | 40
-rw-r--r--  drivers/oprofile/event_buffer.c  |  4
-rw-r--r--  drivers/oprofile/oprofile_perf.c |  4
-rw-r--r--  drivers/oprofile/oprofilefs.c    |  6
-rw-r--r--  drivers/video/console/vgacon.c   | 42
10 files changed, 163 insertions(+), 153 deletions(-)
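
Every hunk below follows the same pattern: a spinlock that is taken from a context which must never sleep (the ACPI idle path, irqchip callbacks, IOMMU register access, VGA console writes from printk) is converted from spinlock_t to raw_spinlock_t. On PREEMPT_RT kernels, plain spinlock_t is substituted by a sleeping lock, so it may not be acquired in such paths; raw_spinlock_t keeps the classic busy-waiting semantics in all configurations. A minimal sketch of the two lock flavors (the lock names are illustrative, not from the patch):

	#include <linux/spinlock.h>

	/*
	 * spinlock_t: on PREEMPT_RT this becomes a sleeping lock, so it
	 * must not be taken where sleeping is forbidden.
	 */
	static DEFINE_SPINLOCK(example_lock);

	/*
	 * raw_spinlock_t: always busy-waits with preemption (and, with the
	 * _irqsave variants, interrupts) disabled, so it remains valid in
	 * idle loops, hard-IRQ handlers and irqchip callbacks.
	 */
	static DEFINE_RAW_SPINLOCK(example_raw_lock);

	static void example_critical_section(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_raw_lock, flags);
		/* Keep this short and bounded: no sleeping, no allocation. */
		raw_spin_unlock_irqrestore(&example_raw_lock, flags);
	}
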
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2e69e09ff03e..9b88f9828d8c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 }
 
 static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 * without doing anything.
 	 */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	} else if (!pr->flags.bm_check) {
 		ACPI_FLUSH_CPU_CACHE();
 	}
@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	}
 	kt2 = ktime_get_real();
 	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
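
c3_lock implements a last-CPU-in / first-CPU-out counter: bus-master arbitration is disabled only once every online CPU has entered C3, and re-enabled as soon as the first one leaves. It has to be a raw spinlock because this runs deep in the idle path, where even an RT kernel cannot schedule. Condensed sketch; bm_disable()/bm_enable() are hypothetical stand-ins for the two acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, ...) calls:

	static int c3_cpu_count;
	static DEFINE_RAW_SPINLOCK(c3_lock);

	static void enter_c3(void)
	{
		raw_spin_lock(&c3_lock);
		if (++c3_cpu_count == num_online_cpus())
			bm_disable();	/* last CPU in: arbitration off */
		raw_spin_unlock(&c3_lock);
	}

	static void exit_c3(void)
	{
		raw_spin_lock(&c3_lock);
		bm_enable();		/* first CPU out: arbitration back on */
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}
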
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 4abd089a094f..25ec0bb05198 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
@@ -101,10 +101,10 @@ static void unregister_dca_providers(void)
 
 	INIT_LIST_HEAD(&unregistered_providers);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	if (list_empty(&dca_domains)) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return;
 	}
 
@@ -116,7 +116,7 @@ static void unregister_dca_providers(void)
 
 	dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
 		dca_sysfs_remove_provider(dca);
@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 
 	if (!domain) {
-		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
 			dca_providers_blocked = 1;
-		} else {
-			domain = dca_allocate_domain(rc);
-			if (domain)
-				list_add(&domain->node, &dca_domains);
-		}
 	}
 
 	return domain;
@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	/* check if the requester has not been added already */
 	dca = dca_find_provider_by_dev(dev);
 	if (dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -EEXIST;
 	}
 
 	pci_rc = dca_pci_rc_from_dev(dev);
 	domain = dca_find_domain(pci_rc);
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 
@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev)
 			break;
 	}
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
 
 	err = dca_sysfs_add_req(dca, dev, slot);
 	if (err) {
-		spin_lock_irqsave(&dca_lock, flags);
+		raw_spin_lock_irqsave(&dca_lock, flags);
 		if (dca == dca_find_provider_by_dev(dev))
 			dca->ops->remove_requester(dca, dev);
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return err;
 	}
 
@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	slot = dca->ops->remove_requester(dca, dev);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
 	u8 tag;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	tag = dca->ops->get_tag(dca, dev, cpu);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 	return tag;
 }
 
@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 {
 	int err;
 	unsigned long flags;
-	struct dca_domain *domain;
+	struct dca_domain *domain, *newdomain = NULL;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	if (dca_providers_blocked) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	if (!domain) {
+		struct pci_bus *rc;
+
 		if (dca_providers_blocked) {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			raw_spin_unlock_irqrestore(&dca_lock, flags);
 			dca_sysfs_remove_provider(dca);
 			unregister_dca_providers();
-		} else {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			return -ENODEV;
+		}
+
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		rc = dca_pci_rc_from_dev(dev);
+		newdomain = dca_allocate_domain(rc);
+		if (!newdomain)
+			return -ENODEV;
+		raw_spin_lock_irqsave(&dca_lock, flags);
+		/* Recheck, we might have raced after dropping the lock */
+		domain = dca_get_domain(dev);
+		if (!domain) {
+			domain = newdomain;
+			newdomain = NULL;
+			list_add(&domain->node, &dca_domains);
 		}
-		return -ENODEV;
 	}
 	list_add(&dca->node, &domain->dca_providers);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_ADD, NULL);
+	kfree(newdomain);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_REMOVE, NULL);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	list_del(&dca->node);
 
@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	if (list_empty(&domain->dca_providers))
 		dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	dca_sysfs_remove_provider(dca);
 }
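
The register_dca_provider() rework is the one change in this file that is more than a rename: dca_allocate_domain() allocates memory and may sleep, which is never allowed under a raw spinlock. The fix is the usual allocate-outside-the-lock pattern: drop the lock, allocate, retake the lock, recheck for a racing insertion, and free the unused copy if another CPU won. A generic sketch under assumed names (struct obj, obj_lock, obj_list and lookup() are placeholders, not kernel API):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		int key;
		struct list_head node;
	};

	static LIST_HEAD(obj_list);
	static DEFINE_RAW_SPINLOCK(obj_lock);

	static bool lookup(int key);	/* stand-in for dca_get_domain() */

	static int install_obj(int key)
	{
		struct obj *obj;
		unsigned long flags;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);	/* may sleep: lock not held */
		if (!obj)
			return -ENOMEM;

		raw_spin_lock_irqsave(&obj_lock, flags);
		if (!lookup(key)) {		/* recheck: did another CPU win the race? */
			obj->key = key;
			list_add(&obj->node, &obj_list);
			obj = NULL;		/* ownership transferred to the list */
		}
		raw_spin_unlock_irqrestore(&obj_lock, flags);

		kfree(obj);			/* kfree(NULL) is a no-op */
		return 0;
	}
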
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index ab8a4eff072a..a71f55e72be9 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
 /* Protects allocations from the above array of maps */
 static DEFINE_MUTEX(map_lock);
 /* Protects register accesses and individual mappings */
-static DEFINE_SPINLOCK(bank_lock);
+static DEFINE_RAW_SPINLOCK(bank_lock);
 
 static struct ipu_irq_map *src2map(unsigned int src)
 {
@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_data *d)
 	uint32_t reg;
 	unsigned long lock_flags;
 
-	spin_lock_irqsave(&bank_lock, lock_flags);
+	raw_spin_lock_irqsave(&bank_lock, lock_flags);
 
 	bank = map->bank;
 	if (!bank) {
-		spin_unlock_irqrestore(&bank_lock, lock_flags);
+		raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 		pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
 		return;
 	}
@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_data *d)
 	reg |= (1UL << (map->source & 31));
 	ipu_write_reg(bank->ipu, reg, bank->control);
 
-	spin_unlock_irqrestore(&bank_lock, lock_flags);
+	raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 }
 
 static void ipu_irq_mask(struct irq_data *d)
@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data *d)
 	uint32_t reg;
 	unsigned long lock_flags;
 
-	spin_lock_irqsave(&bank_lock, lock_flags);
+	raw_spin_lock_irqsave(&bank_lock, lock_flags);
 
 	bank = map->bank;
 	if (!bank) {
-		spin_unlock_irqrestore(&bank_lock, lock_flags);
+		raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 		pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
 		return;
 	}
@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data *d)
 	reg &= ~(1UL << (map->source & 31));
 	ipu_write_reg(bank->ipu, reg, bank->control);
 
-	spin_unlock_irqrestore(&bank_lock, lock_flags);
+	raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 }
 
 static void ipu_irq_ack(struct irq_data *d)
@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data *d)
 	struct ipu_irq_bank *bank;
 	unsigned long lock_flags;
 
-	spin_lock_irqsave(&bank_lock, lock_flags);
+	raw_spin_lock_irqsave(&bank_lock, lock_flags);
 
 	bank = map->bank;
 	if (!bank) {
-		spin_unlock_irqrestore(&bank_lock, lock_flags);
+		raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 		pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
 		return;
 	}
 
 	ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
-	spin_unlock_irqrestore(&bank_lock, lock_flags);
+	raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 }
 
 /**
@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
 	unsigned long lock_flags;
 	bool ret;
 
-	spin_lock_irqsave(&bank_lock, lock_flags);
+	raw_spin_lock_irqsave(&bank_lock, lock_flags);
 	bank = map->bank;
 	ret = bank && ipu_read_reg(bank->ipu, bank->status) &
 		(1UL << (map->source & 31));
-	spin_unlock_irqrestore(&bank_lock, lock_flags);
+	raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 
 	return ret;
 }
@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
 		if (irq_map[i].source < 0) {
 			unsigned long lock_flags;
 
-			spin_lock_irqsave(&bank_lock, lock_flags);
+			raw_spin_lock_irqsave(&bank_lock, lock_flags);
 			irq_map[i].source = source;
 			irq_map[i].bank = irq_bank + source / 32;
-			spin_unlock_irqrestore(&bank_lock, lock_flags);
+			raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 
 			ret = irq_map[i].irq;
 			pr_debug("IPU: mapped source %u to IRQ %u\n",
@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
 			pr_debug("IPU: unmapped source %u from IRQ %u\n",
 				 source, irq_map[i].irq);
 
-			spin_lock_irqsave(&bank_lock, lock_flags);
+			raw_spin_lock_irqsave(&bank_lock, lock_flags);
 			irq_map[i].source = -EINVAL;
 			irq_map[i].bank = NULL;
-			spin_unlock_irqrestore(&bank_lock, lock_flags);
+			raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
 
 			ret = 0;
 			break;
@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
 	for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
 		struct ipu_irq_bank *bank = irq_bank + i;
 
-		spin_lock(&bank_lock);
+		raw_spin_lock(&bank_lock);
 		status = ipu_read_reg(ipu, bank->status);
 		/*
 		 * Don't think we have to clear all interrupts here, they will
@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
 		 * might want to clear unhandled interrupts after the loop...
 		 */
 		status &= ipu_read_reg(ipu, bank->control);
-		spin_unlock(&bank_lock);
+		raw_spin_unlock(&bank_lock);
 		while ((line = ffs(status))) {
 			struct ipu_irq_map *map;
 
 			line--;
 			status &= ~(1UL << line);
 
-			spin_lock(&bank_lock);
+			raw_spin_lock(&bank_lock);
 			map = src2map(32 * i + line);
 			if (map)
 				irq = map->irq;
-			spin_unlock(&bank_lock);
+			raw_spin_unlock(&bank_lock);
 
 			if (!map) {
 				pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
 	for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
 		struct ipu_irq_bank *bank = irq_bank + i;
 
-		spin_lock(&bank_lock);
+		raw_spin_lock(&bank_lock);
 		status = ipu_read_reg(ipu, bank->status);
 		/* Not clearing all interrupts, see above */
 		status &= ipu_read_reg(ipu, bank->control);
-		spin_unlock(&bank_lock);
+		raw_spin_unlock(&bank_lock);
 		while ((line = ffs(status))) {
 			struct ipu_irq_map *map;
 
 			line--;
 			status &= ~(1UL << line);
 
-			spin_lock(&bank_lock);
+			raw_spin_lock(&bank_lock);
 			map = src2map(32 * i + line);
 			if (map)
 				irq = map->irq;
-			spin_unlock(&bank_lock);
+			raw_spin_unlock(&bank_lock);
 
 			if (!map) {
 				pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
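
Note the split in ipu_irq.c: map_lock stays a mutex (it only protects slow-path allocation of map entries), while bank_lock becomes raw because mask/unmask/ack are irq_chip callbacks invoked from hard-IRQ context. This also respects the RT lock-nesting rule: a non-sleeping lock may be taken inside a sleeping one, never the reverse. Sketch of the rule (lock names illustrative):

	static DEFINE_SPINLOCK(outer_lock);		/* may sleep on PREEMPT_RT */
	static DEFINE_RAW_SPINLOCK(inner_lock);		/* never sleeps */

	static void ok_nesting(void)
	{
		spin_lock(&outer_lock);
		raw_spin_lock(&inner_lock);	/* fine: non-sleeping inside sleeping */
		raw_spin_unlock(&inner_lock);
		spin_unlock(&outer_lock);
		/*
		 * The reverse order - spin_lock() inside raw_spin_lock() -
		 * would try to sleep with preemption disabled on RT.
		 */
	}
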
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 587e8f2d38d8..35c1e17fce1d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -652,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 		(unsigned long long)iommu->cap,
 		(unsigned long long)iommu->ecap);
 
-	spin_lock_init(&iommu->register_lock);
+	raw_spin_lock_init(&iommu->register_lock);
 
 	drhd->iommu = iommu;
 	return 0;
@@ -771,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 restart:
 	rc = 0;
 
-	spin_lock_irqsave(&qi->q_lock, flags);
+	raw_spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock_irqrestore(&qi->q_lock, flags);
+		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock_irqsave(&qi->q_lock, flags);
+		raw_spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
 	index = qi->free_head;
@@ -815,15 +815,15 @@ restart:
 		if (rc)
 			break;
 
-		spin_unlock(&qi->q_lock);
+		raw_spin_unlock(&qi->q_lock);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		raw_spin_lock(&qi->q_lock);
 	}
 
 	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
-	spin_unlock_irqrestore(&qi->q_lock, flags);
+	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 
 	if (rc == -EAGAIN)
 		goto restart;
@@ -912,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	if (!ecap_qis(iommu->ecap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
@@ -932,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 		      !(sts & DMA_GSTS_QIES), sts);
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -947,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* write zero to the tail reg */
 	writel(0, iommu->reg + DMAR_IQT_REG);
@@ -960,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1009,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_init(&qi->q_lock);
+	raw_spin_lock_init(&qi->q_lock);
 
 	__dmar_enable_qi(iommu);
 
@@ -1075,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data)
 	unsigned long flag;
 
 	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(0, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_mask(struct irq_data *data)
@@ -1088,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data)
 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 
 	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1100,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
 	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
 	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1112,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
 	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
 	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1153,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	u32 fault_status;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	if (fault_status)
 		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1192,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		writel(DMA_FRCD_F, iommu->reg + reg +
 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 		dmar_fault_do_one(iommu, type, fault_reason,
 				  source_id, guest_addr);
@@ -1200,14 +1200,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		fault_index++;
 		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	}
 clear_rest:
 	/* clear all the other faults */
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return IRQ_HANDLED;
 }
 
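
qi_submit_sync() also shows the price of a raw lock: waiters cannot sleep, so the queued-invalidation code waits for free descriptors (and, further down, for hardware completion) by repeatedly dropping the lock, calling cpu_relax(), and retaking it, rather than blocking. In outline, as converted above:

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		/* Give other CPUs a chance to release descriptors; never sleep. */
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}
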
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f28d933c7927..be1953c239b0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -939,7 +939,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 
 	addr = iommu->root_entry;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
 
 	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -948,7 +948,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_RTPS), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -959,14 +959,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (!(val & DMA_GSTS_WBFS)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -993,14 +993,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
 	}
 	val |= DMA_CCMD_ICC;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
 		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -1039,7 +1039,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	if (cap_write_drain(iommu->cap))
 		val |= DMA_TLB_WRITE_DRAIN;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	/* Note: Only uses first TLB reg currently */
 	if (val_iva)
 		dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1049,7 +1049,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
 		      dmar_readq, (!(val & DMA_TLB_IVT)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 	/* check IOTLB invalidation granularity */
 	if (DMA_TLB_IAIG(val) == 0)
@@ -1165,7 +1165,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
 	writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1174,7 +1174,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
 		      readl, !(pmen & DMA_PMEN_PRS), pmen);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1182,7 +1182,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	iommu->gcmd |= DMA_GCMD_TE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1190,7 +1190,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_TES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 	return 0;
 }
 
@@ -1199,7 +1199,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	iommu->gcmd &= ~DMA_GCMD_TE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1207,7 +1207,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (!(sts & DMA_GSTS_TES)), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return 0;
 }
 
@@ -3329,7 +3329,7 @@ static int iommu_suspend(void)
 	for_each_active_iommu(iommu, drhd) {
 		iommu_disable_translation(iommu);
 
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
 		iommu->iommu_state[SR_DMAR_FECTL_REG] =
 			readl(iommu->reg + DMAR_FECTL_REG);
@@ -3340,7 +3340,7 @@ static int iommu_suspend(void)
 		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
 			readl(iommu->reg + DMAR_FEUADDR_REG);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 	return 0;
 
@@ -3367,7 +3367,7 @@ static void iommu_resume(void)
 
 	for_each_active_iommu(iommu, drhd) {
 
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
 		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
 			iommu->reg + DMAR_FECTL_REG);
@@ -3378,7 +3378,7 @@ static void iommu_resume(void)
 		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
 			iommu->reg + DMAR_FEUADDR_REG);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 
 	for_each_active_iommu(iommu, drhd)
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index cfb0dd4bf0b6..07c9f189f314 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -54,7 +54,7 @@ static __init int setup_intremap(char *str)
 }
 early_param("intremap", setup_intremap);
 
-static DEFINE_SPINLOCK(irq_2_ir_lock);
+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
@@ -71,12 +71,12 @@ int get_irte(int irq, struct irte *entry)
 	if (!entry || !irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
@@ -110,7 +110,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		return -1;
 	}
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 			if (table->base[i].present)
@@ -122,7 +122,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -136,7 +136,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -161,10 +161,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
@@ -176,14 +176,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = subhandle;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -199,7 +199,7 @@ int modify_irte(int irq, struct irte *irte_modified)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	iommu = irq_iommu->iommu;
 
@@ -211,7 +211,7 @@ int modify_irte(int irq, struct irte *irte_modified)
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -279,7 +279,7 @@ int free_irte(int irq)
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	rc = clear_entries(irq_iommu);
 
@@ -288,7 +288,7 @@ int free_irte(int irq)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -418,7 +418,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	addr = virt_to_phys((void *)iommu->ir_table->base);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
 		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRTPS), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 
 	/*
 	 * global invalidation of interrupt entry cache before enabling
@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* Enable interrupt-remapping */
 	iommu->gcmd |= DMA_GCMD_IRE;
@@ -446,7 +446,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 
@@ -494,7 +494,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
@@ -507,7 +507,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 		      readl, !(sts & DMA_GSTS_IRES), sts);
 
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int __init dmar_x2apic_optout(void)
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index dd87e86048be..c0cc4e7ff023 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -82,10 +82,10 @@ int alloc_event_buffer(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&oprofilefs_lock, flags);
+	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
 	buffer_size = oprofile_buffer_size;
 	buffer_watershed = oprofile_buffer_watershed;
-	spin_unlock_irqrestore(&oprofilefs_lock, flags);
+	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index 94796f39bc47..da14432806c6 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -160,9 +160,9 @@ static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
 
 static int oprofile_perf_setup(void)
 {
-	spin_lock(&oprofilefs_lock);
+	raw_spin_lock(&oprofilefs_lock);
 	op_perf_setup();
-	spin_unlock(&oprofilefs_lock);
+	raw_spin_unlock(&oprofilefs_lock);
 	return 0;
 }
 
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index e9ff6f7770be..d0de6cc2d7a5 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -21,7 +21,7 @@
 
 #define OPROFILEFS_MAGIC 0x6f70726f
 
-DEFINE_SPINLOCK(oprofilefs_lock);
+DEFINE_RAW_SPINLOCK(oprofilefs_lock);
 
 static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
 {
@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 	if (copy_from_user(tmpbuf, buf, count))
 		return -EFAULT;
 
-	spin_lock_irqsave(&oprofilefs_lock, flags);
+	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
 	*val = simple_strtoul(tmpbuf, NULL, 0);
-	spin_unlock_irqrestore(&oprofilefs_lock, flags);
+	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
 	return 0;
 }
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 915fd74da7a2..d449a74d4a31 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -50,7 +50,7 @@
 #include <video/vga.h>
 #include <asm/io.h>
 
-static DEFINE_SPINLOCK(vga_lock);
+static DEFINE_RAW_SPINLOCK(vga_lock);
 static int cursor_size_lastfrom;
 static int cursor_size_lastto;
 static u32 vgacon_xres;
@@ -157,7 +157,7 @@ static inline void write_vga(unsigned char reg, unsigned int val)
 	 * ddprintk might set the console position from interrupt
 	 * handlers, thus the write has to be IRQ-atomic.
	 */
-	spin_lock_irqsave(&vga_lock, flags);
+	raw_spin_lock_irqsave(&vga_lock, flags);
 
 #ifndef SLOW_VGA
 	v1 = reg + (val & 0xff00);
@@ -170,7 +170,7 @@ static inline void write_vga(unsigned char reg, unsigned int val)
 	outb_p(reg + 1, vga_video_port_reg);
 	outb_p(val & 0xff, vga_video_port_val);
 #endif
-	spin_unlock_irqrestore(&vga_lock, flags);
+	raw_spin_unlock_irqrestore(&vga_lock, flags);
 }
 
 static inline void vga_set_mem_top(struct vc_data *c)
@@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
 	cursor_size_lastfrom = from;
 	cursor_size_lastto = to;
 
-	spin_lock_irqsave(&vga_lock, flags);
+	raw_spin_lock_irqsave(&vga_lock, flags);
 	if (vga_video_type >= VIDEO_TYPE_VGAC) {
 		outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
 		curs = inb_p(vga_video_port_val);
@@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
 	outb_p(curs, vga_video_port_val);
 	outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
 	outb_p(cure, vga_video_port_val);
-	spin_unlock_irqrestore(&vga_lock, flags);
+	raw_spin_unlock_irqrestore(&vga_lock, flags);
 }
 
 static void vgacon_cursor(struct vc_data *c, int mode)
@@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_data *c,
 	unsigned int scanlines = height * c->vc_font.height;
 	u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
 
-	spin_lock_irqsave(&vga_lock, flags);
+	raw_spin_lock_irqsave(&vga_lock, flags);
 
 	vgacon_xres = width * VGA_FONTWIDTH;
 	vgacon_yres = height * c->vc_font.height;
@@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_data *c,
 		outb_p(vsync_end, vga_video_port_val);
 	}
 
-	spin_unlock_irqrestore(&vga_lock, flags);
+	raw_spin_unlock_irqrestore(&vga_lock, flags);
 	return 0;
 }
 
@@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
 {
 	/* save original values of VGA controller registers */
 	if (!vga_vesa_blanked) {
-		spin_lock_irq(&vga_lock);
+		raw_spin_lock_irq(&vga_lock);
 		vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
896 | vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); | 896 | vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); |
897 | vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); | 897 | vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); |
898 | spin_unlock_irq(&vga_lock); | 898 | raw_spin_unlock_irq(&vga_lock); |
899 | 899 | ||
900 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ | 900 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ |
901 | vga_state.HorizontalTotal = inb_p(vga_video_port_val); | 901 | vga_state.HorizontalTotal = inb_p(vga_video_port_val); |
@@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgastate *state, int mode) | |||
918 | 918 | ||
919 | /* assure that video is enabled */ | 919 | /* assure that video is enabled */ |
920 | /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ | 920 | /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ |
921 | spin_lock_irq(&vga_lock); | 921 | raw_spin_lock_irq(&vga_lock); |
922 | vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); | 922 | vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); |
923 | 923 | ||
924 | /* test for vertical retrace in process.... */ | 924 | /* test for vertical retrace in process.... */ |
@@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgastate *state, int mode) | |||
954 | /* restore both index registers */ | 954 | /* restore both index registers */ |
955 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); | 955 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); |
956 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); | 956 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); |
957 | spin_unlock_irq(&vga_lock); | 957 | raw_spin_unlock_irq(&vga_lock); |
958 | } | 958 | } |
959 | 959 | ||
960 | static void vga_vesa_unblank(struct vgastate *state) | 960 | static void vga_vesa_unblank(struct vgastate *state) |
961 | { | 961 | { |
962 | /* restore original values of VGA controller registers */ | 962 | /* restore original values of VGA controller registers */ |
963 | spin_lock_irq(&vga_lock); | 963 | raw_spin_lock_irq(&vga_lock); |
964 | vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); | 964 | vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); |
965 | 965 | ||
966 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ | 966 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ |
@@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgastate *state) | |||
985 | /* restore index/control registers */ | 985 | /* restore index/control registers */ |
986 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); | 986 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); |
987 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); | 987 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); |
988 | spin_unlock_irq(&vga_lock); | 988 | raw_spin_unlock_irq(&vga_lock); |
989 | } | 989 | } |
990 | 990 | ||
991 | static void vga_pal_blank(struct vgastate *state) | 991 | static void vga_pal_blank(struct vgastate *state) |
@@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
1104 | charmap += 4 * cmapsz; | 1104 | charmap += 4 * cmapsz; |
1105 | #endif | 1105 | #endif |
1106 | 1106 | ||
1107 | spin_lock_irq(&vga_lock); | 1107 | raw_spin_lock_irq(&vga_lock); |
1108 | /* First, the Sequencer */ | 1108 | /* First, the Sequencer */ |
1109 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); | 1109 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); |
1110 | /* CPU writes only to map 2 */ | 1110 | /* CPU writes only to map 2 */ |
@@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
1120 | vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); | 1120 | vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); |
1121 | /* map start at A000:0000 */ | 1121 | /* map start at A000:0000 */ |
1122 | vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); | 1122 | vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); |
1123 | spin_unlock_irq(&vga_lock); | 1123 | raw_spin_unlock_irq(&vga_lock); |
1124 | 1124 | ||
1125 | if (arg) { | 1125 | if (arg) { |
1126 | if (set) | 1126 | if (set) |
@@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
1147 | } | 1147 | } |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | spin_lock_irq(&vga_lock); | 1150 | raw_spin_lock_irq(&vga_lock); |
1151 | /* First, the sequencer, Synchronous reset */ | 1151 | /* First, the sequencer, Synchronous reset */ |
1152 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); | 1152 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); |
1153 | /* CPU writes to maps 0 and 1 */ | 1153 | /* CPU writes to maps 0 and 1 */ |
@@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
1186 | inb_p(video_port_status); | 1186 | inb_p(video_port_status); |
1187 | vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); | 1187 | vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); |
1188 | } | 1188 | } |
1189 | spin_unlock_irq(&vga_lock); | 1189 | raw_spin_unlock_irq(&vga_lock); |
1190 | return 0; | 1190 | return 0; |
1191 | } | 1191 | } |
1192 | 1192 | ||
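
Throughout vgacon_do_font_op() and the blanking paths above, the conversion uses raw_spin_lock_irq()/raw_spin_unlock_irq(): these paths run with interrupts enabled, so the flags word of the _irqsave variant is unnecessary. A sketch of that flavour around indexed-register I/O, with illustrative parameters (read_indexed_reg is a stand-in, not a vgacon function):

    #include <linux/spinlock.h>
    #include <asm/io.h>

    static DEFINE_RAW_SPINLOCK(hw_lock);

    static unsigned char read_indexed_reg(unsigned long idx_port,
                                          unsigned long val_port,
                                          unsigned char reg)
    {
            unsigned char val;

            /* _irq unconditionally disables, then re-enables, local
             * interrupts; the select/read pair must stay atomic. */
            raw_spin_lock_irq(&hw_lock);
            outb_p(reg, idx_port);      /* select the register */
            val = inb_p(val_port);      /* read its value */
            raw_spin_unlock_irq(&hw_lock);

            return val;
    }
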
@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) | |||
1211 | registers; they are write-only on EGA, but it appears that they | 1211 | registers; they are write-only on EGA, but it appears that they |
1212 | are all don't care bits on EGA, so I guess it doesn't matter. */ | 1212 | are all don't care bits on EGA, so I guess it doesn't matter. */ |
1213 | 1213 | ||
1214 | spin_lock_irq(&vga_lock); | 1214 | raw_spin_lock_irq(&vga_lock); |
1215 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ | 1215 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ |
1216 | ovr = inb_p(vga_video_port_val); | 1216 | ovr = inb_p(vga_video_port_val); |
1217 | outb_p(0x09, vga_video_port_reg); /* Font size register */ | 1217 | outb_p(0x09, vga_video_port_reg); /* Font size register */ |
1218 | fsr = inb_p(vga_video_port_val); | 1218 | fsr = inb_p(vga_video_port_val); |
1219 | spin_unlock_irq(&vga_lock); | 1219 | raw_spin_unlock_irq(&vga_lock); |
1220 | 1220 | ||
1221 | vde = maxscan & 0xff; /* Vertical display end reg */ | 1221 | vde = maxscan & 0xff; /* Vertical display end reg */ |
1222 | ovr = (ovr & 0xbd) + /* Overflow register */ | 1222 | ovr = (ovr & 0xbd) + /* Overflow register */ |
1223 | ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); | 1223 | ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); |
1224 | fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ | 1224 | fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ |
1225 | 1225 | ||
1226 | spin_lock_irq(&vga_lock); | 1226 | raw_spin_lock_irq(&vga_lock); |
1227 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ | 1227 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ |
1228 | outb_p(ovr, vga_video_port_val); | 1228 | outb_p(ovr, vga_video_port_val); |
1229 | outb_p(0x09, vga_video_port_reg); /* Font size */ | 1229 | outb_p(0x09, vga_video_port_reg); /* Font size */ |
1230 | outb_p(fsr, vga_video_port_val); | 1230 | outb_p(fsr, vga_video_port_val); |
1231 | outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ | 1231 | outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ |
1232 | outb_p(vde, vga_video_port_val); | 1232 | outb_p(vde, vga_video_port_val); |
1233 | spin_unlock_irq(&vga_lock); | 1233 | raw_spin_unlock_irq(&vga_lock); |
1234 | vga_video_font_height = fontheight; | 1234 | vga_video_font_height = fontheight; |
1235 | 1235 | ||
1236 | for (i = 0; i < MAX_NR_CONSOLES; i++) { | 1236 | for (i = 0; i < MAX_NR_CONSOLES; i++) { |