Diffstat (limited to 'drivers/pci/msi.c')
-rw-r--r--  drivers/pci/msi.c | 394
1 file changed, 307 insertions(+), 87 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 084587d7cd13..fd60806d3fd0 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -19,19 +19,82 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/irqdomain.h>
 
 #include "pci.h"
 
 static int pci_msi_enable = 1;
+int pci_msi_ignore_mask;
 
 #define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
 
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+static struct irq_domain *pci_msi_default_domain;
+static DEFINE_MUTEX(pci_msi_domain_lock);
+
+struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
+{
+	return pci_msi_default_domain;
+}
+
+static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
+{
+	struct irq_domain *domain = NULL;
+
+	if (dev->bus->msi)
+		domain = dev->bus->msi->domain;
+	if (!domain)
+		domain = arch_get_pci_msi_domain(dev);
+
+	return domain;
+}
+
+static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	struct irq_domain *domain;
+
+	domain = pci_msi_get_domain(dev);
+	if (domain)
+		return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
+
+	return arch_setup_msi_irqs(dev, nvec, type);
+}
+
+static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct irq_domain *domain;
+
+	domain = pci_msi_get_domain(dev);
+	if (domain)
+		pci_msi_domain_free_irqs(domain, dev);
+	else
+		arch_teardown_msi_irqs(dev);
+}
+#else
+#define pci_msi_setup_msi_irqs		arch_setup_msi_irqs
+#define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
+#endif
 
 /* Arch hooks */
 
+struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
+{
+	return NULL;
+}
+
+static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
+{
+	struct msi_controller *msi_ctrl = dev->bus->msi;
+
+	if (msi_ctrl)
+		return msi_ctrl;
+
+	return pcibios_msi_controller(dev);
+}
+
 int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
-	struct msi_chip *chip = dev->bus->msi;
+	struct msi_controller *chip = pci_msi_controller(dev);
 	int err;
 
 	if (!chip || !chip->setup_irq)
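Note: the hunk above makes pci_msi_setup_msi_irqs() prefer an irqdomain supplied by the bus's msi_controller and only then fall back to the weak arch hook. A minimal sketch of how an architecture might override that hook; the variable and its init path are illustrative, not part of this patch:

	/* Hypothetical arch code: publish a per-arch PCI/MSI irqdomain. */
	static struct irq_domain *my_arch_pci_msi_domain;

	struct irq_domain *arch_get_pci_msi_domain(struct pci_dev *dev)
	{
		/* Returning NULL keeps the legacy arch_setup_msi_irqs() path. */
		return my_arch_pci_msi_domain;
	}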
@@ -48,7 +111,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 
 void __weak arch_teardown_msi_irq(unsigned int irq)
 {
-	struct msi_chip *chip = irq_get_chip_data(irq);
+	struct msi_controller *chip = irq_get_chip_data(irq);
 
 	if (!chip || !chip->teardown_irq)
 		return;
@@ -85,19 +148,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  */
 void default_teardown_msi_irqs(struct pci_dev *dev)
 {
+	int i;
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		int i, nvec;
-		if (entry->irq == 0)
-			continue;
-		if (entry->nvec_used)
-			nvec = entry->nvec_used;
-		else
-			nvec = 1 << entry->msi_attrib.multiple;
-		for (i = 0; i < nvec; i++)
-			arch_teardown_msi_irq(entry->irq + i);
-	}
+	list_for_each_entry(entry, &dev->msi_list, list)
+		if (entry->irq)
+			for (i = 0; i < entry->nvec_used; i++)
+				arch_teardown_msi_irq(entry->irq + i);
 }
 
 void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -120,7 +177,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
 	}
 
 	if (entry)
-		__write_msi_msg(entry, &entry->msg);
+		__pci_write_msi_msg(entry, &entry->msg);
 }
 
 void __weak arch_restore_msi_irqs(struct pci_dev *dev)
@@ -163,11 +220,11 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
-	if (!desc->msi_attrib.maskbit)
+	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
 		return 0;
 
 	mask_bits &= ~mask;
@@ -177,14 +234,9 @@ u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 	return mask_bits;
 }
 
-__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
-{
-	return default_msi_mask_irq(desc, mask, flag);
-}
-
 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-	desc->masked = arch_msi_mask_irq(desc, mask, flag);
+	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -194,11 +246,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
  * file. This saves a few milliseconds when initialising devices with lots
  * of MSI-X interrupts.
  */
-u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+	if (pci_msi_ignore_mask)
+		return 0;
+
 	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
 	if (flag)
 		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
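Note: with the check above, both mask helpers become no-ops when pci_msi_ignore_mask is set. A hedged sketch of how a platform whose hypervisor owns the mask bits could use the flag; the function name and initcall level are assumptions, not from this patch:

	/* Hypothetical platform init: hypervisor handles all MSI masking. */
	static int __init my_platform_msi_init(void)
	{
		pci_msi_ignore_mask = 1;	/* skip mask register writes */
		return 0;
	}
	arch_initcall(my_platform_msi_init);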
@@ -207,14 +263,9 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
 	return mask_bits;
 }
 
-__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
-{
-	return default_msix_mask_irq(desc, flag);
-}
-
 static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
-	desc->masked = arch_msix_mask_irq(desc, flag);
+	desc->masked = __pci_msix_desc_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -230,12 +281,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 	}
 }
 
-void mask_msi_irq(struct irq_data *data)
+/**
+ * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
+ * @data: pointer to irqdata associated to that interrupt
+ */
+void pci_msi_mask_irq(struct irq_data *data)
 {
 	msi_set_mask_bit(data, 1);
 }
 
-void unmask_msi_irq(struct irq_data *data)
+/**
+ * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
+ * @data: pointer to irqdata associated to that interrupt
+ */
+void pci_msi_unmask_irq(struct irq_data *data)
 {
 	msi_set_mask_bit(data, 0);
 }
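Note: the renamed pci_msi_mask_irq()/pci_msi_unmask_irq() are documented as generic irq chip callbacks, and the irqdomain code added later in this patch wires chip->irq_write_msi_msg the same way. A sketch of an MSI irq_chip using them directly; the chip itself is hypothetical:

	static struct irq_chip my_pci_msi_chip = {
		.name			= "my-PCI-MSI",
		.irq_mask		= pci_msi_mask_irq,
		.irq_unmask		= pci_msi_unmask_irq,
		.irq_write_msi_msg	= pci_msi_domain_write_msg,
	};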
@@ -244,12 +303,11 @@ void default_restore_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	list_for_each_entry(entry, &dev->msi_list, list)
 		default_restore_msi_irq(dev, entry->irq);
-	}
 }
 
-void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	BUG_ON(entry->dev->current_state != PCI_D0);
 
@@ -279,32 +337,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 	}
 }
 
-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-	struct msi_desc *entry = irq_get_msi_desc(irq);
-
-	__read_msi_msg(entry, msg);
-}
-
-void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
-{
-	/* Assert that the cache is valid, assuming that
-	 * valid messages are not all-zeroes. */
-	BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
-		 entry->msg.data));
-
-	*msg = entry->msg;
-}
-
-void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-	struct msi_desc *entry = irq_get_msi_desc(irq);
-
-	__get_cached_msi_msg(entry, msg);
-}
-EXPORT_SYMBOL_GPL(get_cached_msi_msg);
-
-void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	if (entry->dev->current_state != PCI_D0) {
 		/* Don't touch the hardware now */
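Note: the hunk above drops the read_msi_msg() and get_cached_msi_msg() wrappers entirely. One way a caller could be converted (illustrative, not part of this patch):

	struct msi_desc *desc = irq_get_msi_desc(irq);
	struct msi_msg msg;

	__pci_read_msi_msg(desc, &msg);	/* was: read_msi_msg(irq, &msg) */
	msg = desc->msg;		/* was: get_cached_msi_msg(irq, &msg) */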
@@ -341,34 +374,27 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 	entry->msg = *msg;
 }
 
-void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
 	struct msi_desc *entry = irq_get_msi_desc(irq);
 
-	__write_msi_msg(entry, msg);
+	__pci_write_msi_msg(entry, msg);
 }
-EXPORT_SYMBOL_GPL(write_msi_msg);
+EXPORT_SYMBOL_GPL(pci_write_msi_msg);
 
 static void free_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry, *tmp;
 	struct attribute **msi_attrs;
 	struct device_attribute *dev_attr;
-	int count = 0;
+	int i, count = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		int i, nvec;
-		if (!entry->irq)
-			continue;
-		if (entry->nvec_used)
-			nvec = entry->nvec_used;
-		else
-			nvec = 1 << entry->msi_attrib.multiple;
-		for (i = 0; i < nvec; i++)
-			BUG_ON(irq_has_action(entry->irq + i));
-	}
+	list_for_each_entry(entry, &dev->msi_list, list)
+		if (entry->irq)
+			for (i = 0; i < entry->nvec_used; i++)
+				BUG_ON(irq_has_action(entry->irq + i));
 
-	arch_teardown_msi_irqs(dev);
+	pci_msi_teardown_msi_irqs(dev);
 
 	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
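Note: pci_write_msi_msg() keeps the irq-number based interface for external users. A hedged usage sketch; the function and the address/data values are placeholders, not a real platform encoding:

	static void my_retarget_vector(unsigned int irq)
	{
		struct msi_msg msg = {
			.address_lo	= 0xfee00000,	/* placeholder target */
			.address_hi	= 0,
			.data		= 0x41,		/* placeholder vector */
		};

		pci_write_msi_msg(irq, &msg);
	}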
@@ -451,9 +477,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	list_for_each_entry(entry, &dev->msi_list, list)
 		msix_mask_irq(entry, entry->masked);
-	}
 
 	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
@@ -497,9 +522,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 	int count = 0;
 
 	/* Determine how many msi entries we have */
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	list_for_each_entry(entry, &pdev->msi_list, list)
 		++num_msi;
-	}
 	if (!num_msi)
 		return 0;
 
@@ -559,7 +583,7 @@ error_attrs:
 	return ret;
 }
 
-static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
+static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
 {
 	u16 control;
 	struct msi_desc *entry;
@@ -577,6 +601,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
 	entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+	entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+	entry->nvec_used = nvec;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
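Note: the Multiple Message Enable field holds a log2 value, so msi_setup_entry() rounds non-power-of-two requests up while nvec_used keeps the exact count. A worked example (values chosen for illustration):

	/* nvec = 6:
	 *	__roundup_pow_of_two(6) = 8
	 *	ilog2(8)                = 3  ->  msi_attrib.multiple = 3
	 * The device is granted 2^3 = 8 vectors, while nvec_used still
	 * records the six that were actually requested.
	 */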
@@ -623,7 +649,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 
 	msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	entry = msi_setup_entry(dev);
+	entry = msi_setup_entry(dev, nvec);
 	if (!entry)
 		return -ENOMEM;
 
@@ -634,7 +660,7 @@
 	list_add_tail(&entry->list, &dev->msi_list);
 
 	/* Configure MSI capability structure */
-	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
 		msi_mask_irq(entry, mask, ~mask);
 		free_msi_irqs(dev);
@@ -701,6 +727,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->msi_attrib.entry_nr = entries[i].entry;
 		entry->msi_attrib.default_irq = dev->irq;
 		entry->mask_base = base;
+		entry->nvec_used = 1;
 
 		list_add_tail(&entry->list, &dev->msi_list);
 	}
@@ -719,7 +746,6 @@ static void msix_program_entries(struct pci_dev *dev,
 			PCI_MSIX_ENTRY_VECTOR_CTRL;
 
 		entries[i].vector = entry->irq;
-		irq_set_msi_desc(entry->irq, entry);
 		entry->masked = readl(entry->mask_base + offset);
 		msix_mask_irq(entry, 1);
 		i++;
@@ -756,7 +782,7 @@ static int msix_capability_init(struct pci_dev *dev,
 	if (ret)
 		return ret;
 
-	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
 	if (ret)
 		goto out_avail;
 
@@ -895,7 +921,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	/* Return the device with MSI unmasked as initial states */
 	mask = msi_mask(desc->msi_attrib.multi_cap);
 	/* Keep cached state to be restored */
-	arch_msi_mask_irq(desc, mask, ~mask);
+	__pci_msi_desc_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -993,7 +1019,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
 	/* Return the device with MSI-X masked as initial states */
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		/* Keep cached states to be restored */
-		arch_msix_mask_irq(entry, 1);
+		__pci_msix_desc_mask_irq(entry, 1);
 	}
 
 	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
@@ -1138,3 +1164,197 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 	return nvec;
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+/**
+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
+ * @irq_data: Pointer to interrupt data of the MSI interrupt
+ * @msg: Pointer to the message
+ */
+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+	struct msi_desc *desc = irq_data->msi_desc;
+
+	/*
+	 * For MSI-X desc->irq is always equal to irq_data->irq. For
+	 * MSI only the first interrupt of MULTI MSI passes the test.
+	 */
+	if (desc->irq == irq_data->irq)
+		__pci_write_msi_msg(desc, msg);
+}
+
+/**
+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
+ * @dev: Pointer to the PCI device
+ * @desc: Pointer to the msi descriptor
+ *
+ * The ID number is only used within the irqdomain.
+ */
+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
+					  struct msi_desc *desc)
+{
+	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
+		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+}
+
+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
+{
+	return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
+}
+
+/**
+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
+ * @domain: The interrupt domain to check
+ * @info: The domain info for verification
+ * @dev: The device to check
+ *
+ * Returns:
+ *  0 if the functionality is supported
+ *  1 if Multi MSI is requested, but the domain does not support it
+ *  -ENOTSUPP otherwise
+ */
+int pci_msi_domain_check_cap(struct irq_domain *domain,
+			     struct msi_domain_info *info, struct device *dev)
+{
+	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
+
+	/* Special handling to support pci_enable_msi_range() */
+	if (pci_msi_desc_is_multi_msi(desc) &&
+	    !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
+		return 1;
+	else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
+		return -ENOTSUPP;
+
+	return 0;
+}
+
+static int pci_msi_domain_handle_error(struct irq_domain *domain,
+				       struct msi_desc *desc, int error)
+{
+	/* Special handling to support pci_enable_msi_range() */
+	if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
+		return 1;
+
+	return error;
+}
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+				    struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
+					       desc);
+}
+#else
+#define pci_msi_domain_set_desc	NULL
+#endif
+
+static struct msi_domain_ops pci_msi_domain_ops_default = {
+	.set_desc = pci_msi_domain_set_desc,
+	.msi_check = pci_msi_domain_check_cap,
+	.handle_error = pci_msi_domain_handle_error,
+};
+
+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	if (ops == NULL) {
+		info->ops = &pci_msi_domain_ops_default;
+	} else {
+		if (ops->set_desc == NULL)
+			ops->set_desc = pci_msi_domain_set_desc;
+		if (ops->msi_check == NULL)
+			ops->msi_check = pci_msi_domain_check_cap;
+		if (ops->handle_error == NULL)
+			ops->handle_error = pci_msi_domain_handle_error;
+	}
+}
+
+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	BUG_ON(!chip);
+	if (!chip->irq_write_msi_msg)
+		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+}
+
+/**
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @node: Optional device-tree node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a MSI interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+					     struct msi_domain_info *info,
+					     struct irq_domain *parent)
+{
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		pci_msi_domain_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		pci_msi_domain_update_chip_ops(info);
+
+	return msi_create_irq_domain(node, info, parent);
+}
+
+/**
+ * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
+ * @domain: The interrupt domain to allocate from
+ * @dev: The device for which to allocate
+ * @nvec: The number of interrupts to allocate
+ * @type: Unused to allow simpler migration from the arch_XXX interfaces
+ *
+ * Returns:
+ * A virtual interrupt number or an error code in case of failure
+ */
+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
+			      int nvec, int type)
+{
+	return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
+}
+
+/**
+ * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
+ * @domain: The interrupt domain
+ * @dev: The device for which to free interrupts
+ */
+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
+{
+	msi_domain_free_irqs(domain, &dev->dev);
+}
+
+/**
+ * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
+ * @node: Optional device-tree node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Returns: A domain pointer or NULL in case of failure. If successful
+ * the default PCI/MSI irqdomain pointer is updated.
+ */
+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+		struct msi_domain_info *info, struct irq_domain *parent)
+{
+	struct irq_domain *domain;
+
+	mutex_lock(&pci_msi_domain_lock);
+	if (pci_msi_default_domain) {
+		pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
+		domain = NULL;
+	} else {
+		domain = pci_msi_create_irq_domain(node, info, parent);
+		pci_msi_default_domain = domain;
+	}
+	mutex_unlock(&pci_msi_domain_lock);
+
+	return domain;
+}
+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
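Note: taken together, the new CONFIG_PCI_MSI_IRQ_DOMAIN block lets an irqchip driver build a PCI/MSI domain on top of a parent domain instead of implementing the arch_* hooks. A minimal sketch; everything except the pci_msi_* and MSI_FLAG_* names is hypothetical:

	static struct irq_chip my_msi_irq_chip = {
		.name		= "my-MSI",
		.irq_mask	= pci_msi_mask_irq,
		.irq_unmask	= pci_msi_unmask_irq,
	};

	static struct msi_domain_info my_msi_domain_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
		.chip	= &my_msi_irq_chip,
	};

	static int my_msi_probe(struct device_node *node,
				struct irq_domain *parent)
	{
		struct irq_domain *domain;

		domain = pci_msi_create_irq_domain(node, &my_msi_domain_info,
						   parent);
		return domain ? 0 : -ENOMEM;
	}

Because MSI_FLAG_USE_DEF_CHIP_OPS is set and the chip leaves irq_write_msi_msg NULL, pci_msi_domain_update_chip_ops() fills in pci_msi_domain_write_msg automatically.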