author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/pci/msi.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/pci/msi.c')
-rw-r--r--   drivers/pci/msi.c   1151
1 files changed, 1151 insertions, 0 deletions
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
new file mode 100644
index 000000000000..22ecd3b058be
--- /dev/null
+++ b/drivers/pci/msi.c
@@ -0,0 +1,1151 @@
1 | /* | ||
2 | * File: msi.c | ||
3 | * Purpose: PCI Message Signaled Interrupt (MSI) | ||
4 | * | ||
5 | * Copyright (C) 2003-2004 Intel | ||
6 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/irq.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/smp_lock.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | |||
19 | #include <asm/errno.h> | ||
20 | #include <asm/io.h> | ||
21 | #include <asm/smp.h> | ||
22 | |||
23 | #include "pci.h" | ||
24 | #include "msi.h" | ||
25 | |||
26 | static DEFINE_SPINLOCK(msi_lock); | ||
27 | static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL }; | ||
28 | static kmem_cache_t* msi_cachep; | ||
29 | |||
30 | static int pci_msi_enable = 1; | ||
31 | static int last_alloc_vector = 0; | ||
32 | static int nr_released_vectors = 0; | ||
33 | static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS; | ||
34 | static int nr_msix_devices = 0; | ||
35 | |||
36 | #ifndef CONFIG_X86_IO_APIC | ||
37 | int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; | ||
38 | u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; | ||
39 | #endif | ||
40 | |||
41 | static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags) | ||
42 | { | ||
43 | memset(p, 0, NR_IRQS * sizeof(struct msi_desc)); | ||
44 | } | ||
45 | |||
46 | static int msi_cache_init(void) | ||
47 | { | ||
48 | msi_cachep = kmem_cache_create("msi_cache", | ||
49 | NR_IRQS * sizeof(struct msi_desc), | ||
50 | 0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL); | ||
51 | if (!msi_cachep) | ||
52 | return -ENOMEM; | ||
53 | |||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void msi_set_mask_bit(unsigned int vector, int flag) | ||
58 | { | ||
59 | struct msi_desc *entry; | ||
60 | |||
61 | entry = (struct msi_desc *)msi_desc[vector]; | ||
62 | if (!entry || !entry->dev || !entry->mask_base) | ||
63 | return; | ||
64 | switch (entry->msi_attrib.type) { | ||
65 | case PCI_CAP_ID_MSI: | ||
66 | { | ||
67 | int pos; | ||
68 | u32 mask_bits; | ||
69 | |||
70 | pos = (long)entry->mask_base; | ||
71 | pci_read_config_dword(entry->dev, pos, &mask_bits); | ||
72 | mask_bits &= ~(1); | ||
73 | mask_bits |= flag; | ||
74 | pci_write_config_dword(entry->dev, pos, mask_bits); | ||
75 | break; | ||
76 | } | ||
77 | case PCI_CAP_ID_MSIX: | ||
78 | { | ||
79 | int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | ||
80 | PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; | ||
81 | writel(flag, entry->mask_base + offset); | ||
82 | break; | ||
83 | } | ||
84 | default: | ||
85 | break; | ||
86 | } | ||
87 | } | ||
88 | |||
89 | #ifdef CONFIG_SMP | ||
90 | static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) | ||
91 | { | ||
92 | struct msi_desc *entry; | ||
93 | struct msg_address address; | ||
94 | |||
95 | entry = (struct msi_desc *)msi_desc[vector]; | ||
96 | if (!entry || !entry->dev) | ||
97 | return; | ||
98 | |||
99 | switch (entry->msi_attrib.type) { | ||
100 | case PCI_CAP_ID_MSI: | ||
101 | { | ||
102 | int pos; | ||
103 | |||
104 | if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI))) | ||
105 | return; | ||
106 | |||
107 | pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), | ||
108 | &address.lo_address.value); | ||
109 | address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; | ||
110 | address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) << | ||
111 | MSI_TARGET_CPU_SHIFT); | ||
112 | entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask); | ||
113 | pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), | ||
114 | address.lo_address.value); | ||
115 | break; | ||
116 | } | ||
117 | case PCI_CAP_ID_MSIX: | ||
118 | { | ||
119 | int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | ||
120 | PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET; | ||
121 | |||
122 | address.lo_address.value = readl(entry->mask_base + offset); | ||
123 | address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; | ||
124 | address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) << | ||
125 | MSI_TARGET_CPU_SHIFT); | ||
126 | entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask); | ||
127 | writel(address.lo_address.value, entry->mask_base + offset); | ||
128 | break; | ||
129 | } | ||
130 | default: | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | #ifdef CONFIG_IRQBALANCE | ||
136 | static inline void move_msi(int vector) | ||
137 | { | ||
138 | if (!cpus_empty(pending_irq_balance_cpumask[vector])) { | ||
139 | set_msi_affinity(vector, pending_irq_balance_cpumask[vector]); | ||
140 | cpus_clear(pending_irq_balance_cpumask[vector]); | ||
141 | } | ||
142 | } | ||
143 | #endif /* CONFIG_IRQBALANCE */ | ||
144 | #endif /* CONFIG_SMP */ | ||
145 | |||
146 | static void mask_MSI_irq(unsigned int vector) | ||
147 | { | ||
148 | msi_set_mask_bit(vector, 1); | ||
149 | } | ||
150 | |||
151 | static void unmask_MSI_irq(unsigned int vector) | ||
152 | { | ||
153 | msi_set_mask_bit(vector, 0); | ||
154 | } | ||
155 | |||
156 | static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector) | ||
157 | { | ||
158 | struct msi_desc *entry; | ||
159 | unsigned long flags; | ||
160 | |||
161 | spin_lock_irqsave(&msi_lock, flags); | ||
162 | entry = msi_desc[vector]; | ||
163 | if (!entry || !entry->dev) { | ||
164 | spin_unlock_irqrestore(&msi_lock, flags); | ||
165 | return 0; | ||
166 | } | ||
167 | entry->msi_attrib.state = 1; /* Mark it active */ | ||
168 | spin_unlock_irqrestore(&msi_lock, flags); | ||
169 | |||
170 | return 0; /* never anything pending */ | ||
171 | } | ||
172 | |||
173 | static void release_msi(unsigned int vector); | ||
174 | static void shutdown_msi_irq(unsigned int vector) | ||
175 | { | ||
176 | release_msi(vector); | ||
177 | } | ||
178 | |||
179 | #define shutdown_msi_irq_wo_maskbit shutdown_msi_irq | ||
180 | static void enable_msi_irq_wo_maskbit(unsigned int vector) {} | ||
181 | static void disable_msi_irq_wo_maskbit(unsigned int vector) {} | ||
182 | static void ack_msi_irq_wo_maskbit(unsigned int vector) {} | ||
183 | static void end_msi_irq_wo_maskbit(unsigned int vector) | ||
184 | { | ||
185 | move_msi(vector); | ||
186 | ack_APIC_irq(); | ||
187 | } | ||
188 | |||
189 | static unsigned int startup_msi_irq_w_maskbit(unsigned int vector) | ||
190 | { | ||
191 | struct msi_desc *entry; | ||
192 | unsigned long flags; | ||
193 | |||
194 | spin_lock_irqsave(&msi_lock, flags); | ||
195 | entry = msi_desc[vector]; | ||
196 | if (!entry || !entry->dev) { | ||
197 | spin_unlock_irqrestore(&msi_lock, flags); | ||
198 | return 0; | ||
199 | } | ||
200 | entry->msi_attrib.state = 1; /* Mark it active */ | ||
201 | spin_unlock_irqrestore(&msi_lock, flags); | ||
202 | |||
203 | unmask_MSI_irq(vector); | ||
204 | return 0; /* never anything pending */ | ||
205 | } | ||
206 | |||
207 | #define shutdown_msi_irq_w_maskbit shutdown_msi_irq | ||
208 | #define enable_msi_irq_w_maskbit unmask_MSI_irq | ||
209 | #define disable_msi_irq_w_maskbit mask_MSI_irq | ||
210 | #define ack_msi_irq_w_maskbit mask_MSI_irq | ||
211 | |||
212 | static void end_msi_irq_w_maskbit(unsigned int vector) | ||
213 | { | ||
214 | move_msi(vector); | ||
215 | unmask_MSI_irq(vector); | ||
216 | ack_APIC_irq(); | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices, | ||
221 | * which implement the MSI-X Capability Structure. | ||
222 | */ | ||
223 | static struct hw_interrupt_type msix_irq_type = { | ||
224 | .typename = "PCI-MSI-X", | ||
225 | .startup = startup_msi_irq_w_maskbit, | ||
226 | .shutdown = shutdown_msi_irq_w_maskbit, | ||
227 | .enable = enable_msi_irq_w_maskbit, | ||
228 | .disable = disable_msi_irq_w_maskbit, | ||
229 | .ack = ack_msi_irq_w_maskbit, | ||
230 | .end = end_msi_irq_w_maskbit, | ||
231 | .set_affinity = set_msi_irq_affinity | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices, | ||
236 | * which implement the MSI Capability Structure with | ||
237 | * Mask-and-Pending Bits. | ||
238 | */ | ||
239 | static struct hw_interrupt_type msi_irq_w_maskbit_type = { | ||
240 | .typename = "PCI-MSI", | ||
241 | .startup = startup_msi_irq_w_maskbit, | ||
242 | .shutdown = shutdown_msi_irq_w_maskbit, | ||
243 | .enable = enable_msi_irq_w_maskbit, | ||
244 | .disable = disable_msi_irq_w_maskbit, | ||
245 | .ack = ack_msi_irq_w_maskbit, | ||
246 | .end = end_msi_irq_w_maskbit, | ||
247 | .set_affinity = set_msi_irq_affinity | ||
248 | }; | ||
249 | |||
250 | /* | ||
251 | * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices, | ||
252 | * which implement the MSI Capability Structure without | ||
253 | * Mask-and-Pending Bits. | ||
254 | */ | ||
255 | static struct hw_interrupt_type msi_irq_wo_maskbit_type = { | ||
256 | .typename = "PCI-MSI", | ||
257 | .startup = startup_msi_irq_wo_maskbit, | ||
258 | .shutdown = shutdown_msi_irq_wo_maskbit, | ||
259 | .enable = enable_msi_irq_wo_maskbit, | ||
260 | .disable = disable_msi_irq_wo_maskbit, | ||
261 | .ack = ack_msi_irq_wo_maskbit, | ||
262 | .end = end_msi_irq_wo_maskbit, | ||
263 | .set_affinity = set_msi_irq_affinity | ||
264 | }; | ||
265 | |||
266 | static void msi_data_init(struct msg_data *msi_data, | ||
267 | unsigned int vector) | ||
268 | { | ||
269 | memset(msi_data, 0, sizeof(struct msg_data)); | ||
270 | msi_data->vector = (u8)vector; | ||
271 | msi_data->delivery_mode = MSI_DELIVERY_MODE; | ||
272 | msi_data->level = MSI_LEVEL_MODE; | ||
273 | msi_data->trigger = MSI_TRIGGER_MODE; | ||
274 | } | ||
275 | |||
276 | static void msi_address_init(struct msg_address *msi_address) | ||
277 | { | ||
278 | unsigned int dest_id; | ||
279 | |||
280 | memset(msi_address, 0, sizeof(struct msg_address)); | ||
281 | msi_address->hi_address = (u32)0; | ||
282 | dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT); | ||
283 | msi_address->lo_address.u.dest_mode = MSI_DEST_MODE; | ||
284 | msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE; | ||
285 | msi_address->lo_address.u.dest_id = dest_id; | ||
286 | msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT); | ||
287 | } | ||
288 | |||
289 | static int msi_free_vector(struct pci_dev* dev, int vector, int reassign); | ||
290 | static int assign_msi_vector(void) | ||
291 | { | ||
292 | static int new_vector_avail = 1; | ||
293 | int vector; | ||
294 | unsigned long flags; | ||
295 | |||
296 | /* | ||
297 | * msi_lock is provided to ensure that a successfully allocated MSI | ||
298 | * vector is assigned uniquely among drivers. | ||
299 | */ | ||
300 | spin_lock_irqsave(&msi_lock, flags); | ||
301 | |||
302 | if (!new_vector_avail) { | ||
303 | int free_vector = 0; | ||
304 | |||
305 | /* | ||
306 | * vector_irq[] = -1 indicates that this specific vector is: | ||
307 | * - assigned for MSI (since MSIs have no associated IRQs) or | ||
308 | * - assigned for legacy if less than 16, or | ||
309 | * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping | ||
310 | * vector_irq[] = 0 indicates that this vector, previously | ||
311 | * assigned for MSI, was freed by a hotplug remove operation. | ||
312 | * This vector will be reused for any subsequent hotplug add | ||
313 | * operations. | ||
314 | * vector_irq[] > 0 indicates that this vector is assigned for | ||
315 | * IOxAPIC IRQs. This vector and its value provide a 1-to-1 | ||
316 | * vector-to-IOxAPIC IRQ mapping. | ||
317 | */ | ||
318 | for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) { | ||
319 | if (vector_irq[vector] != 0) | ||
320 | continue; | ||
321 | free_vector = vector; | ||
322 | if (!msi_desc[vector]) | ||
323 | break; | ||
324 | else | ||
325 | continue; | ||
326 | } | ||
327 | if (!free_vector) { | ||
328 | spin_unlock_irqrestore(&msi_lock, flags); | ||
329 | return -EBUSY; | ||
330 | } | ||
331 | vector_irq[free_vector] = -1; | ||
332 | nr_released_vectors--; | ||
333 | spin_unlock_irqrestore(&msi_lock, flags); | ||
334 | if (msi_desc[free_vector] != NULL) { | ||
335 | struct pci_dev *dev; | ||
336 | int tail; | ||
337 | |||
338 | /* free all linked vectors before re-assign */ | ||
339 | do { | ||
340 | spin_lock_irqsave(&msi_lock, flags); | ||
341 | dev = msi_desc[free_vector]->dev; | ||
342 | tail = msi_desc[free_vector]->link.tail; | ||
343 | spin_unlock_irqrestore(&msi_lock, flags); | ||
344 | msi_free_vector(dev, tail, 1); | ||
345 | } while (free_vector != tail); | ||
346 | } | ||
347 | |||
348 | return free_vector; | ||
349 | } | ||
350 | vector = assign_irq_vector(AUTO_ASSIGN); | ||
351 | last_alloc_vector = vector; | ||
352 | if (vector == LAST_DEVICE_VECTOR) | ||
353 | new_vector_avail = 0; | ||
354 | |||
355 | spin_unlock_irqrestore(&msi_lock, flags); | ||
356 | return vector; | ||
357 | } | ||
358 | |||
359 | static int get_new_vector(void) | ||
360 | { | ||
361 | int vector; | ||
362 | |||
363 | if ((vector = assign_msi_vector()) > 0) | ||
364 | set_intr_gate(vector, interrupt[vector]); | ||
365 | |||
366 | return vector; | ||
367 | } | ||
368 | |||
369 | static int msi_init(void) | ||
370 | { | ||
371 | static int status = -ENOMEM; | ||
372 | |||
373 | if (!status) | ||
374 | return status; | ||
375 | |||
376 | if (pci_msi_quirk) { | ||
377 | pci_msi_enable = 0; | ||
378 | printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n"); | ||
379 | status = -EINVAL; | ||
380 | return status; | ||
381 | } | ||
382 | |||
383 | if ((status = msi_cache_init()) < 0) { | ||
384 | pci_msi_enable = 0; | ||
385 | printk(KERN_WARNING "PCI: MSI cache init failed\n"); | ||
386 | return status; | ||
387 | } | ||
388 | last_alloc_vector = assign_irq_vector(AUTO_ASSIGN); | ||
389 | if (last_alloc_vector < 0) { | ||
390 | pci_msi_enable = 0; | ||
391 | printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n"); | ||
392 | status = -EBUSY; | ||
393 | return status; | ||
394 | } | ||
395 | vector_irq[last_alloc_vector] = 0; | ||
396 | nr_released_vectors++; | ||
397 | |||
398 | return status; | ||
399 | } | ||
400 | |||
401 | static int get_msi_vector(struct pci_dev *dev) | ||
402 | { | ||
403 | return get_new_vector(); | ||
404 | } | ||
405 | |||
406 | static struct msi_desc* alloc_msi_entry(void) | ||
407 | { | ||
408 | struct msi_desc *entry; | ||
409 | |||
410 | entry = (struct msi_desc*) kmem_cache_alloc(msi_cachep, SLAB_KERNEL); | ||
411 | if (!entry) | ||
412 | return NULL; | ||
413 | |||
414 | memset(entry, 0, sizeof(struct msi_desc)); | ||
415 | entry->link.tail = entry->link.head = 0; /* single message */ | ||
416 | entry->dev = NULL; | ||
417 | |||
418 | return entry; | ||
419 | } | ||
420 | |||
421 | static void attach_msi_entry(struct msi_desc *entry, int vector) | ||
422 | { | ||
423 | unsigned long flags; | ||
424 | |||
425 | spin_lock_irqsave(&msi_lock, flags); | ||
426 | msi_desc[vector] = entry; | ||
427 | spin_unlock_irqrestore(&msi_lock, flags); | ||
428 | } | ||
429 | |||
430 | static void irq_handler_init(int cap_id, int pos, int mask) | ||
431 | { | ||
432 | spin_lock(&irq_desc[pos].lock); | ||
433 | if (cap_id == PCI_CAP_ID_MSIX) | ||
434 | irq_desc[pos].handler = &msix_irq_type; | ||
435 | else { | ||
436 | if (!mask) | ||
437 | irq_desc[pos].handler = &msi_irq_wo_maskbit_type; | ||
438 | else | ||
439 | irq_desc[pos].handler = &msi_irq_w_maskbit_type; | ||
440 | } | ||
441 | spin_unlock(&irq_desc[pos].lock); | ||
442 | } | ||
443 | |||
444 | static void enable_msi_mode(struct pci_dev *dev, int pos, int type) | ||
445 | { | ||
446 | u16 control; | ||
447 | |||
448 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
449 | if (type == PCI_CAP_ID_MSI) { | ||
450 | /* Set enabled bits to single MSI & enable MSI_enable bit */ | ||
451 | msi_enable(control, 1); | ||
452 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
453 | } else { | ||
454 | msix_enable(control); | ||
455 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
456 | } | ||
457 | if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { | ||
458 | /* PCI Express Endpoint device detected */ | ||
459 | u16 cmd; | ||
460 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
461 | cmd |= PCI_COMMAND_INTX_DISABLE; | ||
462 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
463 | } | ||
464 | } | ||
465 | |||
466 | static void disable_msi_mode(struct pci_dev *dev, int pos, int type) | ||
467 | { | ||
468 | u16 control; | ||
469 | |||
470 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
471 | if (type == PCI_CAP_ID_MSI) { | ||
472 | /* Clear the MSI enable bit */ | ||
473 | msi_disable(control); | ||
474 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
475 | } else { | ||
476 | msix_disable(control); | ||
477 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
478 | } | ||
479 | if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { | ||
480 | /* PCI Express Endpoint device detected */ | ||
481 | u16 cmd; | ||
482 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
483 | cmd &= ~PCI_COMMAND_INTX_DISABLE; | ||
484 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | static int msi_lookup_vector(struct pci_dev *dev, int type) | ||
489 | { | ||
490 | int vector; | ||
491 | unsigned long flags; | ||
492 | |||
493 | spin_lock_irqsave(&msi_lock, flags); | ||
494 | for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) { | ||
495 | if (!msi_desc[vector] || msi_desc[vector]->dev != dev || | ||
496 | msi_desc[vector]->msi_attrib.type != type || | ||
497 | msi_desc[vector]->msi_attrib.default_vector != dev->irq) | ||
498 | continue; | ||
499 | spin_unlock_irqrestore(&msi_lock, flags); | ||
500 | /* This pre-assigned MSI vector for this device | ||
501 | already exists. Override dev->irq with this vector */ | ||
502 | dev->irq = vector; | ||
503 | return 0; | ||
504 | } | ||
505 | spin_unlock_irqrestore(&msi_lock, flags); | ||
506 | |||
507 | return -EACCES; | ||
508 | } | ||
509 | |||
510 | void pci_scan_msi_device(struct pci_dev *dev) | ||
511 | { | ||
512 | if (!dev) | ||
513 | return; | ||
514 | |||
515 | if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0) | ||
516 | nr_msix_devices++; | ||
517 | else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0) | ||
518 | nr_reserved_vectors++; | ||
519 | } | ||
520 | |||
521 | /** | ||
522 | * msi_capability_init - configure device's MSI capability structure | ||
523 | * @dev: pointer to the pci_dev data structure of MSI device function | ||
524 | * | ||
525 | * Set up the MSI capability structure of the device function with a single | ||
526 | * MSI vector, regardless of whether the device function is capable of | ||
527 | * handling multiple messages. A return of zero indicates successful setup | ||
528 | * of entry zero with the new MSI vector; non-zero indicates otherwise. | ||
529 | **/ | ||
530 | static int msi_capability_init(struct pci_dev *dev) | ||
531 | { | ||
532 | struct msi_desc *entry; | ||
533 | struct msg_address address; | ||
534 | struct msg_data data; | ||
535 | int pos, vector; | ||
536 | u16 control; | ||
537 | |||
538 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | ||
539 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
540 | /* MSI Entry Initialization */ | ||
541 | if (!(entry = alloc_msi_entry())) | ||
542 | return -ENOMEM; | ||
543 | |||
544 | if ((vector = get_msi_vector(dev)) < 0) { | ||
545 | kmem_cache_free(msi_cachep, entry); | ||
546 | return -EBUSY; | ||
547 | } | ||
548 | entry->link.head = vector; | ||
549 | entry->link.tail = vector; | ||
550 | entry->msi_attrib.type = PCI_CAP_ID_MSI; | ||
551 | entry->msi_attrib.state = 0; /* Mark it not active */ | ||
552 | entry->msi_attrib.entry_nr = 0; | ||
553 | entry->msi_attrib.maskbit = is_mask_bit_support(control); | ||
554 | entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */ | ||
555 | dev->irq = vector; | ||
556 | entry->dev = dev; | ||
557 | if (is_mask_bit_support(control)) { | ||
558 | entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, | ||
559 | is_64bit_address(control)); | ||
560 | } | ||
561 | /* Replace with MSI handler */ | ||
562 | irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit); | ||
563 | /* Configure MSI capability structure */ | ||
564 | msi_address_init(&address); | ||
565 | msi_data_init(&data, vector); | ||
566 | entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >> | ||
567 | MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); | ||
568 | pci_write_config_dword(dev, msi_lower_address_reg(pos), | ||
569 | address.lo_address.value); | ||
570 | if (is_64bit_address(control)) { | ||
571 | pci_write_config_dword(dev, | ||
572 | msi_upper_address_reg(pos), address.hi_address); | ||
573 | pci_write_config_word(dev, | ||
574 | msi_data_reg(pos, 1), *((u32*)&data)); | ||
575 | } else | ||
576 | pci_write_config_word(dev, | ||
577 | msi_data_reg(pos, 0), *((u32*)&data)); | ||
578 | if (entry->msi_attrib.maskbit) { | ||
579 | unsigned int maskbits, temp; | ||
580 | /* All MSIs are unmasked by default; mask them all */ | ||
581 | pci_read_config_dword(dev, | ||
582 | msi_mask_bits_reg(pos, is_64bit_address(control)), | ||
583 | &maskbits); | ||
584 | temp = (1 << multi_msi_capable(control)); | ||
585 | temp = ((temp - 1) & ~temp); | ||
586 | maskbits |= temp; | ||
587 | pci_write_config_dword(dev, | ||
588 | msi_mask_bits_reg(pos, is_64bit_address(control)), | ||
589 | maskbits); | ||
590 | } | ||
591 | attach_msi_entry(entry, vector); | ||
592 | /* Set MSI enabled bits */ | ||
593 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /** | ||
599 | * msix_capability_init - configure device's MSI-X capability | ||
600 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
601 | * | ||
602 | * Set up the MSI-X capability structure of the device function with the | ||
603 | * requested MSI-X vectors. A return of zero indicates successful setup of | ||
604 | * the requested MSI-X entries with allocated vectors; non-zero indicates otherwise. | ||
605 | **/ | ||
606 | static int msix_capability_init(struct pci_dev *dev, | ||
607 | struct msix_entry *entries, int nvec) | ||
608 | { | ||
609 | struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; | ||
610 | struct msg_address address; | ||
611 | struct msg_data data; | ||
612 | int vector, pos, i, j, nr_entries, temp = 0; | ||
613 | u32 phys_addr, table_offset; | ||
614 | u16 control; | ||
615 | u8 bir; | ||
616 | void __iomem *base; | ||
617 | |||
618 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
619 | /* Request & Map MSI-X table region */ | ||
620 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
621 | nr_entries = multi_msix_capable(control); | ||
622 | pci_read_config_dword(dev, msix_table_offset_reg(pos), | ||
623 | &table_offset); | ||
624 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
625 | phys_addr = pci_resource_start (dev, bir); | ||
626 | phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK); | ||
627 | base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); | ||
628 | if (base == NULL) | ||
629 | return -ENOMEM; | ||
630 | |||
631 | /* MSI-X Table Initialization */ | ||
632 | for (i = 0; i < nvec; i++) { | ||
633 | entry = alloc_msi_entry(); | ||
634 | if (!entry) | ||
635 | break; | ||
636 | if ((vector = get_msi_vector(dev)) < 0) | ||
637 | break; | ||
638 | |||
639 | j = entries[i].entry; | ||
640 | entries[i].vector = vector; | ||
641 | entry->msi_attrib.type = PCI_CAP_ID_MSIX; | ||
642 | entry->msi_attrib.state = 0; /* Mark it not active */ | ||
643 | entry->msi_attrib.entry_nr = j; | ||
644 | entry->msi_attrib.maskbit = 1; | ||
645 | entry->msi_attrib.default_vector = dev->irq; | ||
646 | entry->dev = dev; | ||
647 | entry->mask_base = base; | ||
648 | if (!head) { | ||
649 | entry->link.head = vector; | ||
650 | entry->link.tail = vector; | ||
651 | head = entry; | ||
652 | } else { | ||
653 | entry->link.head = temp; | ||
654 | entry->link.tail = tail->link.tail; | ||
655 | tail->link.tail = vector; | ||
656 | head->link.head = vector; | ||
657 | } | ||
658 | temp = vector; | ||
659 | tail = entry; | ||
660 | /* Replace with MSI-X handler */ | ||
661 | irq_handler_init(PCI_CAP_ID_MSIX, vector, 1); | ||
662 | /* Configure MSI-X capability structure */ | ||
663 | msi_address_init(&address); | ||
664 | msi_data_init(&data, vector); | ||
665 | entry->msi_attrib.current_cpu = | ||
666 | ((address.lo_address.u.dest_id >> | ||
667 | MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); | ||
668 | writel(address.lo_address.value, | ||
669 | base + j * PCI_MSIX_ENTRY_SIZE + | ||
670 | PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); | ||
671 | writel(address.hi_address, | ||
672 | base + j * PCI_MSIX_ENTRY_SIZE + | ||
673 | PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); | ||
674 | writel(*(u32*)&data, | ||
675 | base + j * PCI_MSIX_ENTRY_SIZE + | ||
676 | PCI_MSIX_ENTRY_DATA_OFFSET); | ||
677 | attach_msi_entry(entry, vector); | ||
678 | } | ||
679 | if (i != nvec) { | ||
680 | i--; | ||
681 | for (; i >= 0; i--) { | ||
682 | vector = (entries + i)->vector; | ||
683 | msi_free_vector(dev, vector, 0); | ||
684 | (entries + i)->vector = 0; | ||
685 | } | ||
686 | return -EBUSY; | ||
687 | } | ||
688 | /* Set MSI-X enabled bits */ | ||
689 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | ||
690 | |||
691 | return 0; | ||
692 | } | ||
693 | |||
694 | /** | ||
695 | * pci_enable_msi - configure device's MSI capability structure | ||
696 | * @dev: pointer to the pci_dev data structure of MSI device function | ||
697 | * | ||
698 | * Set up the MSI capability structure of the device function with | ||
699 | * a single MSI vector when its software driver requests that MSI mode | ||
700 | * be enabled on its hardware device function. A return of zero | ||
701 | * indicates successful setup of entry zero with the new MSI | ||
702 | * vector; non-zero indicates otherwise. | ||
703 | **/ | ||
704 | int pci_enable_msi(struct pci_dev* dev) | ||
705 | { | ||
706 | int pos, temp, status = -EINVAL; | ||
707 | u16 control; | ||
708 | |||
709 | if (!pci_msi_enable || !dev) | ||
710 | return status; | ||
711 | |||
712 | temp = dev->irq; | ||
713 | |||
714 | if ((status = msi_init()) < 0) | ||
715 | return status; | ||
716 | |||
717 | if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI))) | ||
718 | return -EINVAL; | ||
719 | |||
720 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
721 | if (control & PCI_MSI_FLAGS_ENABLE) | ||
722 | return 0; /* Already in MSI mode */ | ||
723 | |||
724 | if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { | ||
725 | /* Lookup success */ | ||
726 | unsigned long flags; | ||
727 | |||
728 | spin_lock_irqsave(&msi_lock, flags); | ||
729 | if (!vector_irq[dev->irq]) { | ||
730 | msi_desc[dev->irq]->msi_attrib.state = 0; | ||
731 | vector_irq[dev->irq] = -1; | ||
732 | nr_released_vectors--; | ||
733 | spin_unlock_irqrestore(&msi_lock, flags); | ||
734 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | ||
735 | return 0; | ||
736 | } | ||
737 | spin_unlock_irqrestore(&msi_lock, flags); | ||
738 | dev->irq = temp; | ||
739 | } | ||
740 | /* Check whether the driver already requested MSI-X vectors */ | ||
741 | if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 && | ||
742 | !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { | ||
743 | printk(KERN_INFO "PCI: %s: Can't enable MSI. " | ||
744 | "Device already has MSI-X vectors assigned\n", | ||
745 | pci_name(dev)); | ||
746 | dev->irq = temp; | ||
747 | return -EINVAL; | ||
748 | } | ||
749 | status = msi_capability_init(dev); | ||
750 | if (!status) { | ||
751 | if (!pos) | ||
752 | nr_reserved_vectors--; /* Only MSI capable */ | ||
753 | else if (nr_msix_devices > 0) | ||
754 | nr_msix_devices--; /* Both MSI and MSI-X capable, | ||
755 | but MSI was chosen */ | ||
756 | } | ||
757 | |||
758 | return status; | ||
759 | } | ||
760 | |||
761 | void pci_disable_msi(struct pci_dev* dev) | ||
762 | { | ||
763 | struct msi_desc *entry; | ||
764 | int pos, default_vector; | ||
765 | u16 control; | ||
766 | unsigned long flags; | ||
767 | |||
768 | if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI))) | ||
769 | return; | ||
770 | |||
771 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
772 | if (!(control & PCI_MSI_FLAGS_ENABLE)) | ||
773 | return; | ||
774 | |||
775 | spin_lock_irqsave(&msi_lock, flags); | ||
776 | entry = msi_desc[dev->irq]; | ||
777 | if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { | ||
778 | spin_unlock_irqrestore(&msi_lock, flags); | ||
779 | return; | ||
780 | } | ||
781 | if (entry->msi_attrib.state) { | ||
782 | spin_unlock_irqrestore(&msi_lock, flags); | ||
783 | printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without " | ||
784 | "free_irq() on MSI vector %d\n", | ||
785 | pci_name(dev), dev->irq); | ||
786 | BUG_ON(entry->msi_attrib.state > 0); | ||
787 | } else { | ||
788 | vector_irq[dev->irq] = 0; /* free it */ | ||
789 | nr_released_vectors++; | ||
790 | default_vector = entry->msi_attrib.default_vector; | ||
791 | spin_unlock_irqrestore(&msi_lock, flags); | ||
792 | /* Restore dev->irq to its default pin-assertion vector */ | ||
793 | dev->irq = default_vector; | ||
794 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | ||
795 | PCI_CAP_ID_MSI); | ||
796 | } | ||
797 | } | ||
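/*
 * [Editorial sketch, not part of this commit] A minimal example of how a
 * driver might use pci_enable_msi()/pci_disable_msi() above, assuming the
 * 2.6-era request_irq() handler signature. The foo_* names are hypothetical.
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* service the device as with a pin-based interrupt */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct pci_dev *pdev, void *foo_priv)
{
	/* Try MSI; on failure dev->irq still holds the pin-assertion vector. */
	if (pci_enable_msi(pdev))
		printk(KERN_INFO "foo: MSI unavailable, using INTx\n");

	/* dev->irq is now either the MSI vector or the legacy IRQ. */
	return request_irq(pdev->irq, foo_interrupt, 0, "foo", foo_priv);
}

static void foo_release_irq(struct pci_dev *pdev, void *foo_priv)
{
	/* free_irq() must precede pci_disable_msi(), or the warning above fires. */
	free_irq(pdev->irq, foo_priv);
	pci_disable_msi(pdev);	/* restores dev->irq to its default vector */
}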
798 | |||
799 | static void release_msi(unsigned int vector) | ||
800 | { | ||
801 | struct msi_desc *entry; | ||
802 | unsigned long flags; | ||
803 | |||
804 | spin_lock_irqsave(&msi_lock, flags); | ||
805 | entry = msi_desc[vector]; | ||
806 | if (entry && entry->dev) | ||
807 | entry->msi_attrib.state = 0; /* Mark it not active */ | ||
808 | spin_unlock_irqrestore(&msi_lock, flags); | ||
809 | } | ||
810 | |||
811 | static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) | ||
812 | { | ||
813 | struct msi_desc *entry; | ||
814 | int head, entry_nr, type; | ||
815 | void __iomem *base; | ||
816 | unsigned long flags; | ||
817 | |||
818 | spin_lock_irqsave(&msi_lock, flags); | ||
819 | entry = msi_desc[vector]; | ||
820 | if (!entry || entry->dev != dev) { | ||
821 | spin_unlock_irqrestore(&msi_lock, flags); | ||
822 | return -EINVAL; | ||
823 | } | ||
824 | type = entry->msi_attrib.type; | ||
825 | entry_nr = entry->msi_attrib.entry_nr; | ||
826 | head = entry->link.head; | ||
827 | base = entry->mask_base; | ||
828 | msi_desc[entry->link.head]->link.tail = entry->link.tail; | ||
829 | msi_desc[entry->link.tail]->link.head = entry->link.head; | ||
830 | entry->dev = NULL; | ||
831 | if (!reassign) { | ||
832 | vector_irq[vector] = 0; | ||
833 | nr_released_vectors++; | ||
834 | } | ||
835 | msi_desc[vector] = NULL; | ||
836 | spin_unlock_irqrestore(&msi_lock, flags); | ||
837 | |||
838 | kmem_cache_free(msi_cachep, entry); | ||
839 | |||
840 | if (type == PCI_CAP_ID_MSIX) { | ||
841 | if (!reassign) | ||
842 | writel(1, base + | ||
843 | entry_nr * PCI_MSIX_ENTRY_SIZE + | ||
844 | PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); | ||
845 | |||
846 | if (head == vector) { | ||
847 | /* | ||
848 | * Detect last MSI-X vector to be released. | ||
849 | * Release the MSI-X memory-mapped table. | ||
850 | */ | ||
851 | int pos, nr_entries; | ||
852 | u32 phys_addr, table_offset; | ||
853 | u16 control; | ||
854 | u8 bir; | ||
855 | |||
856 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
857 | pci_read_config_word(dev, msi_control_reg(pos), | ||
858 | &control); | ||
859 | nr_entries = multi_msix_capable(control); | ||
860 | pci_read_config_dword(dev, msix_table_offset_reg(pos), | ||
861 | &table_offset); | ||
862 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
863 | phys_addr = pci_resource_start (dev, bir); | ||
864 | phys_addr += (u32)(table_offset & | ||
865 | ~PCI_MSIX_FLAGS_BIRMASK); | ||
866 | iounmap(base); | ||
867 | } | ||
868 | } | ||
869 | |||
870 | return 0; | ||
871 | } | ||
872 | |||
873 | static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec) | ||
874 | { | ||
875 | int vector = head, tail = 0; | ||
876 | int i, j = 0, nr_entries = 0; | ||
877 | void __iomem *base; | ||
878 | unsigned long flags; | ||
879 | |||
880 | spin_lock_irqsave(&msi_lock, flags); | ||
881 | while (head != tail) { | ||
882 | nr_entries++; | ||
883 | tail = msi_desc[vector]->link.tail; | ||
884 | if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr) | ||
885 | j = vector; | ||
886 | vector = tail; | ||
887 | } | ||
888 | if (*nvec > nr_entries) { | ||
889 | spin_unlock_irqrestore(&msi_lock, flags); | ||
890 | *nvec = nr_entries; | ||
891 | return -EINVAL; | ||
892 | } | ||
893 | vector = ((j > 0) ? j : head); | ||
894 | for (i = 0; i < *nvec; i++) { | ||
895 | j = msi_desc[vector]->msi_attrib.entry_nr; | ||
896 | msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */ | ||
897 | vector_irq[vector] = -1; /* Mark it busy */ | ||
898 | nr_released_vectors--; | ||
899 | entries[i].vector = vector; | ||
900 | if (j != (entries + i)->entry) { | ||
901 | base = msi_desc[vector]->mask_base; | ||
902 | msi_desc[vector]->msi_attrib.entry_nr = | ||
903 | (entries + i)->entry; | ||
904 | writel( readl(base + j * PCI_MSIX_ENTRY_SIZE + | ||
905 | PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base + | ||
906 | (entries + i)->entry * PCI_MSIX_ENTRY_SIZE + | ||
907 | PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); | ||
908 | writel( readl(base + j * PCI_MSIX_ENTRY_SIZE + | ||
909 | PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base + | ||
910 | (entries + i)->entry * PCI_MSIX_ENTRY_SIZE + | ||
911 | PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); | ||
912 | writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE + | ||
913 | PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector, | ||
914 | base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE + | ||
915 | PCI_MSIX_ENTRY_DATA_OFFSET); | ||
916 | } | ||
917 | vector = msi_desc[vector]->link.tail; | ||
918 | } | ||
919 | spin_unlock_irqrestore(&msi_lock, flags); | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | |||
924 | /** | ||
925 | * pci_enable_msix - configure device's MSI-X capability structure | ||
926 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
927 | * @entries: pointer to an array of MSI-X entries | ||
928 | * @nvec: number of MSI-X vectors requested for allocation by device driver | ||
929 | * | ||
930 | * Set up the MSI-X capability structure of the device function with the number | ||
931 | * of requested vectors when its software driver requests that MSI-X mode | ||
932 | * be enabled on its hardware device function. A return of zero | ||
933 | * indicates successful configuration of the MSI-X capability structure | ||
934 | * with newly allocated MSI-X vectors. A return of < 0 indicates a failure. | ||
935 | * A return of > 0 indicates that the driver requested more vectors than are | ||
936 | * available; the driver should use the returned value to re-send | ||
937 | * its request. | ||
938 | **/ | ||
939 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | ||
940 | { | ||
941 | int status, pos, nr_entries, free_vectors; | ||
942 | int i, j, temp; | ||
943 | u16 control; | ||
944 | unsigned long flags; | ||
945 | |||
946 | if (!pci_msi_enable || !dev || !entries) | ||
947 | return -EINVAL; | ||
948 | |||
949 | if ((status = msi_init()) < 0) | ||
950 | return status; | ||
951 | |||
952 | if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX))) | ||
953 | return -EINVAL; | ||
954 | |||
955 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
956 | if (control & PCI_MSIX_FLAGS_ENABLE) | ||
957 | return -EINVAL; /* Already in MSI-X mode */ | ||
958 | |||
959 | nr_entries = multi_msix_capable(control); | ||
960 | if (nvec > nr_entries) | ||
961 | return -EINVAL; | ||
962 | |||
963 | /* Check for any invalid entries */ | ||
964 | for (i = 0; i < nvec; i++) { | ||
965 | if (entries[i].entry >= nr_entries) | ||
966 | return -EINVAL; /* invalid entry */ | ||
967 | for (j = i + 1; j < nvec; j++) { | ||
968 | if (entries[i].entry == entries[j].entry) | ||
969 | return -EINVAL; /* duplicate entry */ | ||
970 | } | ||
971 | } | ||
972 | temp = dev->irq; | ||
973 | if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { | ||
974 | /* Lookup success */ | ||
975 | nr_entries = nvec; | ||
976 | /* Reroute MSI-X table */ | ||
977 | if (reroute_msix_table(dev->irq, entries, &nr_entries)) { | ||
978 | /* #requested > #previous-assigned */ | ||
979 | dev->irq = temp; | ||
980 | return nr_entries; | ||
981 | } | ||
982 | dev->irq = temp; | ||
983 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | ||
984 | return 0; | ||
985 | } | ||
986 | /* Check whether the driver already requested an MSI vector */ | ||
987 | if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && | ||
988 | !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { | ||
989 | printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " | ||
990 | "Device already has an MSI vector assigned\n", | ||
991 | pci_name(dev)); | ||
992 | dev->irq = temp; | ||
993 | return -EINVAL; | ||
994 | } | ||
995 | |||
996 | spin_lock_irqsave(&msi_lock, flags); | ||
997 | /* | ||
998 | * msi_lock is provided to ensure that enough vector resources are | ||
999 | * available before granting them. | ||
1000 | */ | ||
1001 | free_vectors = pci_vector_resources(last_alloc_vector, | ||
1002 | nr_released_vectors); | ||
1003 | /* Ensure that each MSI/MSI-X device has one vector reserved by | ||
1004 | default to prevent any single MSI-X driver from taking all | ||
1005 | available resources */ | ||
1006 | free_vectors -= nr_reserved_vectors; | ||
1007 | /* Find the average of free vectors among MSI-X devices */ | ||
1008 | if (nr_msix_devices > 0) | ||
1009 | free_vectors /= nr_msix_devices; | ||
1010 | spin_unlock_irqrestore(&msi_lock, flags); | ||
1011 | |||
1012 | if (nvec > free_vectors) { | ||
1013 | if (free_vectors > 0) | ||
1014 | return free_vectors; | ||
1015 | else | ||
1016 | return -EBUSY; | ||
1017 | } | ||
1018 | |||
1019 | status = msix_capability_init(dev, entries, nvec); | ||
1020 | if (!status && nr_msix_devices > 0) | ||
1021 | nr_msix_devices--; | ||
1022 | |||
1023 | return status; | ||
1024 | } | ||
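/*
 * [Editorial sketch, not part of this commit] Requesting MSI-X vectors with
 * pci_enable_msix() above and retrying with fewer entries when the positive
 * return value indicates the request exceeded the vectors available.
 * FOO_NVEC and foo_setup_msix are hypothetical.
 */
#define FOO_NVEC 4

static int foo_setup_msix(struct pci_dev *pdev)
{
	struct msix_entry entries[FOO_NVEC];
	int i, err, nvec = FOO_NVEC;

	for (i = 0; i < FOO_NVEC; i++)
		entries[i].entry = i;		/* MSI-X table slot to use */

	err = pci_enable_msix(pdev, entries, nvec);
	if (err > 0) {
		/* Fewer vectors are available; re-send the request with that count. */
		nvec = err;
		err = pci_enable_msix(pdev, entries, nvec);
	}
	if (err)
		return err;	/* < 0: MSI-X could not be enabled */

	/* entries[0..nvec-1].vector now hold vectors to pass to request_irq(). */
	return 0;
}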
1025 | |||
1026 | void pci_disable_msix(struct pci_dev* dev) | ||
1027 | { | ||
1028 | int pos, temp; | ||
1029 | u16 control; | ||
1030 | |||
1031 | if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX))) | ||
1032 | return; | ||
1033 | |||
1034 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
1035 | if (!(control & PCI_MSIX_FLAGS_ENABLE)) | ||
1036 | return; | ||
1037 | |||
1038 | temp = dev->irq; | ||
1039 | if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { | ||
1040 | int state, vector, head, tail = 0, warning = 0; | ||
1041 | unsigned long flags; | ||
1042 | |||
1043 | vector = head = dev->irq; | ||
1044 | spin_lock_irqsave(&msi_lock, flags); | ||
1045 | while (head != tail) { | ||
1046 | state = msi_desc[vector]->msi_attrib.state; | ||
1047 | if (state) | ||
1048 | warning = 1; | ||
1049 | else { | ||
1050 | vector_irq[vector] = 0; /* free it */ | ||
1051 | nr_released_vectors++; | ||
1052 | } | ||
1053 | tail = msi_desc[vector]->link.tail; | ||
1054 | vector = tail; | ||
1055 | } | ||
1056 | spin_unlock_irqrestore(&msi_lock, flags); | ||
1057 | if (warning) { | ||
1058 | dev->irq = temp; | ||
1059 | printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without " | ||
1060 | "free_irq() on all MSI-X vectors\n", | ||
1061 | pci_name(dev)); | ||
1062 | BUG_ON(warning > 0); | ||
1063 | } else { | ||
1064 | dev->irq = temp; | ||
1065 | disable_msi_mode(dev, | ||
1066 | pci_find_capability(dev, PCI_CAP_ID_MSIX), | ||
1067 | PCI_CAP_ID_MSIX); | ||
1068 | |||
1069 | } | ||
1070 | } | ||
1071 | } | ||
1072 | |||
1073 | /** | ||
1074 | * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state | ||
1075 | * @dev: pointer to the pci_dev data structure of MSI(X) device function | ||
1076 | * | ||
1077 | * Called during hotplug remove, when the device function | ||
1078 | * is hot-removed. All previously assigned MSI/MSI-X vectors, if | ||
1079 | * allocated for this device function, are reclaimed to the unused state | ||
1080 | * and may be reused later on. | ||
1081 | **/ | ||
1082 | void msi_remove_pci_irq_vectors(struct pci_dev* dev) | ||
1083 | { | ||
1084 | int state, pos, temp; | ||
1085 | unsigned long flags; | ||
1086 | |||
1087 | if (!pci_msi_enable || !dev) | ||
1088 | return; | ||
1089 | |||
1090 | temp = dev->irq; /* Save IOAPIC IRQ */ | ||
1091 | if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 && | ||
1092 | !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { | ||
1093 | spin_lock_irqsave(&msi_lock, flags); | ||
1094 | state = msi_desc[dev->irq]->msi_attrib.state; | ||
1095 | spin_unlock_irqrestore(&msi_lock, flags); | ||
1096 | if (state) { | ||
1097 | printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " | ||
1098 | "called without free_irq() on MSI vector %d\n", | ||
1099 | pci_name(dev), dev->irq); | ||
1100 | BUG_ON(state > 0); | ||
1101 | } else /* Release MSI vector assigned to this device */ | ||
1102 | msi_free_vector(dev, dev->irq, 0); | ||
1103 | dev->irq = temp; /* Restore IOAPIC IRQ */ | ||
1104 | } | ||
1105 | if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 && | ||
1106 | !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { | ||
1107 | int vector, head, tail = 0, warning = 0; | ||
1108 | void __iomem *base = NULL; | ||
1109 | |||
1110 | vector = head = dev->irq; | ||
1111 | while (head != tail) { | ||
1112 | spin_lock_irqsave(&msi_lock, flags); | ||
1113 | state = msi_desc[vector]->msi_attrib.state; | ||
1114 | tail = msi_desc[vector]->link.tail; | ||
1115 | base = msi_desc[vector]->mask_base; | ||
1116 | spin_unlock_irqrestore(&msi_lock, flags); | ||
1117 | if (state) | ||
1118 | warning = 1; | ||
1119 | else if (vector != head) /* Release MSI-X vector */ | ||
1120 | msi_free_vector(dev, vector, 0); | ||
1121 | vector = tail; | ||
1122 | } | ||
1123 | msi_free_vector(dev, vector, 0); | ||
1124 | if (warning) { | ||
1125 | /* Force to release the MSI-X memory-mapped table */ | ||
1126 | u32 phys_addr, table_offset; | ||
1127 | u16 control; | ||
1128 | u8 bir; | ||
1129 | |||
1130 | pci_read_config_word(dev, msi_control_reg(pos), | ||
1131 | &control); | ||
1132 | pci_read_config_dword(dev, msix_table_offset_reg(pos), | ||
1133 | &table_offset); | ||
1134 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
1135 | phys_addr = pci_resource_start (dev, bir); | ||
1136 | phys_addr += (u32)(table_offset & | ||
1137 | ~PCI_MSIX_FLAGS_BIRMASK); | ||
1138 | iounmap(base); | ||
1139 | printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " | ||
1140 | "called without free_irq() on all MSI-X vectors\n", | ||
1141 | pci_name(dev)); | ||
1142 | BUG_ON(warning > 0); | ||
1143 | } | ||
1144 | dev->irq = temp; /* Restore IOAPIC IRQ */ | ||
1145 | } | ||
1146 | } | ||
1147 | |||
1148 | EXPORT_SYMBOL(pci_enable_msi); | ||
1149 | EXPORT_SYMBOL(pci_disable_msi); | ||
1150 | EXPORT_SYMBOL(pci_enable_msix); | ||
1151 | EXPORT_SYMBOL(pci_disable_msix); | ||