author     Rusty Russell <rusty@rustcorp.com.au>   2008-12-13 06:25:51 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>   2008-12-13 06:25:51 -0500
commit     968ea6d80e395cf11a51143cfa1b9a14ada676df (patch)
tree       dc2acec8c9bdced33afe1e273ee5e0b0b93d2703 /drivers/pci
parent     7be7585393d311866653564fbcd10a3232773c0b (diff)
parent     8299608f140ae321e4eb5d1306184265d2b9511e (diff)
Merge ../linux-2.6-x86

Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/sched.c
	kernel/sched_stats.h
Diffstat (limited to 'drivers/pci')
 -rw-r--r--  drivers/pci/intr_remapping.c  76
 -rw-r--r--  drivers/pci/msi.c             55
 2 files changed, 112 insertions(+), 19 deletions(-)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 2de5a3238c94..c9958ec5e25e 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -19,17 +19,75 @@ struct irq_2_iommu {
 	u8 irte_mask;
 };
 
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+{
+	struct irq_2_iommu *iommu;
+	int node;
+
+	node = cpu_to_node(cpu);
+
+	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
+	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+
+	return iommu;
+}
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if (WARN_ON_ONCE(!desc))
+		return NULL;
+
+	return desc->irq_2_iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc;
+	struct irq_2_iommu *irq_iommu;
+
+	/*
+	 * alloc irq desc if not allocated already.
+	 */
+	desc = irq_to_desc_alloc_cpu(irq, cpu);
+	if (!desc) {
+		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+		return NULL;
+	}
+
+	irq_iommu = desc->irq_2_iommu;
+
+	if (!irq_iommu)
+		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+
+	return desc->irq_2_iommu;
 }
 
 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
 {
+	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+	if (irq < nr_irqs)
+		return &irq_2_iommuX[irq];
+
+	return NULL;
+}
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
 	return irq_2_iommu(irq);
 }
+#endif
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
@@ -86,9 +144,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	if (!count)
 		return -1;
 
+#ifndef CONFIG_SPARSE_IRQ
 	/* protect irq_2_iommu_alloc later */
 	if (irq >= nr_irqs)
 		return -1;
+#endif
 
 	/*
 	 * start the IRTE search from index 0.
@@ -130,6 +190,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		table->base[i].present = 1;
 
 	irq_iommu = irq_2_iommu_alloc(irq);
+	if (!irq_iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+		return -1;
+	}
+
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = 0;
@@ -177,6 +243,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 
+	if (!irq_iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+		return -1;
+	}
+
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = subhandle;
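The intr_remapping.c side of this merge replaces the fixed irq_2_iommuX[NR_IRQS] array with per-descriptor state that is allocated lazily on first use. The following standalone C sketch models that pattern in user space so it can be compiled and run outside the kernel; the names here (irq_state, desc_table, lookup_desc, irq_state_alloc) are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct irq_state {                      /* stand-in for struct irq_2_iommu */
	int irte_index;
};

struct irq_desc {                       /* stand-in for the kernel's irq_desc */
	struct irq_state *state;
};

#define MAX_IRQ 4
static struct irq_desc desc_table[MAX_IRQ];

static struct irq_desc *lookup_desc(unsigned int irq)
{
	return irq < MAX_IRQ ? &desc_table[irq] : NULL;
}

/* Allocate the per-IRQ state lazily, as irq_2_iommu_alloc() now does. */
static struct irq_state *irq_state_alloc(unsigned int irq)
{
	struct irq_desc *desc = lookup_desc(irq);

	if (!desc)
		return NULL;
	if (!desc->state)
		desc->state = calloc(1, sizeof(*desc->state));
	return desc->state;
}

int main(void)
{
	struct irq_state *s = irq_state_alloc(2);

	if (!s)                         /* callers must now handle NULL */
		return 1;
	s->irte_index = 42;
	printf("irq 2 -> irte index %d\n", lookup_desc(2)->state->irte_index);
	return 0;
}

Because the state can now be missing, alloc_irte() and set_irte_irq() above gain explicit NULL checks after irq_2_iommu_alloc(), which the old static-array code never needed.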
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 74801f7df9c9..11a51f8ed3b3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -103,11 +103,11 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
 	}
 }
 
-static void msix_flush_writes(unsigned int irq)
+static void msix_flush_writes(struct irq_desc *desc)
 {
 	struct msi_desc *entry;
 
-	entry = get_irq_msi(irq);
+	entry = get_irq_desc_msi(desc);
 	BUG_ON(!entry || !entry->dev);
 	switch (entry->msi_attrib.type) {
 	case PCI_CAP_ID_MSI:
@@ -135,11 +135,11 @@ static void msix_flush_writes(unsigned int irq)
  * Returns 1 if it succeeded in masking the interrupt and 0 if the device
  * doesn't support MSI masking.
  */
-static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
+static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
 {
 	struct msi_desc *entry;
 
-	entry = get_irq_msi(irq);
+	entry = get_irq_desc_msi(desc);
 	BUG_ON(!entry || !entry->dev);
 	switch (entry->msi_attrib.type) {
 	case PCI_CAP_ID_MSI:
@@ -172,9 +172,9 @@ static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
 	return 1;
 }
 
-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
+void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_msi(irq);
+	struct msi_desc *entry = get_irq_desc_msi(desc);
 	switch(entry->msi_attrib.type) {
 	case PCI_CAP_ID_MSI:
 	{
@@ -211,9 +211,16 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
 	}
 }
 
-void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+void read_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_msi(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	read_msi_msg_desc(desc, msg);
+}
+
+void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+{
+	struct msi_desc *entry = get_irq_desc_msi(desc);
 	switch (entry->msi_attrib.type) {
 	case PCI_CAP_ID_MSI:
 	{
@@ -252,21 +259,31 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
 	entry->msg = *msg;
 }
 
+void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	write_msi_msg_desc(desc, msg);
+}
+
 void mask_msi_irq(unsigned int irq)
 {
-	msi_set_mask_bits(irq, 1, 1);
-	msix_flush_writes(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	msi_set_mask_bits(desc, 1, 1);
+	msix_flush_writes(desc);
 }
 
 void unmask_msi_irq(unsigned int irq)
 {
-	msi_set_mask_bits(irq, 1, 0);
-	msix_flush_writes(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	msi_set_mask_bits(desc, 1, 0);
+	msix_flush_writes(desc);
 }
 
 static int msi_free_irqs(struct pci_dev* dev);
 
-
 static struct msi_desc* alloc_msi_entry(void)
 {
 	struct msi_desc *entry;
@@ -303,9 +320,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 	pci_intx_for_msi(dev, 0);
 	msi_set_enable(dev, 0);
 	write_msi_msg(dev->irq, &entry->msg);
-	if (entry->msi_attrib.maskbit)
-		msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
+	if (entry->msi_attrib.maskbit) {
+		struct irq_desc *desc = irq_to_desc(dev->irq);
+		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
 				  entry->msi_attrib.masked);
+	}
 
 	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
 	control &= ~PCI_MSI_FLAGS_QSIZE;
@@ -327,8 +346,9 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 	msix_set_enable(dev, 0);
 
 	list_for_each_entry(entry, &dev->msi_list, list) {
+		struct irq_desc *desc = irq_to_desc(entry->irq);
 		write_msi_msg(entry->irq, &entry->msg);
-		msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
+		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
 	}
 
 	BUG_ON(list_empty(&dev->msi_list));
@@ -596,7 +616,8 @@ void pci_msi_shutdown(struct pci_dev* dev)
 	/* Return the the pci reset with msi irqs unmasked */
 	if (entry->msi_attrib.maskbit) {
 		u32 mask = entry->msi_attrib.maskbits_mask;
-		msi_set_mask_bits(dev->irq, mask, ~mask);
+		struct irq_desc *desc = irq_to_desc(dev->irq);
+		msi_set_mask_bits(desc, mask, ~mask);
 	}
 	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
 		return;
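The msi.c changes above all follow one refactoring pattern: each helper gains a *_desc() variant that takes a struct irq_desc * directly, and the old irq-number entry point is kept as a thin wrapper around irq_to_desc(). A minimal user-space sketch of that wrapper pattern follows; the types and names (demo_msg, demo_desc, write_msg_desc) are illustrative, not the kernel's.

#include <stdio.h>

struct demo_msg  { unsigned int data; };
struct demo_desc { struct demo_msg msg; };   /* stand-in for irq_desc */

#define NR_DEMO_IRQS 4
static struct demo_desc descs[NR_DEMO_IRQS];

static struct demo_desc *demo_irq_to_desc(unsigned int irq)
{
	return irq < NR_DEMO_IRQS ? &descs[irq] : NULL;
}

/* Descriptor-based core, analogous to write_msi_msg_desc(). */
static void write_msg_desc(struct demo_desc *desc, struct demo_msg *msg)
{
	desc->msg = *msg;
}

/* Old entry point kept as a thin wrapper, analogous to write_msi_msg(). */
static void write_msg(unsigned int irq, struct demo_msg *msg)
{
	struct demo_desc *desc = demo_irq_to_desc(irq);

	if (desc)
		write_msg_desc(desc, msg);
}

int main(void)
{
	struct demo_msg m = { .data = 0xbeef };

	write_msg(1, &m);                          /* lookup, then write */
	write_msg_desc(demo_irq_to_desc(1), &m);   /* caller already has desc */
	printf("irq 1 data: %#x\n", descs[1].msg.data);
	return 0;
}

Callers that already hold the descriptor can use the _desc variant and skip the per-IRQ lookup, which is the point of pushing struct irq_desc * through these helpers.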