author     Yinghai Lu <yinghai@kernel.org>          2008-12-05 21:58:31 -0500
committer  Ingo Molnar <mingo@elte.hu>              2008-12-08 08:31:51 -0500
commit     0b8f1efad30bd58f89961b82dfe68b9edf8fd2ac
tree       239251bad791fd60af8c0f2ba365b7188395c83f /drivers/pci
parent     218d11a8b071b23b76c484fd5f72a4fe3306801e
sparse irq_desc[] array: core kernel and x86 changes
Impact: new feature
Problem on distro kernels: irq_desc[NR_IRQS] takes megabytes of RAM when
NR_CPUS is set to large values. The goal is to be able to scale up to much
larger NR_IRQS values without impacting the (important) common case.
To solve this, we generalize irq_desc[NR_IRQS] to an (optional) array of
irq_desc pointers.
When CONFIG_SPARSE_IRQ=y is used, we allocate each irq_desc with
kzalloc_node(), which also makes the IRQ descriptors NUMA-local (to the
site that calls request_irq()).
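As an illustration of the shape of this change, here is a minimal sketch of the
sparse lookup/allocation path. The function names irq_to_desc() and
irq_to_desc_alloc_cpu() match the callers in the patch below; the pointer-array
name and the bodies are simplified assumptions, not the actual genirq code:

/* Illustrative sketch only -- simplified, not the real kernel/irq code. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc *irq_descs[NR_IRQS];	/* array name assumed for this sketch */

struct irq_desc *irq_to_desc(unsigned int irq)
{
	/* descriptors are reached through pointers and may not exist yet */
	return (irq < NR_IRQS) ? irq_descs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc) {
		/* allocate on the node of the requesting CPU: NUMA-local */
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC,
				    cpu_to_node(cpu));
		if (desc)
			irq_descs[irq] = desc;
	}
	return desc;
}
#else
struct irq_desc irq_desc[NR_IRQS];	/* the old, statically sized array */
#endif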
This gets rid of the static irq_cfg[] array on x86 as well: x86 now stores
irq_cfg in desc->chip_data.
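On the x86 side, the resulting accessor has roughly the following shape (a
hedged sketch based only on the description above; the real arch code also
handles allocation and moving cfg between CPUs):

/* Illustrative sketch: irq_cfg is reached via desc->chip_data instead of a
 * static irq_cfg[NR_IRQS] array. */
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? desc->chip_data : NULL;
}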
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/pci')
-rw-r--r--   drivers/pci/intr_remapping.c   76
1 file changed, 74 insertions(+), 2 deletions(-)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 2de5a3238c94..c9958ec5e25e 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -19,17 +19,75 @@ struct irq_2_iommu {
 	u8 irte_mask;
 };
 
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+{
+	struct irq_2_iommu *iommu;
+	int node;
+
+	node = cpu_to_node(cpu);
+
+	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
+	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+
+	return iommu;
+}
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if (WARN_ON_ONCE(!desc))
+		return NULL;
+
+	return desc->irq_2_iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc;
+	struct irq_2_iommu *irq_iommu;
+
+	/*
+	 * alloc irq desc if not allocated already.
+	 */
+	desc = irq_to_desc_alloc_cpu(irq, cpu);
+	if (!desc) {
+		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+		return NULL;
+	}
+
+	irq_iommu = desc->irq_2_iommu;
+
+	if (!irq_iommu)
+		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+
+	return desc->irq_2_iommu;
 }
 
 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
 {
+	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+	if (irq < nr_irqs)
+		return &irq_2_iommuX[irq];
+
+	return NULL;
+}
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
 	return irq_2_iommu(irq);
 }
+#endif
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
@@ -86,9 +144,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	if (!count)
 		return -1;
 
+#ifndef CONFIG_SPARSE_IRQ
 	/* protect irq_2_iommu_alloc later */
 	if (irq >= nr_irqs)
 		return -1;
+#endif
 
 	/*
 	 * start the IRTE search from index 0.
@@ -130,6 +190,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	table->base[i].present = 1;
 
 	irq_iommu = irq_2_iommu_alloc(irq);
+	if (!irq_iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+		return -1;
+	}
+
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = 0;
@@ -177,6 +243,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 
+	if (!irq_iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+		return -1;
+	}
+
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = subhandle;