author		Suresh Siddha <suresh.b.siddha@intel.com>	2008-07-10 14:16:44 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-07-12 02:44:54 -0400
commit		b6fcb33ad6c05f152a672f7c96c1fab006527b80 (patch)
tree		9926a4914b7d929f31794315dc21768f38c3628e /drivers/pci
parent		2ae21010694e56461a63bfc80e960090ce0a5ed9 (diff)
x64, x2apic/intr-remap: routines managing Interrupt remapping table entries.
Add routines for allocating, looking up, modifying, flushing and freeing interrupt remapping table entries (IRTEs).
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
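As a rough illustration (not part of this commit), a caller in the x2apic interrupt setup path would reserve an IRTE for an irq, fill it in and push it to the hardware along the lines of the sketch below. The struct irte field names used here (present, vector, dest_id) are assumed from intr_remapping.h and may differ from the real layout.

/*
 * Hypothetical caller sketch: reserve one IRTE for an irq, program it,
 * and let modify_irte() write it and flush the interrupt entry cache.
 */
static int example_setup_remapped_irq(struct intel_iommu *iommu, int irq,
				      u8 vector, u32 dest_id)
{
	struct irte irte;
	int index;

	/* reserve one IRTE; alloc_irte() also records it in irq_2_iommu[] */
	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	memset(&irte, 0, sizeof(irte));
	irte.present = 1;		/* assumed field names, see above */
	irte.vector  = vector;
	irte.dest_id = dest_id;

	/* write the entry and invalidate the interrupt entry cache */
	return modify_irte(irq, &irte);
}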
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.h	4
-rw-r--r--	drivers/pci/intr_remapping.c	243
2 files changed, 247 insertions, 0 deletions
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index a81a74e2bd9e..2142c01e0143 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -123,6 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define ecap_qis(e)		((e) & 0x2)
 #define ecap_eim_support(e)	((e >> 4) & 0x1)
 #define ecap_ir_support(e)	((e >> 3) & 0x1)
+#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
 
 
 /* IOTLB_REG */
@@ -255,6 +256,8 @@ struct q_inval {
 #define INTR_REMAP_PAGE_ORDER	8
 #define INTR_REMAP_TABLE_REG_SIZE	0xf
 
+#define INTR_REMAP_TABLE_ENTRIES	65536
+
 struct ir_table {
 	struct irte *base;
 };
@@ -300,4 +303,5 @@ extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
+extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 #endif
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3d10cdc90314..bddb4b19b6c7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -2,6 +2,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/pci.h>
+#include <linux/irq.h>
 #include <asm/io_apic.h>
 #include "intel-iommu.h"
 #include "intr_remapping.h"
@@ -10,6 +11,248 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static int ir_ioapic_num;
 int intr_remapping_enabled;
 
+static struct {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8  irte_mask;
+} irq_2_iommu[NR_IRQS];
+
+static DEFINE_SPINLOCK(irq_2_ir_lock);
+
+int irq_remapped(int irq)
+{
+	if (irq > NR_IRQS)
+		return 0;
+
+	if (!irq_2_iommu[irq].iommu)
+		return 0;
+
+	return 1;
+}
+
+int get_irte(int irq, struct irte *entry)
+{
+	int index;
+
+	if (!entry || irq > NR_IRQS)
+		return -1;
+
+	spin_lock(&irq_2_ir_lock);
+	if (!irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
+}
+
+int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+{
+	struct ir_table *table = iommu->ir_table;
+	u16 index, start_index;
+	unsigned int mask = 0;
+	int i;
+
+	if (!count)
+		return -1;
+
+	/*
+	 * start the IRTE search from index 0.
+	 */
+	index = start_index = 0;
+
+	if (count > 1) {
+		count = __roundup_pow_of_two(count);
+		mask = ilog2(count);
+	}
+
+	if (mask > ecap_max_handle_mask(iommu->ecap)) {
+		printk(KERN_ERR
+		       "Requested mask %x exceeds the max invalidation handle"
+		       " mask value %Lx\n", mask,
+		       ecap_max_handle_mask(iommu->ecap));
+		return -1;
+	}
+
+	spin_lock(&irq_2_ir_lock);
+	do {
+		for (i = index; i < index + count; i++)
+			if (table->base[i].present)
+				break;
+		/* empty index found */
+		if (i == index + count)
+			break;
+
+		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+
+		if (index == start_index) {
+			spin_unlock(&irq_2_ir_lock);
+			printk(KERN_ERR "can't allocate an IRTE\n");
+			return -1;
+		}
+	} while (1);
+
+	for (i = index; i < index + count; i++)
+		table->base[i].present = 1;
+
+	irq_2_iommu[irq].iommu = iommu;
+	irq_2_iommu[irq].irte_index = index;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = mask;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return index;
+}
+
+static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+{
+	struct qi_desc desc;
+
+	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
+		   | QI_IEC_SELECTIVE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+}
+
+int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+{
+	int index;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	*sub_handle = irq_2_iommu[irq].sub_handle;
+	index = irq_2_iommu[irq].irte_index;
+	spin_unlock(&irq_2_ir_lock);
+	return index;
+}
+
+int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+{
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	irq_2_iommu[irq].iommu = iommu;
+	irq_2_iommu[irq].irte_index = index;
+	irq_2_iommu[irq].sub_handle = subhandle;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
+int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
+{
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	irq_2_iommu[irq].iommu = NULL;
+	irq_2_iommu[irq].irte_index = 0;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
+int modify_irte(int irq, struct irte *irte_modified)
+{
+	int index;
+	struct irte *irte;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	irte = &iommu->ir_table->base[index];
+
+	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+	__iommu_flush_cache(iommu, irte, sizeof(*irte));
+
+	qi_flush_iec(iommu, index, 0);
+
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
+}
+
+int flush_irte(int irq)
+{
+	int index;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+
+	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
+int free_irte(int irq)
+{
+	int index, i;
+	struct irte *irte;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	irte = &iommu->ir_table->base[index];
+
+	if (!irq_2_iommu[irq].sub_handle) {
+		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+			set_64bit((unsigned long *)irte, 0);
+		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	}
+
+	irq_2_iommu[irq].iommu = NULL;
+	irq_2_iommu[irq].irte_index = 0;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 {
 	u64 addr;