Diffstat (limited to 'arch/sparc64/kernel/pci_fire.c')
-rw-r--r-- | arch/sparc64/kernel/pci_fire.c | 380
1 files changed, 103 insertions, 277 deletions
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 090f26579678..bcf6a5d425ab 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -161,90 +161,92 @@ struct pci_msiq_entry {
 #define MSI_64BIT_ADDR		0x034008UL
 #define MSI_64BIT_ADDR_VAL	0xffffffffffff0000UL
 
-/* For now this just runs as a pre-handler for the real interrupt handler.
- * So we just walk through the queue and ACK all the entries, update the
- * head pointer, and return.
- *
- * In the longer term it would be nice to do something more integrated
- * wherein we can pass in some of this MSI info to the drivers. This
- * would be most useful for PCIe fabric error messages, although we could
- * invoke those directly from the loop here in order to pass the info around.
- */
-static void pci_msi_prehandler(unsigned int ino, void *data1, void *data2)
+static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			     unsigned long *head)
+{
+	*head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+	return 0;
+}
+
+static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
+				unsigned long *head, unsigned long *msi)
 {
-	unsigned long msiqid, orig_head, head, type_fmt, type;
-	struct pci_pbm_info *pbm = data1;
+	unsigned long type_fmt, type, msi_num;
 	struct pci_msiq_entry *base, *ep;
 
-	msiqid = (unsigned long) data2;
+	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
+	ep = &base[*head];
 
-	head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
+		return 0;
 
-	orig_head = head;
-	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
-	ep = &base[head];
-	while ((ep->word0 & MSIQ_WORD0_FMT_TYPE) != 0) {
-		unsigned long msi_num;
-
-		type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
-			    MSIQ_WORD0_FMT_TYPE_SHIFT);
-		type = (type_fmt >>3);
-		if (unlikely(type != MSIQ_TYPE_MSI32 &&
-			     type != MSIQ_TYPE_MSI64))
-			goto bad_type;
-
-		msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
-			   MSIQ_WORD0_DATA0_SHIFT);
-
-		fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
-			   MSI_CLEAR_EQWR_N);
-
-		/* Clear the entry. */
-		ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
-
-		/* Go to next entry in ring. */
-		head++;
-		if (head >= pbm->msiq_ent_count)
-			head = 0;
-		ep = &base[head];
-	}
+	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
+		    MSIQ_WORD0_FMT_TYPE_SHIFT);
+	type = (type_fmt >> 3);
+	if (unlikely(type != MSIQ_TYPE_MSI32 &&
+		     type != MSIQ_TYPE_MSI64))
+		return -EINVAL;
 
-	if (likely(head != orig_head)) {
-		/* ACK entries by updating head pointer. */
-		fire_write(pbm->pbm_regs +
-			   EVENT_QUEUE_HEAD(msiqid),
-			   head);
-	}
-	return;
+	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
+			  MSIQ_WORD0_DATA0_SHIFT);
 
-bad_type:
-	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
-	return;
+	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
+		   MSI_CLEAR_EQWR_N);
+
+	/* Clear the entry. */
+	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
+
+	/* Go to next entry in ring. */
+	(*head)++;
+	if (*head >= pbm->msiq_ent_count)
+		*head = 0;
+
+	return 1;
 }
 
-static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			     unsigned long head)
 {
-	unsigned long size, bits_per_ulong;
+	fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
+	return 0;
+}
 
-	bits_per_ulong = sizeof(unsigned long) * 8;
-	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
-	size /= 8;
-	BUG_ON(size % sizeof(unsigned long));
+static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+			      unsigned long msi, int is_msi64)
+{
+	u64 val;
 
-	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
-	if (!pbm->msi_bitmap)
-		return -ENOMEM;
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	val &= ~(MSI_MAP_EQNUM);
+	val |= msiqid;
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
+		   MSI_CLEAR_EQWR_N);
+
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	val |= MSI_MAP_VALID;
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
 
 	return 0;
 }
 
-static void msi_bitmap_free(struct pci_pbm_info *pbm)
+static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
 {
-	kfree(pbm->msi_bitmap);
-	pbm->msi_bitmap = NULL;
+	unsigned long msiqid;
+	u64 val;
+
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	msiqid = (val & MSI_MAP_EQNUM);
+
+	val &= ~MSI_MAP_VALID;
+
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	return 0;
 }
 
-static int msi_queue_alloc(struct pci_pbm_info *pbm)
+static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
 {
 	unsigned long pages, order, i;
 
@@ -279,241 +281,65 @@ static int msi_queue_alloc(struct pci_pbm_info *pbm)
 	return 0;
 }
 
-static int alloc_msi(struct pci_pbm_info *pbm)
+static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
 {
-	int i;
+	unsigned long pages, order;
 
-	for (i = 0; i < pbm->msi_num; i++) {
-		if (!test_and_set_bit(i, pbm->msi_bitmap))
-			return i + pbm->msi_first;
-	}
+	order = get_order(512 * 1024);
+	pages = (unsigned long) pbm->msi_queues;
 
-	return -ENOENT;
-}
+	free_pages(pages, order);
 
-static void free_msi(struct pci_pbm_info *pbm, int msi_num)
-{
-	msi_num -= pbm->msi_first;
-	clear_bit(msi_num, pbm->msi_bitmap);
+	pbm->msi_queues = NULL;
 }
 
-static int pci_setup_msi_irq(unsigned int *virt_irq_p,
-			     struct pci_dev *pdev,
-			     struct msi_desc *entry)
+static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
+				   unsigned long msiqid,
+				   unsigned long devino)
 {
-	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-	unsigned long devino, msiqid, cregs, imap_off;
-	struct msi_msg msg;
-	int msi_num, err;
+	unsigned long cregs = (unsigned long) pbm->pbm_regs;
+	unsigned long imap_reg, iclr_reg, int_ctrlr;
+	unsigned int virt_irq;
+	int fixup;
 	u64 val;
 
-	*virt_irq_p = 0;
-
-	msi_num = alloc_msi(pbm);
-	if (msi_num < 0)
-		return msi_num;
-
-	cregs = (unsigned long) pbm->pbm_regs;
+	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
+	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
 
-	err = sun4u_build_msi(pbm->portid, virt_irq_p,
-			      pbm->msiq_first_devino,
-			      (pbm->msiq_first_devino +
-			       pbm->msiq_num),
-			      cregs + 0x001000UL,
-			      cregs + 0x001400UL);
-	if (err < 0)
-		goto out_err;
-	devino = err;
+	/* XXX iterate amongst the 4 IRQ controllers XXX */
+	int_ctrlr = (1UL << 6);
 
-	imap_off = 0x001000UL + (devino * 0x8UL);
+	val = fire_read(imap_reg);
+	val |= (1UL << 63) | int_ctrlr;
+	fire_write(imap_reg, val);
 
-	val = fire_read(pbm->pbm_regs + imap_off);
-	val |= (1UL << 63) | (1UL << 6);
-	fire_write(pbm->pbm_regs + imap_off, val);
+	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
 
-	msiqid = ((devino - pbm->msiq_first_devino) +
-		  pbm->msiq_first);
+	virt_irq = build_irq(fixup, iclr_reg, imap_reg);
+	if (!virt_irq)
+		return -ENOMEM;
 
 	fire_write(pbm->pbm_regs +
 		   EVENT_QUEUE_CONTROL_SET(msiqid),
 		   EVENT_QUEUE_CONTROL_SET_EN);
 
-	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
-	val &= ~(MSI_MAP_EQNUM);
-	val |= msiqid;
-	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
-
-	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
-		   MSI_CLEAR_EQWR_N);
-
-	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
-	val |= MSI_MAP_VALID;
-	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
-
-	sparc64_set_msi(*virt_irq_p, msi_num);
-
-	if (entry->msi_attrib.is_64) {
-		msg.address_hi = pbm->msi64_start >> 32;
-		msg.address_lo = pbm->msi64_start & 0xffffffff;
-	} else {
-		msg.address_hi = 0;
-		msg.address_lo = pbm->msi32_start;
-	}
-	msg.data = msi_num;
-
-	set_irq_msi(*virt_irq_p, entry);
-	write_msi_msg(*virt_irq_p, &msg);
-
-	irq_install_pre_handler(*virt_irq_p,
-				pci_msi_prehandler,
-				pbm, (void *) msiqid);
-
-	return 0;
-
-out_err:
-	free_msi(pbm, msi_num);
-	return err;
+	return virt_irq;
 }
 
-static void pci_teardown_msi_irq(unsigned int virt_irq,
-				 struct pci_dev *pdev)
-{
-	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-	unsigned long msiqid, msi_num;
-	u64 val;
-
-	msi_num = sparc64_get_msi(virt_irq);
-
-	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
-
-	msiqid = (val & MSI_MAP_EQNUM);
-
-	val &= ~MSI_MAP_VALID;
-	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
-
-	fire_write(pbm->pbm_regs + EVENT_QUEUE_CONTROL_CLEAR(msiqid),
-		   EVENT_QUEUE_CONTROL_CLEAR_DIS);
-
-	free_msi(pbm, msi_num);
-
-	/* The sun4u_destroy_msi() will liberate the devino and thus the MSIQ
-	 * allocation.
-	 */
-	sun4u_destroy_msi(virt_irq);
-}
+static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
+	.get_head = pci_fire_get_head,
+	.dequeue_msi = pci_fire_dequeue_msi,
+	.set_head = pci_fire_set_head,
+	.msi_setup = pci_fire_msi_setup,
+	.msi_teardown = pci_fire_msi_teardown,
+	.msiq_alloc = pci_fire_msiq_alloc,
+	.msiq_free = pci_fire_msiq_free,
+	.msiq_build_irq = pci_fire_msiq_build_irq,
+};
 
 static void pci_fire_msi_init(struct pci_pbm_info *pbm)
 {
-	const u32 *val;
-	int len;
-
-	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
-	if (!val || len != 4)
-		goto no_msi;
-	pbm->msiq_num = *val;
-	if (pbm->msiq_num) {
-		const struct msiq_prop {
-			u32 first_msiq;
-			u32 num_msiq;
-			u32 first_devino;
-		} *mqp;
-		const struct msi_range_prop {
-			u32 first_msi;
-			u32 num_msi;
-		} *mrng;
-		const struct addr_range_prop {
-			u32 msi32_high;
-			u32 msi32_low;
-			u32 msi32_len;
-			u32 msi64_high;
-			u32 msi64_low;
-			u32 msi64_len;
-		} *arng;
-
-		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
-		if (!val || len != 4)
-			goto no_msi;
-
-		pbm->msiq_ent_count = *val;
-
-		mqp = of_get_property(pbm->prom_node,
-				      "msi-eq-to-devino", &len);
-		if (!mqp)
-			mqp = of_get_property(pbm->prom_node,
-					      "msi-eq-devino", &len);
-		if (!mqp || len != sizeof(struct msiq_prop))
-			goto no_msi;
-
-		pbm->msiq_first = mqp->first_msiq;
-		pbm->msiq_first_devino = mqp->first_devino;
-
-		val = of_get_property(pbm->prom_node, "#msi", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msi_num = *val;
-
-		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
-		if (!mrng || len != sizeof(struct msi_range_prop))
-			goto no_msi;
-		pbm->msi_first = mrng->first_msi;
-
-		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msi_data_mask = *val;
-
-		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msix_data_width = *val;
-
-		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
-				       &len);
-		if (!arng || len != sizeof(struct addr_range_prop))
-			goto no_msi;
-		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
-			(u64) arng->msi32_low;
-		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
-			(u64) arng->msi64_low;
-		pbm->msi32_len = arng->msi32_len;
-		pbm->msi64_len = arng->msi64_len;
-
-		if (msi_bitmap_alloc(pbm))
-			goto no_msi;
-
-		if (msi_queue_alloc(pbm)) {
-			msi_bitmap_free(pbm);
-			goto no_msi;
-		}
-
-		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
-		       "devino[0x%x]\n",
-		       pbm->name,
-		       pbm->msiq_first, pbm->msiq_num,
-		       pbm->msiq_ent_count,
-		       pbm->msiq_first_devino);
-		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
-		       "width[%u]\n",
-		       pbm->name,
-		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
-		       pbm->msix_data_width);
-		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
-		       "addr64[0x%lx:0x%x]\n",
-		       pbm->name,
-		       pbm->msi32_start, pbm->msi32_len,
-		       pbm->msi64_start, pbm->msi64_len);
-		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
-		       pbm->name,
-		       __pa(pbm->msi_queues));
-	}
-	pbm->setup_msi_irq = pci_setup_msi_irq;
-	pbm->teardown_msi_irq = pci_teardown_msi_irq;
-
-	return;
-
-no_msi:
-	pbm->msiq_num = 0;
-	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
 }
 #else /* CONFIG_PCI_MSI */
 static void pci_fire_msi_init(struct pci_pbm_info *pbm)
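
The ops table registered above is consumed by the shared sparc64 MSI layer that pci_fire_msi_init() now hands it to via sparc64_pbm_msi_init(). As a rough, hypothetical sketch (not the actual pci_msi.c code; msiq_drain() and handle_one_msi() are illustrative stand-ins), a generic event-queue service loop built on these callbacks could look like this:

static void msiq_drain(struct pci_pbm_info *pbm,
		       const struct sparc64_msiq_ops *ops,
		       unsigned long msiqid)
{
	unsigned long orig_head, head, msi;
	int err;

	if (ops->get_head(pbm, msiqid, &head))
		return;

	orig_head = head;
	for (;;) {
		/* dequeue_msi(): 1 = consumed an entry, 0 = queue empty,
		 * negative = malformed entry (e.g. -EINVAL for a bad type).
		 */
		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (err <= 0)
			break;
		handle_one_msi(pbm, msi);	/* placeholder dispatch */
	}

	/* ACK the drained entries by advancing the hardware head pointer. */
	if (head != orig_head)
		ops->set_head(pbm, msiqid, head);
}

This mirrors what the removed pci_msi_prehandler() loop did inline, which is why pci_fire_dequeue_msi() returns 1 per consumed entry and 0 once MSIQ_WORD0_FMT_TYPE reads back clear.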