author     Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>   2007-02-15 11:06:33 -0500
committer  Roland Dreier <rolandd@cisco.com>               2007-02-16 16:57:34 -0500
commit     78d8d5f9ef8d6179e92b94481cfdfc45d396992f (patch)
tree       55ee3cecd4c0d59f418b59870cec2ac33b1b70e7 /drivers/infiniband/hw/ehca/ehca_irq.c
parent     551fd6122d247d76124c4fdb6eb898cc8e3d74aa (diff)
IB/ehca: Rework irq handler
Rework ehca interrupt handling to avoid/reduce missed irq events.
Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
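Why the old handler could miss events: the old ehca_tasklet_eq() polled the EQ and ran completion handlers inside nested loops, querying the interrupt state up to 100 times to decide when to stop, so an EQE arriving between the final poll and the next interrupt assertion could go unserviced. The reworked ehca_process_eq() narrows that window: it first reads all pending EQEs into eq->eqe_cache under eq->irq_spinlock, re-arms interrupt generation via reset_eq_pending(), records whether the EQ refilled in the meantime, and only then runs the (potentially slow) completion handlers, finally draining any stragglers itself. Below is a minimal sketch of this cache-then-drain pattern; poll_event(), reenable_irq(), queue_is_empty(), handle() and CACHE_SIZE are hypothetical stand-ins, not ehca symbols.

/*
 * Sketch of the cache-then-drain pattern used by ehca_process_eq().
 * All names here (poll_event, reenable_irq, queue_is_empty, handle,
 * CACHE_SIZE) are hypothetical stand-ins, not ehca symbols.
 */
#define CACHE_SIZE 20

extern void *poll_event(void);       /* NULL when the queue is empty */
extern void reenable_irq(void *ev);  /* re-arm irq source for this event */
extern int queue_is_empty(void);
extern void handle(void *ev);

static void process_events(void)
{
	void *cache[CACHE_SIZE];
	int cnt = 0, i, empty;

	/* 1. read out all pending events first */
	while (cnt < CACHE_SIZE && (cache[cnt] = poll_event()) != NULL)
		cnt++;

	/* 2. re-arm interrupt generation before running any handler,
	 *    so a new event raises a fresh interrupt */
	for (i = 0; i < cnt; i++)
		reenable_irq(cache[i]);

	/* 3. note whether events slipped in before the re-arm took effect */
	empty = queue_is_empty();

	/* 4. now run the (potentially slow) handlers */
	for (i = 0; i < cnt; i++)
		handle(cache[i]);

	/* 5. drain stragglers whose interrupt may have been swallowed */
	if (!empty) {
		void *ev;
		while ((ev = poll_event()) != NULL)
			handle(ev);
	}
}

The point of step 2 preceding step 4 is that an event arriving while the handlers run raises a fresh interrupt instead of being silently coalesced; step 5 covers the remaining gap between steps 1 and 2.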
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_irq.c')
 drivers/infiniband/hw/ehca/ehca_irq.c | 216 +++++++++++++++++++----------
 1 file changed, 136 insertions(+), 80 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 6c4f9f91b15d..b923b5d5de68 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -206,7 +206,7 @@ static void qp_event_callback(struct ehca_shca *shca,
 }
 
 static void cq_event_callback(struct ehca_shca *shca,
-                              u64 eqe)
+			      u64 eqe)
 {
 	struct ehca_cq *cq;
 	unsigned long flags;
@@ -318,7 +318,7 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 			 "disruptive port %x configuration change", port);
 
 		ehca_info(&shca->ib_device,
-			 "port %x is inactive.", port);
+			  "port %x is inactive.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port;
@@ -326,7 +326,7 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 		ib_dispatch_event(&event);
 
 		ehca_info(&shca->ib_device,
-			 "port %x is active.", port);
+			  "port %x is active.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ACTIVE;
 		event.element.port_num = port;
@@ -401,87 +401,143 @@ irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void ehca_tasklet_eq(unsigned long data)
-{
-	struct ehca_shca *shca = (struct ehca_shca*)data;
-	struct ehca_eqe *eqe;
-	int int_state;
-	int query_cnt = 0;
 
-	do {
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
-
-		if ((shca->hw_level >= 2) && eqe)
-			int_state = 1;
-		else
-			int_state = 0;
-
-		while ((int_state == 1) || eqe) {
-			while (eqe) {
-				u64 eqe_value = eqe->entry;
-
-				ehca_dbg(&shca->ib_device,
-					 "eqe_value=%lx", eqe_value);
-
-				/* TODO: better structure */
-				if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
-						   eqe_value)) {
-					unsigned long flags;
-					u32 token;
-					struct ehca_cq *cq;
-
-					ehca_dbg(&shca->ib_device,
-						 "... completion event");
-					token =
-						EHCA_BMASK_GET(EQE_CQ_TOKEN,
-							       eqe_value);
-					spin_lock_irqsave(&ehca_cq_idr_lock,
-							  flags);
-					cq = idr_find(&ehca_cq_idr, token);
-
-					if (cq == NULL) {
-						spin_unlock_irqrestore(&ehca_cq_idr_lock,
-								       flags);
-						break;
-					}
-
-					reset_eq_pending(cq);
+static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
+{
+	u64 eqe_value;
+	u32 token;
+	unsigned long flags;
+	struct ehca_cq *cq;
+	eqe_value = eqe->entry;
+	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+		ehca_dbg(&shca->ib_device, "... completion event");
+		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq = idr_find(&ehca_cq_idr, token);
+		if (cq == NULL) {
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+			ehca_err(&shca->ib_device,
+				 "Invalid eqe for non-existing cq token=%x",
+				 token);
+			return;
+		}
+		reset_eq_pending(cq);
 #ifdef CONFIG_INFINIBAND_EHCA_SCALING
-					queue_comp_task(cq);
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
+		queue_comp_task(cq);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 #else
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
-					comp_event_callback(cq);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		comp_event_callback(cq);
 #endif
-				} else {
-					ehca_dbg(&shca->ib_device,
-						 "... non completion event");
-					parse_identifier(shca, eqe_value);
-				}
-				eqe =
-					(struct ehca_eqe *)ehca_poll_eq(shca,
-									&shca->eq);
-			}
+	} else {
+		ehca_dbg(&shca->ib_device,
+			 "Got non completion event");
+		parse_identifier(shca, eqe_value);
+	}
+}
 
-			if (shca->hw_level >= 2) {
-				int_state =
-					hipz_h_query_int_state(shca->ipz_hca_handle,
-							       shca->eq.ist);
-				query_cnt++;
-				iosync();
-				if (query_cnt >= 100) {
-					query_cnt = 0;
-					int_state = 0;
-				}
-			}
-			eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+void ehca_process_eq(struct ehca_shca *shca, int is_irq)
+{
+	struct ehca_eq *eq = &shca->eq;
+	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
+	u64 eqe_value;
+	unsigned long flags;
+	int eqe_cnt, i;
+	int eq_empty = 0;
+
+	spin_lock_irqsave(&eq->irq_spinlock, flags);
+	if (is_irq) {
+		const int max_query_cnt = 100;
+		int query_cnt = 0;
+		int int_state = 1;
+		do {
+			int_state = hipz_h_query_int_state(
+				shca->ipz_hca_handle, eq->ist);
+			query_cnt++;
+			iosync();
+		} while (int_state && query_cnt < max_query_cnt);
+		if (unlikely((query_cnt == max_query_cnt)))
+			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
+				 int_state, query_cnt);
+	}
 
-		}
-	} while (int_state != 0);
+	/* read out all eqes */
+	eqe_cnt = 0;
+	do {
+		u32 token;
+		eqe_cache[eqe_cnt].eqe =
+			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
+		if (!eqe_cache[eqe_cnt].eqe)
+			break;
+		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
+		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+			spin_lock(&ehca_cq_idr_lock);
+			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+			if (!eqe_cache[eqe_cnt].cq) {
+				spin_unlock(&ehca_cq_idr_lock);
+				ehca_err(&shca->ib_device,
+					 "Invalid eqe for non-existing cq "
+					 "token=%x", token);
+				continue;
+			}
+			spin_unlock(&ehca_cq_idr_lock);
+		} else
+			eqe_cache[eqe_cnt].cq = NULL;
+		eqe_cnt++;
+	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
+	if (!eqe_cnt) {
+		if (is_irq)
+			ehca_dbg(&shca->ib_device,
+				 "No eqe found for irq event");
+		goto unlock_irq_spinlock;
+	} else if (!is_irq)
+		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
+	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
+	/* enable irq for new packets */
+	for (i = 0; i < eqe_cnt; i++) {
+		if (eq->eqe_cache[i].cq)
+			reset_eq_pending(eq->eqe_cache[i].cq);
+	}
+	/* check eq */
+	spin_lock(&eq->spinlock);
+	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
+	spin_unlock(&eq->spinlock);
+	/* call completion handler for cached eqes */
+	for (i = 0; i < eqe_cnt; i++)
+		if (eq->eqe_cache[i].cq) {
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+			spin_lock(&ehca_cq_idr_lock);
+			queue_comp_task(eq->eqe_cache[i].cq);
+			spin_unlock(&ehca_cq_idr_lock);
+#else
+			comp_event_callback(eq->eqe_cache[i].cq);
+#endif
+		} else {
+			ehca_dbg(&shca->ib_device, "Got non completion event");
+			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
+		}
+	/* poll eq if not empty */
+	if (eq_empty)
+		goto unlock_irq_spinlock;
+	do {
+		struct ehca_eqe *eqe;
+		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+		if (!eqe)
+			break;
+		process_eqe(shca, eqe);
+		eqe_cnt++;
+	} while (1);
+
+unlock_irq_spinlock:
+	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
+}
 
-	return;
+void ehca_tasklet_eq(unsigned long data)
+{
+	ehca_process_eq((struct ehca_shca*)data, 1);
 }
 
 #ifdef CONFIG_INFINIBAND_EHCA_SCALING
@@ -654,11 +710,11 @@ static void take_over_work(struct ehca_comp_pool *pool,
 	list_splice_init(&cct->cq_list, &list);
 
 	while(!list_empty(&list)) {
-	       cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
-	       list_del(&cq->entry);
-	       __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-						 smp_processor_id()));
+		list_del(&cq->entry);
+		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
+						  smp_processor_id()));
 	}
 
 	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
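A note on the is_irq argument: ehca_tasklet_eq() passes 1, while the "deadman found %x eqe" message implies a second, non-interrupt caller that invokes ehca_process_eq() with is_irq = 0 to recover events whose interrupt was lost outright. That caller is outside this file (the diffstat is limited to ehca_irq.c); what follows is a hypothetical sketch of such a watchdog against the 2.6.20-era timer API. The name poll_eqs_timer, the setup helper, and the one-second period are all assumptions, not code from this commit.

/*
 * Hypothetical periodic "deadman" caller of ehca_process_eq(); not part
 * of this diff. Driver headers are assumed to declare struct ehca_shca
 * and ehca_process_eq().
 */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include "ehca_classes.h"	/* assumed: struct ehca_shca */
#include "ehca_iverbs.h"	/* assumed: ehca_process_eq() */

static struct timer_list poll_eqs_timer;	/* assumed name */

static void poll_eqs(unsigned long data)
{
	struct ehca_shca *shca = (struct ehca_shca *)data;

	ehca_process_eq(shca, 0);	/* is_irq = 0: the deadman path */
	mod_timer(&poll_eqs_timer, jiffies + HZ);	/* re-arm, ~1s */
}

static void start_poll_eqs(struct ehca_shca *shca)
{
	init_timer(&poll_eqs_timer);
	poll_eqs_timer.function = poll_eqs;
	poll_eqs_timer.data = (unsigned long)shca;
	mod_timer(&poll_eqs_timer, jiffies + HZ);
}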