author     Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>  2007-02-15 11:06:33 -0500
committer  Roland Dreier <rolandd@cisco.com>              2007-02-16 16:57:34 -0500
commit     78d8d5f9ef8d6179e92b94481cfdfc45d396992f
tree       55ee3cecd4c0d59f418b59870cec2ac33b1b70e7  /drivers/infiniband
parent     551fd6122d247d76124c4fdb6eb898cc8e3d74aa
IB/ehca: Rework irq handler
Rework ehca interrupt handling to avoid/reduce missed irq events.
Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
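
In outline, the rework replaces the old poll/retry loop in ehca_tasklet_eq() with a common ehca_process_eq() that first drains the event queue into a small per-EQ cache (up to EHCA_EQE_CACHE_SIZE entries) under a new irq_spinlock, acknowledges the cached completion entries via reset_eq_pending() so the adapter can signal again, and only then dispatches the completion handlers; a final poll pass catches entries that arrived in between. The same entry point serves both the interrupt tasklet (is_irq=1) and the deadman timer (is_irq=0). The following is a minimal user-space model of that drain/ack/dispatch pattern, not driver code: fake_queue, fake_poll() and handle_event() are invented stand-ins for the EQ ring, ehca_poll_eq() and comp_event_callback().

/* Minimal model of the reworked EQ path: drain into a bounded cache,
 * acknowledge, then dispatch.  Build with: cc -o eq eq.c */
#include <stdio.h>

#define EQE_CACHE_SIZE 20

static int fake_queue[64];	/* pending event ids; 0 marks "empty" */
static int head, tail;

static int fake_poll(void)	/* stand-in for ehca_poll_eq() */
{
	return head == tail ? 0 : fake_queue[head++];
}

static void handle_event(int id)	/* stand-in for comp_event_callback() */
{
	printf("completion event %d\n", id);
}

static void process_eq(int is_irq)
{
	int cache[EQE_CACHE_SIZE];
	int cnt = 0, i;

	/* phase 1: read out all eqes, bounded by the cache size */
	while (cnt < EQE_CACHE_SIZE && (cache[cnt] = fake_poll()))
		cnt++;
	if (!cnt) {
		if (is_irq)
			printf("no eqe found for irq event\n");
		return;
	}
	/* phase 2: the driver acknowledges the cached CQs here
	 * (reset_eq_pending()), so events raised while the callbacks
	 * run below can trigger a fresh interrupt instead of being lost */
	/* phase 3: run the handlers for the cached entries */
	for (i = 0; i < cnt; i++)
		handle_event(cache[i]);
	/* phase 4: drain anything that arrived while dispatching */
	for (i = fake_poll(); i; i = fake_poll())
		handle_event(i);
}

int main(void)
{
	for (tail = 0; tail < 5; tail++)
		fake_queue[tail] = tail + 1;
	process_eq(1);	/* 1 = irq path; the deadman timer passes 0 */
	return 0;
}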
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  18
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_eq.c      |   1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c     | 216
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.h     |   1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c    |  28
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.h    |  11
6 files changed, 183 insertions(+), 92 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index cf95ee474b0f..f08ad6f9c132 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -42,8 +42,6 @@
 #ifndef __EHCA_CLASSES_H__
 #define __EHCA_CLASSES_H__
 
-#include "ehca_classes.h"
-#include "ipz_pt_fn.h"
 
 struct ehca_module;
 struct ehca_qp;
@@ -54,14 +52,22 @@ struct ehca_mw;
 struct ehca_pd;
 struct ehca_av;
 
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
 #ifdef CONFIG_PPC64
 #include "ehca_classes_pSeries.h"
 #endif
+#include "ipz_pt_fn.h"
+#include "ehca_qes.h"
+#include "ehca_irq.h"
 
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
+#define EHCA_EQE_CACHE_SIZE 20
 
-#include "ehca_irq.h"
+struct ehca_eqe_cache_entry {
+	struct ehca_eqe *eqe;
+	struct ehca_cq *cq;
+};
 
 struct ehca_eq {
 	u32 length;
@@ -74,6 +80,8 @@ struct ehca_eq {
 	spinlock_t spinlock;
 	struct tasklet_struct interrupt_task;
 	u32 ist;
+	spinlock_t irq_spinlock;
+	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
 };
 
 struct ehca_sport {
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 24ceab0bae4a..4961eb88827c 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -61,6 +61,7 @@ int ehca_create_eq(struct ehca_shca *shca,
 	struct ib_device *ib_dev = &shca->ib_device;
 
 	spin_lock_init(&eq->spinlock);
+	spin_lock_init(&eq->irq_spinlock);
 	eq->is_initialized = 0;
 
 	if (type != EHCA_EQ && type != EHCA_NEQ) {
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 6c4f9f91b15d..b923b5d5de68 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -206,7 +206,7 @@ static void qp_event_callback(struct ehca_shca *shca,
 }
 
 static void cq_event_callback(struct ehca_shca *shca,
-                              u64 eqe)
+			      u64 eqe)
 {
 	struct ehca_cq *cq;
 	unsigned long flags;
@@ -318,7 +318,7 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 			 "disruptive port %x configuration change", port);
 
 		ehca_info(&shca->ib_device,
-                         "port %x is inactive.", port);
+			 "port %x is inactive.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port;
@@ -326,7 +326,7 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 		ib_dispatch_event(&event);
 
 		ehca_info(&shca->ib_device,
-                         "port %x is active.", port);
+			 "port %x is active.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ACTIVE;
 		event.element.port_num = port;
@@ -401,87 +401,143 @@ irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void ehca_tasklet_eq(unsigned long data)
-{
-	struct ehca_shca *shca = (struct ehca_shca*)data;
-	struct ehca_eqe *eqe;
-	int int_state;
-	int query_cnt = 0;
 
-	do {
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
-
-		if ((shca->hw_level >= 2) && eqe)
-			int_state = 1;
-		else
-			int_state = 0;
-
-		while ((int_state == 1) || eqe) {
-			while (eqe) {
-				u64 eqe_value = eqe->entry;
-
-				ehca_dbg(&shca->ib_device,
-					 "eqe_value=%lx", eqe_value);
-
-				/* TODO: better structure */
-				if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
-						   eqe_value)) {
-					unsigned long flags;
-					u32 token;
-					struct ehca_cq *cq;
-
-					ehca_dbg(&shca->ib_device,
-						 "... completion event");
-					token =
-						EHCA_BMASK_GET(EQE_CQ_TOKEN,
-							       eqe_value);
-					spin_lock_irqsave(&ehca_cq_idr_lock,
-							  flags);
-					cq = idr_find(&ehca_cq_idr, token);
-
-					if (cq == NULL) {
-						spin_unlock_irqrestore(&ehca_cq_idr_lock,
-								       flags);
-						break;
-					}
-
-					reset_eq_pending(cq);
+static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
+{
+	u64 eqe_value;
+	u32 token;
+	unsigned long flags;
+	struct ehca_cq *cq;
+	eqe_value = eqe->entry;
+	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+		ehca_dbg(&shca->ib_device, "... completion event");
+		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq = idr_find(&ehca_cq_idr, token);
+		if (cq == NULL) {
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+			ehca_err(&shca->ib_device,
+				 "Invalid eqe for non-existing cq token=%x",
+				 token);
+			return;
+		}
+		reset_eq_pending(cq);
 #ifdef CONFIG_INFINIBAND_EHCA_SCALING
-					queue_comp_task(cq);
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
+		queue_comp_task(cq);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 #else
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
-					comp_event_callback(cq);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		comp_event_callback(cq);
 #endif
-				} else {
-					ehca_dbg(&shca->ib_device,
-						 "... non completion event");
-					parse_identifier(shca, eqe_value);
-				}
-				eqe =
-					(struct ehca_eqe *)ehca_poll_eq(shca,
-									&shca->eq);
-			}
+	} else {
+		ehca_dbg(&shca->ib_device,
+			 "Got non completion event");
+		parse_identifier(shca, eqe_value);
+	}
+}
 
-			if (shca->hw_level >= 2) {
-				int_state =
-					hipz_h_query_int_state(shca->ipz_hca_handle,
-							       shca->eq.ist);
-				query_cnt++;
-				iosync();
-				if (query_cnt >= 100) {
-					query_cnt = 0;
-					int_state = 0;
-				}
-			}
-			eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+void ehca_process_eq(struct ehca_shca *shca, int is_irq)
+{
+	struct ehca_eq *eq = &shca->eq;
+	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
+	u64 eqe_value;
+	unsigned long flags;
+	int eqe_cnt, i;
+	int eq_empty = 0;
+
+	spin_lock_irqsave(&eq->irq_spinlock, flags);
+	if (is_irq) {
+		const int max_query_cnt = 100;
+		int query_cnt = 0;
+		int int_state = 1;
+		do {
+			int_state = hipz_h_query_int_state(
+				shca->ipz_hca_handle, eq->ist);
+			query_cnt++;
+			iosync();
+		} while (int_state && query_cnt < max_query_cnt);
+		if (unlikely((query_cnt == max_query_cnt)))
+			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
+				 int_state, query_cnt);
+	}
 
+	/* read out all eqes */
+	eqe_cnt = 0;
+	do {
+		u32 token;
+		eqe_cache[eqe_cnt].eqe =
+			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
+		if (!eqe_cache[eqe_cnt].eqe)
+			break;
+		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
+		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+			spin_lock(&ehca_cq_idr_lock);
+			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+			if (!eqe_cache[eqe_cnt].cq) {
+				spin_unlock(&ehca_cq_idr_lock);
+				ehca_err(&shca->ib_device,
+					 "Invalid eqe for non-existing cq "
+					 "token=%x", token);
+				continue;
+			}
+			spin_unlock(&ehca_cq_idr_lock);
+		} else
+			eqe_cache[eqe_cnt].cq = NULL;
+		eqe_cnt++;
+	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
+	if (!eqe_cnt) {
+		if (is_irq)
+			ehca_dbg(&shca->ib_device,
+				 "No eqe found for irq event");
+		goto unlock_irq_spinlock;
+	} else if (!is_irq)
+		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
+	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
+	/* enable irq for new packets */
+	for (i = 0; i < eqe_cnt; i++) {
+		if (eq->eqe_cache[i].cq)
+			reset_eq_pending(eq->eqe_cache[i].cq);
+	}
+	/* check eq */
+	spin_lock(&eq->spinlock);
+	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
+	spin_unlock(&eq->spinlock);
+	/* call completion handler for cached eqes */
+	for (i = 0; i < eqe_cnt; i++)
+		if (eq->eqe_cache[i].cq) {
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+			spin_lock(&ehca_cq_idr_lock);
+			queue_comp_task(eq->eqe_cache[i].cq);
+			spin_unlock(&ehca_cq_idr_lock);
+#else
+			comp_event_callback(eq->eqe_cache[i].cq);
+#endif
+		} else {
+			ehca_dbg(&shca->ib_device, "Got non completion event");
+			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
 		}
-	} while (int_state != 0);
+	/* poll eq if not empty */
+	if (eq_empty)
+		goto unlock_irq_spinlock;
+	do {
+		struct ehca_eqe *eqe;
+		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+		if (!eqe)
+			break;
+		process_eqe(shca, eqe);
+		eqe_cnt++;
+	} while (1);
 
-	return;
+unlock_irq_spinlock:
+	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
+}
+
+void ehca_tasklet_eq(unsigned long data)
+{
+	ehca_process_eq((struct ehca_shca*)data, 1);
 }
 
 #ifdef CONFIG_INFINIBAND_EHCA_SCALING
@@ -654,11 +710,11 @@ static void take_over_work(struct ehca_comp_pool *pool,
 	list_splice_init(&cct->cq_list, &list);
 
 	while(!list_empty(&list)) {
-	       cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
 		list_del(&cq->entry);
 		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-						  smp_processor_id()));
+				  smp_processor_id()));
 	}
 
 	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
index be579cc0adf6..6ed06ee033ed 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.h
+++ b/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -56,6 +56,7 @@ void ehca_tasklet_neq(unsigned long data);
 
 irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
 void ehca_tasklet_eq(unsigned long data);
+void ehca_process_eq(struct ehca_shca *shca, int is_irq);
 
 struct ehca_cpu_comp_task {
 	wait_queue_head_t wait_queue;
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 1155bcf48212..579053421472 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0020");
+MODULE_VERSION("SVNEHCA_0021");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -432,8 +432,8 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
 
 static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
 {
-	return  snprintf(buf, PAGE_SIZE, "%d\n",
-			 ehca_debug_level);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ehca_debug_level);
 }
 
 static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -778,8 +778,24 @@ void ehca_poll_eqs(unsigned long data)
 
 	spin_lock(&shca_list_lock);
 	list_for_each_entry(shca, &shca_list, shca_list) {
-		if (shca->eq.is_initialized)
-			ehca_tasklet_eq((unsigned long)(void*)shca);
+		if (shca->eq.is_initialized) {
+			/* call deadman proc only if eq ptr does not change */
+			struct ehca_eq *eq = &shca->eq;
+			int max = 3;
+			volatile u64 q_ofs, q_ofs2;
+			u64 flags;
+			spin_lock_irqsave(&eq->spinlock, flags);
+			q_ofs = eq->ipz_queue.current_q_offset;
+			spin_unlock_irqrestore(&eq->spinlock, flags);
+			do {
+				spin_lock_irqsave(&eq->spinlock, flags);
+				q_ofs2 = eq->ipz_queue.current_q_offset;
+				spin_unlock_irqrestore(&eq->spinlock, flags);
+				max--;
+			} while (q_ofs == q_ofs2 && max > 0);
+			if (q_ofs == q_ofs2)
+				ehca_process_eq(shca, 0);
+		}
 	}
 	mod_timer(&poll_eqs_timer, jiffies + HZ);
 	spin_unlock(&shca_list_lock);
@@ -790,7 +806,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0020)\n");
+	       "(Rel.: SVNEHCA_0021)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
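
The ehca_poll_eqs() hunk in ehca_main.c above is the deadman side of the rework: once a second it samples the EQ's consumer offset a few times and calls ehca_process_eq(shca, 0) only when the offset never moved, i.e. nothing consumed any event for a whole timer period, which suggests a missed irq. A toy model of that check follows; sample_offset() is an invented stand-in for reading eq->ipz_queue.current_q_offset under eq->spinlock.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in: a constant offset models an EQ that nobody
 * is consuming, e.g. because an irq event was missed. */
static unsigned long long sample_offset(void)
{
	return 42;
}

/* Mirrors the loop added to ehca_poll_eqs(): true when the offset
 * stayed put across all samples. */
static bool eq_looks_stuck(int max)
{
	unsigned long long q_ofs = sample_offset(), q_ofs2;

	do {
		q_ofs2 = sample_offset();
		max--;
	} while (q_ofs == q_ofs2 && max > 0);
	return q_ofs == q_ofs2;	/* unchanged => poll from the timer */
}

int main(void)
{
	if (eq_looks_stuck(3))
		printf("deadman would call ehca_process_eq(shca, 0)\n");
	return 0;
}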
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index dc3bda2634b7..8199c45768a3 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -79,7 +79,7 @@ static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
 	if (q_offset >= queue->queue_length)
 		return NULL;
 	current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
-	return  &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
+	return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
 }
 
 /*
85 | /* | 85 | /* |
@@ -247,6 +247,15 @@ static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) | |||
247 | return ret; | 247 | return ret; |
248 | } | 248 | } |
249 | 249 | ||
250 | static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue) | ||
251 | { | ||
252 | void *ret = ipz_qeit_get(queue); | ||
253 | u32 qe = *(u8 *) ret; | ||
254 | if ((qe >> 7) != (queue->toggle_state & 1)) | ||
255 | return NULL; | ||
256 | return ret; | ||
257 | } | ||
258 | |||
250 | /* returns address (GX) of first queue entry */ | 259 | /* returns address (GX) of first queue entry */ |
251 | static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt) | 260 | static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt) |
252 | { | 261 | { |
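
The new ipz_eqit_eq_peek_valid() above tests validity with a toggle bit: the high bit of the EQE's first byte must match the queue's toggle_state, which flips every time the ring wraps, so entries left over from the previous pass read as invalid without any extra producer/consumer bookkeeping. A toy model of that test follows; entry_valid() is an invented name mirroring the comparison, not driver API.

#include <assert.h>
#include <stdint.h>

static int entry_valid(uint8_t first_byte, int toggle_state)
{
	/* same comparison as ipz_eqit_eq_peek_valid() */
	return (first_byte >> 7) == (toggle_state & 1);
}

int main(void)
{
	/* first pass: producer writes entries with bit 7 set */
	assert(entry_valid(0x80, 1));	/* freshly written: valid */
	assert(!entry_valid(0x00, 1));	/* stale slot: not yet written */
	/* after a wrap, toggle_state flips and the sense inverts */
	assert(entry_valid(0x00, 0));
	return 0;
}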