author     Joachim Fenkes <fenkes@de.ibm.com>       2007-07-09 09:30:39 -0400
committer  Roland Dreier <rolandd@cisco.com>        2007-07-09 23:12:27 -0400
commit     28db6beb420c756c61dd44d9f2786a0677159e74
tree       3d5b7cfdfcfe80268bf47d84404d2d9a2034cc65 /drivers/infiniband
parent     9844b71baa60270110eabaa9589d3260443d1a71
IB/ehca: Refactor sync between completions and destroy_cq using atomic_t
- ehca_cq.nr_events is made an atomic_t, eliminating a lot of locking.
- The CQ is now removed from the CQ idr first, to make sure no more
  completions are scheduled on that CQ. The "wait for all completions
  to end" code becomes much simpler this way.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
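In short, the patch turns the event counter into a reference-count-like
scheme. The following condensed sketch (assembled from the hunks below;
error handling and unrelated code omitted) shows how the three paths
interact after the change:

        /* event/IRQ path: take an event reference while still holding
         * the idr lock, so destroy_cq cannot miss this event */
        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
        cq = idr_find(&ehca_cq_idr, token);
        if (cq)
                atomic_inc(&cq->nr_events);
        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

        /* completion path: drop the reference once the callback has
         * run; the final drop wakes a waiting destroy_cq */
        comp_event_callback(cq);
        if (atomic_dec_and_test(&cq->nr_events))
                wake_up(&cq->wait_completion);

        /* destroy path: unpublish the CQ first, then wait for all
         * pending events to drain before tearing the CQ down */
        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
        idr_remove(&ehca_cq_idr, my_cq->token);
        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
        wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));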
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  4
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c      | 26
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c     | 36
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.h     |  1
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h   |  1
 5 files changed, 29 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 3550047c1375..8580f2a0ea57 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -174,8 +174,8 @@ struct ehca_cq {
         spinlock_t cb_lock;
         struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
         struct list_head entry;
         u32 nr_callbacks; /* #events assigned to cpu by scaling code */
-        u32 nr_events; /* #events seen */
+        atomic_t nr_events; /* #events seen */
         wait_queue_head_t wait_completion;
         spinlock_t task_lock;
         u32 ownpid;
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 94bad273b34c..3729997457ca 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
         spin_lock_init(&my_cq->spinlock);
         spin_lock_init(&my_cq->cb_lock);
         spin_lock_init(&my_cq->task_lock);
+        atomic_set(&my_cq->nr_events, 0);
         init_waitqueue_head(&my_cq->wait_completion);
         my_cq->ownpid = current->tgid;
 
@@ -303,16 +304,6 @@ create_cq_exit1:
         return cq;
 }
 
-static int get_cq_nr_events(struct ehca_cq *my_cq)
-{
-        int ret;
-        unsigned long flags;
-        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-        ret = my_cq->nr_events;
-        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-        return ret;
-}
-
 int ehca_destroy_cq(struct ib_cq *cq)
 {
         u64 h_ret;
@@ -339,17 +330,18 @@ int ehca_destroy_cq(struct ib_cq *cq)
                 }
         }
 
+        /*
+         * remove the CQ from the idr first to make sure
+         * no more interrupt tasklets will touch this CQ
+         */
         spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-        while (my_cq->nr_events) {
-                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-                wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
-                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-                /* recheck nr_events to assure no cqe has just arrived */
-        }
-
         idr_remove(&ehca_cq_idr, my_cq->token);
         spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
+        /* now wait until all pending events have completed */
+        wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
+
+        /* nobody's using our CQ any longer -- we can destroy it */
         h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
         if (h_ret == H_R_STATE) {
                 /* cq in err: read err data and destroy it forcibly */
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 100329ba3343..3e790a326d97 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -5,6 +5,8 @@
  *
  *  Authors: Heiko J Schick <schickhj@de.ibm.com>
  *           Khadija Souissi <souissi@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
@@ -212,6 +214,8 @@ static void cq_event_callback(struct ehca_shca *shca,
 
         spin_lock_irqsave(&ehca_cq_idr_lock, flags);
         cq = idr_find(&ehca_cq_idr, token);
+        if (cq)
+                atomic_inc(&cq->nr_events);
         spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
         if (!cq)
@@ -219,6 +223,9 @@ static void cq_event_callback(struct ehca_shca *shca,
 
         ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
 
+        if (atomic_dec_and_test(&cq->nr_events))
+                wake_up(&cq->wait_completion);
+
         return;
 }
 
@@ -414,25 +421,22 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
                 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
                 cq = idr_find(&ehca_cq_idr, token);
+                if (cq)
+                        atomic_inc(&cq->nr_events);
+                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                 if (cq == NULL) {
-                        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                         ehca_err(&shca->ib_device,
                                  "Invalid eqe for non-existing cq token=%x",
                                  token);
                         return;
                 }
                 reset_eq_pending(cq);
-                cq->nr_events++;
-                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                 if (ehca_scaling_code)
                         queue_comp_task(cq);
                 else {
                         comp_event_callback(cq);
-                        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-                        cq->nr_events--;
-                        if (!cq->nr_events)
+                        if (atomic_dec_and_test(&cq->nr_events))
                                 wake_up(&cq->wait_completion);
-                        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                 }
         } else {
                 ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -478,15 +482,15 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                         token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                         spin_lock(&ehca_cq_idr_lock);
                         eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+                        if (eqe_cache[eqe_cnt].cq)
+                                atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
+                        spin_unlock(&ehca_cq_idr_lock);
                         if (!eqe_cache[eqe_cnt].cq) {
-                                spin_unlock(&ehca_cq_idr_lock);
                                 ehca_err(&shca->ib_device,
                                          "Invalid eqe for non-existing cq "
                                          "token=%x", token);
                                 continue;
                         }
-                        eqe_cache[eqe_cnt].cq->nr_events++;
-                        spin_unlock(&ehca_cq_idr_lock);
                 } else
                         eqe_cache[eqe_cnt].cq = NULL;
                 eqe_cnt++;
@@ -517,11 +521,8 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                         else {
                                 struct ehca_cq *cq = eq->eqe_cache[i].cq;
                                 comp_event_callback(cq);
-                                spin_lock(&ehca_cq_idr_lock);
-                                cq->nr_events--;
-                                if (!cq->nr_events)
+                                if (atomic_dec_and_test(&cq->nr_events))
                                         wake_up(&cq->wait_completion);
-                                spin_unlock(&ehca_cq_idr_lock);
                         }
                 } else {
                         ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -621,13 +622,10 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
         while (!list_empty(&cct->cq_list)) {
                 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
                 spin_unlock_irqrestore(&cct->task_lock, flags);
-                comp_event_callback(cq);
 
-                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-                cq->nr_events--;
-                if (!cq->nr_events)
+                comp_event_callback(cq);
+                if (atomic_dec_and_test(&cq->nr_events))
                         wake_up(&cq->wait_completion);
-                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
                 spin_lock_irqsave(&cct->task_lock, flags);
                 spin_lock(&cq->task_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
index 6ed06ee033ed..3346cb06cea6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.h
+++ b/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -47,7 +47,6 @@ struct ehca_shca;
 
 #include <linux/interrupt.h>
 #include <linux/types.h>
-#include <asm/atomic.h>
 
 int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 973c4b591545..03b185f873da 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -59,6 +59,7 @@
 #include <linux/cpu.h>
 #include <linux/device.h>
 
+#include <asm/atomic.h>
 #include <asm/abs_addr.h>
 #include <asm/ibmebus.h>
 #include <asm/io.h>