Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_irq.c')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c  |  140
1 file changed, 81 insertions, 59 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 100329ba3343..96eba3830754 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -5,6 +5,8 @@
  *
  *  Authors: Heiko J Schick <schickhj@de.ibm.com>
  *           Khadija Souissi <souissi@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
@@ -59,6 +61,7 @@
 #define NEQE_EVENT_CODE        EHCA_BMASK_IBM(2,7)
 #define NEQE_PORT_NUMBER       EHCA_BMASK_IBM(8,15)
 #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
+#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16,16)
 
 #define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52,63)
 #define ERROR_DATA_TYPE        EHCA_BMASK_IBM(0,7)
@@ -178,12 +181,11 @@ static void qp_event_callback(struct ehca_shca *shca,
 {
         struct ib_event event;
         struct ehca_qp *qp;
-        unsigned long flags;
         u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
 
-        spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+        read_lock(&ehca_qp_idr_lock);
         qp = idr_find(&ehca_qp_idr, token);
-        spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+        read_unlock(&ehca_qp_idr_lock);
 
 
         if (!qp)
@@ -207,18 +209,22 @@ static void cq_event_callback(struct ehca_shca *shca,
                               u64 eqe)
 {
         struct ehca_cq *cq;
-        unsigned long flags;
         u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
 
-        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+        read_lock(&ehca_cq_idr_lock);
         cq = idr_find(&ehca_cq_idr, token);
-        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+        if (cq)
+                atomic_inc(&cq->nr_events);
+        read_unlock(&ehca_cq_idr_lock);
 
         if (!cq)
                 return;
 
         ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
 
+        if (atomic_dec_and_test(&cq->nr_events))
+                wake_up(&cq->wait_completion);
+
         return;
 }
 
@@ -281,30 +287,61 @@ static void parse_identifier(struct ehca_shca *shca, u64 eqe)
         return;
 }
 
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
+static void dispatch_port_event(struct ehca_shca *shca, int port_num,
+                                enum ib_event_type type, const char *msg)
 {
         struct ib_event event;
+
+        ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
+        event.device = &shca->ib_device;
+        event.event = type;
+        event.element.port_num = port_num;
+        ib_dispatch_event(&event);
+}
+
+static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
+{
+        struct ehca_sma_attr new_attr;
+        struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
+
+        ehca_query_sma_attr(shca, port_num, &new_attr);
+
+        if (new_attr.sm_sl != old_attr->sm_sl ||
+            new_attr.sm_lid != old_attr->sm_lid)
+                dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
+                                    "SM changed");
+
+        if (new_attr.lid != old_attr->lid ||
+            new_attr.lmc != old_attr->lmc)
+                dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
+                                    "LID changed");
+
+        if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
+            memcmp(new_attr.pkeys, old_attr->pkeys,
+                   sizeof(u16) * new_attr.pkey_tbl_len))
+                dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
+                                    "P_Key changed");
+
+        *old_attr = new_attr;
+}
+
+static void parse_ec(struct ehca_shca *shca, u64 eqe)
+{
         u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
         u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
 
         switch (ec) {
         case 0x30: /* port availability change */
                 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                        ehca_info(&shca->ib_device,
-                                  "port %x is active.", port);
-                        event.device = &shca->ib_device;
-                        event.event = IB_EVENT_PORT_ACTIVE;
-                        event.element.port_num = port;
                         shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
-                        ib_dispatch_event(&event);
+                        dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
+                                            "is active");
+                        ehca_query_sma_attr(shca, port,
+                                            &shca->sport[port - 1].saved_attr);
                 } else {
-                        ehca_info(&shca->ib_device,
-                                  "port %x is inactive.", port);
-                        event.device = &shca->ib_device;
-                        event.event = IB_EVENT_PORT_ERR;
-                        event.element.port_num = port;
                         shca->sport[port - 1].port_state = IB_PORT_DOWN;
-                        ib_dispatch_event(&event);
+                        dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
+                                            "is inactive");
                 }
                 break;
         case 0x31:
@@ -312,24 +349,19 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
                  * disruptive change is caused by
                  * LID, PKEY or SM change
                  */
-                ehca_warn(&shca->ib_device,
-                          "disruptive port %x configuration change", port);
-
-                ehca_info(&shca->ib_device,
-                          "port %x is inactive.", port);
-                event.device = &shca->ib_device;
-                event.event = IB_EVENT_PORT_ERR;
-                event.element.port_num = port;
-                shca->sport[port - 1].port_state = IB_PORT_DOWN;
-                ib_dispatch_event(&event);
-
-                ehca_info(&shca->ib_device,
-                          "port %x is active.", port);
-                event.device = &shca->ib_device;
-                event.event = IB_EVENT_PORT_ACTIVE;
-                event.element.port_num = port;
-                shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
-                ib_dispatch_event(&event);
+                if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
+                        ehca_warn(&shca->ib_device, "disruptive port "
+                                  "%d configuration change", port);
+
+                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
+                        dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
+                                            "is inactive");
+
+                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+                        dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
+                                            "is active");
+                } else
+                        notify_port_conf_change(shca, port);
                 break;
         case 0x32: /* adapter malfunction */
                 ehca_err(&shca->ib_device, "Adapter malfunction.");
@@ -404,7 +436,6 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 {
         u64 eqe_value;
         u32 token;
-        unsigned long flags;
         struct ehca_cq *cq;
 
         eqe_value = eqe->entry;
@@ -412,27 +443,24 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
         if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                 ehca_dbg(&shca->ib_device, "Got completion event");
                 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+                read_lock(&ehca_cq_idr_lock);
                 cq = idr_find(&ehca_cq_idr, token);
+                if (cq)
+                        atomic_inc(&cq->nr_events);
+                read_unlock(&ehca_cq_idr_lock);
                 if (cq == NULL) {
-                        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                         ehca_err(&shca->ib_device,
                                  "Invalid eqe for non-existing cq token=%x",
                                  token);
                         return;
                 }
                 reset_eq_pending(cq);
-                cq->nr_events++;
-                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                 if (ehca_scaling_code)
                         queue_comp_task(cq);
                 else {
                         comp_event_callback(cq);
-                        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-                        cq->nr_events--;
-                        if (!cq->nr_events)
+                        if (atomic_dec_and_test(&cq->nr_events))
                                 wake_up(&cq->wait_completion);
-                        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                 }
         } else {
                 ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -476,17 +504,17 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                 eqe_value = eqe_cache[eqe_cnt].eqe->entry;
                 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                         token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-                        spin_lock(&ehca_cq_idr_lock);
+                        read_lock(&ehca_cq_idr_lock);
                         eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+                        if (eqe_cache[eqe_cnt].cq)
+                                atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
+                        read_unlock(&ehca_cq_idr_lock);
                         if (!eqe_cache[eqe_cnt].cq) {
-                                spin_unlock(&ehca_cq_idr_lock);
                                 ehca_err(&shca->ib_device,
                                          "Invalid eqe for non-existing cq "
                                          "token=%x", token);
                                 continue;
                         }
-                        eqe_cache[eqe_cnt].cq->nr_events++;
-                        spin_unlock(&ehca_cq_idr_lock);
                 } else
                         eqe_cache[eqe_cnt].cq = NULL;
                 eqe_cnt++;
@@ -517,11 +545,8 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                         else {
                                 struct ehca_cq *cq = eq->eqe_cache[i].cq;
                                 comp_event_callback(cq);
-                                spin_lock(&ehca_cq_idr_lock);
-                                cq->nr_events--;
-                                if (!cq->nr_events)
+                                if (atomic_dec_and_test(&cq->nr_events))
                                         wake_up(&cq->wait_completion);
-                                spin_unlock(&ehca_cq_idr_lock);
                         }
                 } else {
                         ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -621,13 +646,10 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
         while (!list_empty(&cct->cq_list)) {
                 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
                 spin_unlock_irqrestore(&cct->task_lock, flags);
-                comp_event_callback(cq);
 
-                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-                cq->nr_events--;
-                if (!cq->nr_events)
+                comp_event_callback(cq);
+                if (atomic_dec_and_test(&cq->nr_events))
                         wake_up(&cq->wait_completion);
-                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
                 spin_lock_irqsave(&cct->task_lock, flags);
                 spin_lock(&cq->task_lock);