Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_irq.c')
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 307
 1 file changed, 178 insertions(+), 129 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 6c4f9f91b15d..3ec53c687d08 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -63,15 +63,11 @@
 #define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
 #define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
 
-#ifdef CONFIG_INFINIBAND_EHCA_SCALING
-
 static void queue_comp_task(struct ehca_cq *__cq);
 
 static struct ehca_comp_pool* pool;
 static struct notifier_block comp_pool_callback_nb;
 
-#endif
-
 static inline void comp_event_callback(struct ehca_cq *cq)
 {
 	if (!cq->ib_cq.comp_handler)
@@ -206,7 +202,7 @@ static void qp_event_callback(struct ehca_shca *shca,
 }
 
 static void cq_event_callback(struct ehca_shca *shca,
-                              u64 eqe)
+			      u64 eqe)
 {
 	struct ehca_cq *cq;
 	unsigned long flags;
@@ -318,7 +314,7 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 			  "disruptive port %x configuration change", port);
 
 		ehca_info(&shca->ib_device,
-			 "port %x is inactive.", port);
+			  "port %x is inactive.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port;
@@ -326,7 +322,7 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 		ib_dispatch_event(&event);
 
 		ehca_info(&shca->ib_device,
-			 "port %x is active.", port);
+			  "port %x is active.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ACTIVE;
 		event.element.port_num = port;
@@ -401,115 +397,170 @@ irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void ehca_tasklet_eq(unsigned long data)
-{
-	struct ehca_shca *shca = (struct ehca_shca*)data;
-	struct ehca_eqe *eqe;
-	int int_state;
-	int query_cnt = 0;
 
-	do {
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
+{
+	u64 eqe_value;
+	u32 token;
+	unsigned long flags;
+	struct ehca_cq *cq;
+	eqe_value = eqe->entry;
+	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+		ehca_dbg(&shca->ib_device, "... completion event");
+		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq = idr_find(&ehca_cq_idr, token);
+		if (cq == NULL) {
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+			ehca_err(&shca->ib_device,
+				 "Invalid eqe for non-existing cq token=%x",
+				 token);
+			return;
+		}
+		reset_eq_pending(cq);
+		if (ehca_scaling_code) {
+			queue_comp_task(cq);
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		} else {
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+			comp_event_callback(cq);
+		}
+	} else {
+		ehca_dbg(&shca->ib_device,
+			 "Got non completion event");
+		parse_identifier(shca, eqe_value);
+	}
+}
 
-	if ((shca->hw_level >= 2) && eqe)
-		int_state = 1;
-	else
-		int_state = 0;
-
-	while ((int_state == 1) || eqe) {
-		while (eqe) {
-			u64 eqe_value = eqe->entry;
+void ehca_process_eq(struct ehca_shca *shca, int is_irq)
+{
+	struct ehca_eq *eq = &shca->eq;
+	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
+	u64 eqe_value;
+	unsigned long flags;
+	int eqe_cnt, i;
+	int eq_empty = 0;
 
-			ehca_dbg(&shca->ib_device,
-				 "eqe_value=%lx", eqe_value);
-
-			/* TODO: better structure */
-			if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
-					   eqe_value)) {
-				unsigned long flags;
-				u32 token;
-				struct ehca_cq *cq;
-
-				ehca_dbg(&shca->ib_device,
-					 "... completion event");
-				token =
-					EHCA_BMASK_GET(EQE_CQ_TOKEN,
-						       eqe_value);
-				spin_lock_irqsave(&ehca_cq_idr_lock,
-						  flags);
-				cq = idr_find(&ehca_cq_idr, token);
-
-				if (cq == NULL) {
-					spin_unlock_irqrestore(&ehca_cq_idr_lock,
-							       flags);
-					break;
-				}
-
-				reset_eq_pending(cq);
-#ifdef CONFIG_INFINIBAND_EHCA_SCALING
-				queue_comp_task(cq);
-				spin_unlock_irqrestore(&ehca_cq_idr_lock,
-						       flags);
-#else
-				spin_unlock_irqrestore(&ehca_cq_idr_lock,
-						       flags);
-				comp_event_callback(cq);
-#endif
-			} else {
-				ehca_dbg(&shca->ib_device,
-					 "... non completion event");
-				parse_identifier(shca, eqe_value);
-			}
-			eqe =
-				(struct ehca_eqe *)ehca_poll_eq(shca,
-								&shca->eq);
-		}
+	spin_lock_irqsave(&eq->irq_spinlock, flags);
+	if (is_irq) {
+		const int max_query_cnt = 100;
+		int query_cnt = 0;
+		int int_state = 1;
+		do {
+			int_state = hipz_h_query_int_state(
+				shca->ipz_hca_handle, eq->ist);
+			query_cnt++;
+			iosync();
+		} while (int_state && query_cnt < max_query_cnt);
+		if (unlikely((query_cnt == max_query_cnt)))
+			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
+				 int_state, query_cnt);
+	}
 
-		if (shca->hw_level >= 2) {
-			int_state =
-			    hipz_h_query_int_state(shca->ipz_hca_handle,
-						   shca->eq.ist);
-			query_cnt++;
-			iosync();
-			if (query_cnt >= 100) {
-				query_cnt = 0;
-				int_state = 0;
-			}
+	/* read out all eqes */
+	eqe_cnt = 0;
+	do {
+		u32 token;
+		eqe_cache[eqe_cnt].eqe =
+			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
+		if (!eqe_cache[eqe_cnt].eqe)
+			break;
+		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
+		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+			spin_lock(&ehca_cq_idr_lock);
+			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+			if (!eqe_cache[eqe_cnt].cq) {
+				spin_unlock(&ehca_cq_idr_lock);
+				ehca_err(&shca->ib_device,
+					 "Invalid eqe for non-existing cq "
+					 "token=%x", token);
+				continue;
 			}
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
-
+			spin_unlock(&ehca_cq_idr_lock);
+		} else
+			eqe_cache[eqe_cnt].cq = NULL;
+		eqe_cnt++;
+	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
+	if (!eqe_cnt) {
+		if (is_irq)
+			ehca_dbg(&shca->ib_device,
+				 "No eqe found for irq event");
+		goto unlock_irq_spinlock;
+	} else if (!is_irq)
+		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
+	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
+	/* enable irq for new packets */
+	for (i = 0; i < eqe_cnt; i++) {
+		if (eq->eqe_cache[i].cq)
+			reset_eq_pending(eq->eqe_cache[i].cq);
+	}
+	/* check eq */
+	spin_lock(&eq->spinlock);
+	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
+	spin_unlock(&eq->spinlock);
+	/* call completion handler for cached eqes */
+	for (i = 0; i < eqe_cnt; i++)
+		if (eq->eqe_cache[i].cq) {
+			if (ehca_scaling_code) {
+				spin_lock(&ehca_cq_idr_lock);
+				queue_comp_task(eq->eqe_cache[i].cq);
+				spin_unlock(&ehca_cq_idr_lock);
+			} else
+				comp_event_callback(eq->eqe_cache[i].cq);
+		} else {
+			ehca_dbg(&shca->ib_device, "Got non completion event");
+			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
 		}
-	} while (int_state != 0);
-
-	return;
+	/* poll eq if not empty */
+	if (eq_empty)
+		goto unlock_irq_spinlock;
+	do {
+		struct ehca_eqe *eqe;
+		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+		if (!eqe)
+			break;
+		process_eqe(shca, eqe);
+		eqe_cnt++;
+	} while (1);
+
+unlock_irq_spinlock:
+	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
 }
 
-#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+void ehca_tasklet_eq(unsigned long data)
+{
+	ehca_process_eq((struct ehca_shca*)data, 1);
+}
 
 static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
 {
-	unsigned long flags_last_cpu;
+	int cpu;
+	unsigned long flags;
 
+	WARN_ON_ONCE(!in_interrupt());
 	if (ehca_debug_level)
 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
-	spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
-	pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
-	if (pool->last_cpu == NR_CPUS)
-		pool->last_cpu = first_cpu(cpu_online_map);
-	spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
+	spin_lock_irqsave(&pool->last_cpu_lock, flags);
+	cpu = next_cpu(pool->last_cpu, cpu_online_map);
+	if (cpu == NR_CPUS)
+		cpu = first_cpu(cpu_online_map);
+	pool->last_cpu = cpu;
+	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
-	return pool->last_cpu;
+	return cpu;
 }
 
 static void __queue_comp_task(struct ehca_cq *__cq,
 			      struct ehca_cpu_comp_task *cct)
 {
-	unsigned long flags_cct;
-	unsigned long flags_cq;
+	unsigned long flags;
 
-	spin_lock_irqsave(&cct->task_lock, flags_cct);
-	spin_lock_irqsave(&__cq->task_lock, flags_cq);
+	spin_lock_irqsave(&cct->task_lock, flags);
+	spin_lock(&__cq->task_lock);
 
 	if (__cq->nr_callbacks == 0) {
 		__cq->nr_callbacks++;
@@ -520,8 +571,8 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 	else
 		__cq->nr_callbacks++;
 
-	spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
-	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+	spin_unlock(&__cq->task_lock);
+	spin_unlock_irqrestore(&cct->task_lock, flags);
 }
 
 static void queue_comp_task(struct ehca_cq *__cq)
@@ -532,69 +583,69 @@ static void queue_comp_task(struct ehca_cq *__cq)
 
 	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
-
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+	BUG_ON(!cct);
 
 	if (cct->cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+		BUG_ON(!cct);
 	}
 
 	__queue_comp_task(__cq, cct);
-
-	put_cpu();
-
-	return;
 }
 
 static void run_comp_task(struct ehca_cpu_comp_task* cct)
 {
 	struct ehca_cq *cq;
-	unsigned long flags_cct;
-	unsigned long flags_cq;
+	unsigned long flags;
 
-	spin_lock_irqsave(&cct->task_lock, flags_cct);
+	spin_lock_irqsave(&cct->task_lock, flags);
 
 	while (!list_empty(&cct->cq_list)) {
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-		spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags_cct);
+		spin_lock_irqsave(&cct->task_lock, flags);
 
-		spin_lock_irqsave(&cq->task_lock, flags_cq);
+		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
 		if (cq->nr_callbacks == 0) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
-		spin_unlock_irqrestore(&cq->task_lock, flags_cq);
-
+		spin_unlock(&cq->task_lock);
 	}
 
-	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
-
-	return;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
 }
 
 static int comp_task(void *__cct)
 {
 	struct ehca_cpu_comp_task* cct = __cct;
+	int cql_empty;
 	DECLARE_WAITQUEUE(wait, current);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while(!kthread_should_stop()) {
 		add_wait_queue(&cct->wait_queue, &wait);
 
-		if (list_empty(&cct->cq_list))
+		spin_lock_irq(&cct->task_lock);
+		cql_empty = list_empty(&cct->cq_list);
+		spin_unlock_irq(&cct->task_lock);
+		if (cql_empty)
 			schedule();
 		else
 			__set_current_state(TASK_RUNNING);
 
 		remove_wait_queue(&cct->wait_queue, &wait);
 
-		if (!list_empty(&cct->cq_list))
+		spin_lock_irq(&cct->task_lock);
+		cql_empty = list_empty(&cct->cq_list);
+		spin_unlock_irq(&cct->task_lock);
+		if (!cql_empty)
 			run_comp_task(__cct);
 
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -637,8 +688,6 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
 
 	if (task)
 		kthread_stop(task);
-
-	return;
 }
 
 static void take_over_work(struct ehca_comp_pool *pool,
@@ -654,11 +703,11 @@ static void take_over_work(struct ehca_comp_pool *pool,
 	list_splice_init(&cct->cq_list, &list);
 
 	while(!list_empty(&list)) {
-	       cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
 		list_del(&cq->entry);
-	       __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-						smp_processor_id()));
+		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
+						  smp_processor_id()));
 	}
 
 	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
@@ -708,14 +757,14 @@ static int comp_pool_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#endif
-
 int ehca_create_comp_pool(void)
 {
-#ifdef CONFIG_INFINIBAND_EHCA_SCALING
 	int cpu;
 	struct task_struct *task;
 
+	if (!ehca_scaling_code)
+		return 0;
+
 	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
 	if (pool == NULL)
 		return -ENOMEM;
@@ -740,16 +789,19 @@ int ehca_create_comp_pool(void)
 	comp_pool_callback_nb.notifier_call = comp_pool_callback;
 	comp_pool_callback_nb.priority =0;
 	register_cpu_notifier(&comp_pool_callback_nb);
-#endif
+
+	printk(KERN_INFO "eHCA scaling code enabled\n");
 
 	return 0;
 }
 
 void ehca_destroy_comp_pool(void)
 {
-#ifdef CONFIG_INFINIBAND_EHCA_SCALING
 	int i;
 
+	if (!ehca_scaling_code)
+		return;
+
 	unregister_cpu_notifier(&comp_pool_callback_nb);
 
 	for (i = 0; i < NR_CPUS; i++) {
@@ -758,7 +810,4 @@ void ehca_destroy_comp_pool(void)
 	}
 	free_percpu(pool->cpu_comp_tasks);
 	kfree(pool);
-#endif
-
-	return;
 }
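
Note: the removed CONFIG_INFINIBAND_EHCA_SCALING blocks are replaced above by run-time checks of ehca_scaling_code. That flag is defined outside this file (the diff is limited to ehca_irq.c), so the sketch below only illustrates the general pattern of exposing such a switch as a module parameter; the parameter name, permissions, and default shown here are assumptions, not the actual ehca_main.c code.

/* Illustrative sketch only -- the real flag lives in ehca_main.c, which is
 * not part of this diff.  A compile-time #ifdef turned into a run-time
 * switch is typically exposed as a module parameter:
 */
#include <linux/module.h>

int ehca_scaling_code;	/* assumed default: 0 = scaling code disabled */
module_param_named(scaling_code, ehca_scaling_code, int, 0444);
MODULE_PARM_DESC(scaling_code,
		 "spread CQ completion handling over per-CPU comp tasks (default: no)");

/* Callers such as ehca_irq.c then branch at run time instead of being
 * compiled in or out:
 *
 *	if (ehca_scaling_code)
 *		queue_comp_task(cq);
 *	else
 *		comp_event_callback(cq);
 */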