aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPetr Mladek <pmladek@suse.com>2016-10-19 08:07:20 -0400
committerDoug Ledford <dledford@redhat.com>2016-12-14 12:16:11 -0500
commitf5eabf5e5129e8ab5b3e7f50b24444aca1680e64 (patch)
treed4e67e68a300111713fb5a12132bd060733b8687
parent6efaf10f163d9a60d1d4b2a049b194a53537ba1b (diff)
IB/rdmavt: Handle the kthread worker using the new API
Use the new API to create and destroy the cq kthread worker. The API hides some implementation details. In particular, kthread_create_worker() allocates and initializes struct kthread_worker. It runs the kthread the right way and stores task_struct into the worker structure. In addition, the *on_cpu() variant binds the kthread to the given cpu and the related memory node. kthread_destroy_worker() flushes all pending works, stops the kthread and frees the structure. This patch does not change the existing behavior. Note that we must use the on_cpu() variant because the function starts the kthread and it must bind the kthread to the right CPU before waking it. The NUMA node associated with the given CPU is used as well. Signed-off-by: Petr Mladek <pmladek@suse.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c34
1 file changed, 11 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 223ec4589fc7..4d0b6992e847 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -503,33 +503,23 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
503 */ 503 */
504int rvt_driver_cq_init(struct rvt_dev_info *rdi) 504int rvt_driver_cq_init(struct rvt_dev_info *rdi)
505{ 505{
506 int ret = 0;
507 int cpu; 506 int cpu;
508 struct task_struct *task; 507 struct kthread_worker *worker;
509 508
510 if (rdi->worker) 509 if (rdi->worker)
511 return 0; 510 return 0;
511
512 spin_lock_init(&rdi->n_cqs_lock); 512 spin_lock_init(&rdi->n_cqs_lock);
513 rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
514 if (!rdi->worker)
515 return -ENOMEM;
516 kthread_init_worker(rdi->worker);
517 task = kthread_create_on_node(
518 kthread_worker_fn,
519 rdi->worker,
520 rdi->dparms.node,
521 "%s", rdi->dparms.cq_name);
522 if (IS_ERR(task)) {
523 kfree(rdi->worker);
524 rdi->worker = NULL;
525 return PTR_ERR(task);
526 }
527 513
528 set_user_nice(task, MIN_NICE);
529 cpu = cpumask_first(cpumask_of_node(rdi->dparms.node)); 514 cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
530 kthread_bind(task, cpu); 515 worker = kthread_create_worker_on_cpu(cpu, 0,
531 wake_up_process(task); 516 "%s", rdi->dparms.cq_name);
532 return ret; 517 if (IS_ERR(worker))
518 return PTR_ERR(worker);
519
520 set_user_nice(worker->task, MIN_NICE);
521 rdi->worker = worker;
522 return 0;
533} 523}
534 524
535/** 525/**
@@ -549,7 +539,5 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
549 rdi->worker = NULL; 539 rdi->worker = NULL;
550 spin_unlock_irq(&rdi->n_cqs_lock); 540 spin_unlock_irq(&rdi->n_cqs_lock);
551 541
552 kthread_flush_worker(worker); 542 kthread_destroy_worker(worker);
553 kthread_stop(worker->task);
554 kfree(worker);
555} 543}