commit    838850ee0bb97fc60ca8f1de3bf12ed0854f6173
tree      491ac2ac043f47b83dea17dee54020115b76bfdc
parent    ddc8f6feec76b5deea8090db015920a283006044
author    Dave Jiang <dave.jiang@intel.com>  2016-03-18 19:39:47 -0400
committer Jon Mason <jdmason@kudzu.us>       2016-03-21 19:28:30 -0400
NTB: Fix incorrect clean up routine in ntb_perf
The clean-up routine that runs when we fail to allocate a kthread does not
clean up all the threads; it only touches the same one over and over again.
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Allen Hubbe <Allen.Hubbe@emc.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
-rw-r--r--  drivers/ntb/test/ntb_perf.c | 42
1 file changed, 24 insertions(+), 18 deletions(-)
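As a side note before the patch itself, the control-flow mistake is easier to see outside the driver. The sketch below is a minimal, hypothetical userspace model, not driver code: `struct slot`, `slots`, `cleanup_buggy`, and `cleanup_all` are invented names, and `free()` merely stands in for `kthread_stop()`. It shows why the old error path cleared at most one thread: the loop advanced `i` but kept dereferencing the same `pctx`, whereas the new `threads_cleanup()` re-derives the per-slot pointer from the index on every iteration.

```c
/*
 * Minimal, hypothetical userspace model of the ntb_perf cleanup bug.
 * None of these names exist in the driver; only the control flow is
 * mirrored (free() stands in for kthread_stop()).
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_THREADS 8

struct slot {
        int *thread;    /* stands in for the per-thread task pointer */
};

static struct slot slots[MAX_THREADS];

/* Old shape of the error path: 'pctx' never changes inside the loop,
 * so every iteration looks at the same slot and the others leak. */
static void cleanup_buggy(struct slot *pctx)
{
        for (int i = 0; i < MAX_THREADS; i++) {
                if (pctx->thread) {
                        free(pctx->thread);
                        pctx->thread = NULL;
                }
        }
}

/* New shape, like threads_cleanup(): re-derive the slot from the index. */
static void cleanup_all(void)
{
        for (int i = 0; i < MAX_THREADS; i++) {
                struct slot *pctx = &slots[i];

                if (pctx->thread) {
                        free(pctx->thread);
                        pctx->thread = NULL;
                }
        }
}

int main(void)
{
        /* Pretend slots 0..3 started fine and slot 4 failed to start. */
        for (int i = 0; i < 4; i++)
                slots[i].thread = malloc(sizeof(int));

        cleanup_buggy(&slots[4]);       /* touches only slot 4; 0..3 survive */

        int leaked = 0;
        for (int i = 0; i < MAX_THREADS; i++)
                if (slots[i].thread)
                        leaked++;
        printf("after buggy cleanup: %d slots still active\n", leaked);

        cleanup_all();                  /* walks every slot; nothing survives */
        return 0;
}
```

The patch applies the same idea in kernel terms: both the "stop the threads" path and the thread-creation failure path now funnel into a single threads_cleanup() helper, with the failure path returning -ENXIO through a conventional goto err exit.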
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index d82d10791e7d..8dfce9c9aad0 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -559,6 +559,21 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
         return ret;
 }
 
+static void threads_cleanup(struct perf_ctx *perf)
+{
+        struct pthr_ctx *pctx;
+        int i;
+
+        perf->run = false;
+        for (i = 0; i < MAX_THREADS; i++) {
+                pctx = &perf->pthr_ctx[i];
+                if (pctx->thread) {
+                        kthread_stop(pctx->thread);
+                        pctx->thread = NULL;
+                }
+        }
+}
+
 static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                  size_t count, loff_t *offp)
 {
@@ -574,17 +589,9 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
         if (atomic_read(&perf->tsync) == 0)
                 perf->run = false;
 
-        if (perf->run) {
-                /* lets stop the threads */
-                perf->run = false;
-                for (i = 0; i < MAX_THREADS; i++) {
-                        if (perf->pthr_ctx[i].thread) {
-                                kthread_stop(perf->pthr_ctx[i].thread);
-                                perf->pthr_ctx[i].thread = NULL;
-                        } else
-                                break;
-                }
-        } else {
+        if (perf->run)
+                threads_cleanup(perf);
+        else {
                 perf->run = true;
 
                 if (perf->perf_threads > MAX_THREADS) {
@@ -616,13 +623,8 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                                (void *)pctx,
                                                node, "ntb_perf %d", i);
                 if (IS_ERR(pctx->thread)) {
-                        perf->run = false;
-                        for (i = 0; i < MAX_THREADS; i++) {
-                                if (pctx->thread) {
-                                        kthread_stop(pctx->thread);
-                                        pctx->thread = NULL;
-                                }
-                        }
+                        pctx->thread = NULL;
+                        goto err;
                 } else
                         wake_up_process(pctx->thread);
 
@@ -633,6 +635,10 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
         }
 
         return count;
+
+err:
+        threads_cleanup(perf);
+        return -ENXIO;
 }
 
 static const struct file_operations ntb_perf_debugfs_run = {
