Diffstat (limited to 'drivers/edac/edac_mc.c')
-rw-r--r--	drivers/edac/edac_mc.c	64
1 file changed, 13 insertions(+), 51 deletions(-)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 8adfc167c2e3..1472f48c8ac6 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -535,60 +535,21 @@ static void edac_mc_workq_function(struct work_struct *work_req)
 
 	mutex_lock(&mem_ctls_mutex);
 
-	/* if this control struct has movd to offline state, we are done */
-	if (mci->op_state == OP_OFFLINE) {
+	if (mci->op_state != OP_RUNNING_POLL) {
 		mutex_unlock(&mem_ctls_mutex);
 		return;
 	}
 
-	/* Only poll controllers that are running polled and have a check */
-	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+	if (edac_mc_assert_error_check_and_clear())
 		mci->edac_check(mci);
 
 	mutex_unlock(&mem_ctls_mutex);
 
-	/* Reschedule */
+	/* Queue ourselves again. */
 	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
 }
 
 /*
- * edac_mc_workq_setup
- *	initialize a workq item for this mci
- *	passing in the new delay period in msec
- *
- *	locking model:
- *
- *		called with the mem_ctls_mutex held
- */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
-{
-	edac_dbg(0, "\n");
-
-	/* if this instance is not in the POLL state, then simply return */
-	if (mci->op_state != OP_RUNNING_POLL)
-		return;
-
-	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-
-	edac_queue_work(&mci->work, msecs_to_jiffies(msec));
-}
-
-/*
- * edac_mc_workq_teardown
- *	stop the workq processing on this mci
- *
- *	locking model:
- *
- *		called WITHOUT lock held
- */
-static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
-{
-	mci->op_state = OP_OFFLINE;
-
-	edac_stop_work(&mci->work);
-}
-
-/*
  * edac_mc_reset_delay_period(unsigned long value)
  *
  *	user space has updated our poll period value, need to
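The poller above now keys off OP_RUNNING_POLL alone, and the ->edac_check NULL test is dropped: the work is only ever queued for controllers that registered a check routine (see the edac_mc_add_mc_with_groups() hunk below), so the pointer cannot be NULL here. For context, a driver opts into this polling mode simply by setting ->edac_check before registering the controller. A minimal sketch of such a callback; the mydrv_* names, register offset and status bit are hypothetical:

/* Hypothetical check routine, run from edac_mc_workq_function() every
 * edac_mc_get_poll_msec() milliseconds. */
static void mydrv_edac_check(struct mem_ctl_info *mci)
{
	u32 stat = readl(mydrv_base + MYDRV_ECC_STATUS);	/* made-up register */

	if (stat & MYDRV_ECC_CE)				/* made-up CE bit */
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     0, 0, 0, 0, 0, -1,
				     mci->ctl_name, "");
}

Leaving ->edac_check NULL instead puts the controller into OP_RUNNING_INTERRUPT, and no work is queued at all.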
@@ -771,12 +732,12 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
 		goto fail1;
 	}
 
-	/* If there IS a check routine, then we are running POLLED */
-	if (mci->edac_check != NULL) {
-		/* This instance is NOW RUNNING */
+	if (mci->edac_check) {
 		mci->op_state = OP_RUNNING_POLL;
 
-		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
+
 	} else {
 		mci->op_state = OP_RUNNING_INTERRUPT;
 	}
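The INIT_DELAYED_WORK()/edac_queue_work() pair open-codes what edac_mc_workq_setup() used to do; the helper's op_state guard is redundant here because OP_RUNNING_POLL is assigned on the line before, under the same mem_ctls_mutex. For reference, edac_queue_work() is a thin wrapper around an EDAC-private workqueue; roughly (a sketch, assuming the helper lives in drivers/edac/edac_module.c with a module-private workqueue wq):

/* Sketch, not verbatim: queue delayed work on the private EDAC
 * workqueue rather than the system one. */
bool edac_queue_work(struct delayed_work *work, unsigned long delay)
{
	return queue_delayed_work(wq, work, delay);
}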
@@ -823,15 +784,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
 		return NULL;
 	}
 
+	/* mark MCI offline: */
+	mci->op_state = OP_OFFLINE;
+
 	if (!del_mc_from_global_list(mci))
 		edac_mc_owner = NULL;
-	mutex_unlock(&mem_ctls_mutex);
 
-	/* flush workq processes */
-	edac_mc_workq_teardown(mci);
+	mutex_unlock(&mem_ctls_mutex);
 
-	/* marking MCI offline */
-	mci->op_state = OP_OFFLINE;
+	if (mci->edac_check)
+		edac_stop_work(&mci->work);
 
 	/* remove from sysfs */
 	edac_remove_sysfs_mci_device(mci);
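Ordering note on the removal path: OP_OFFLINE is now set under mem_ctls_mutex *before* the work is cancelled. A poll that is already running blocks on the mutex, then sees op_state != OP_RUNNING_POLL and returns without requeueing itself, so edac_stop_work() only has to reap whatever is still pending. The if (mci->edac_check) guard mirrors registration, where the work is initialized only for polled controllers. A sketch of the helper, assuming it simply wraps the generic cancel primitive:

/* Sketch: synchronously cancel the delayed work. Safe here because the
 * worker can no longer requeue itself once op_state is OP_OFFLINE. */
void edac_stop_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}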