Diffstat (limited to 'drivers/edac/edac_device.c')
-rw-r--r--   drivers/edac/edac_device.c   56
1 file changed, 47 insertions(+), 9 deletions(-)
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 173f4ba0f7c8..7e3723768aca 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -32,7 +32,9 @@
 #include "edac_core.h"
 #include "edac_module.h"
 
-/* lock to memory controller's control array 'edac_device_list' */
+/* lock for the list: 'edac_device_list', manipulation of this list
+ * is protected by the 'device_ctls_mutex' lock
+ */
 static DEFINE_MUTEX(device_ctls_mutex);
 static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list);
 
@@ -386,6 +388,14 @@ EXPORT_SYMBOL_GPL(edac_device_find);
 /*
  * edac_device_workq_function
  *      performs the operation scheduled by a workq request
+ *
+ *      this workq is embedded within an edac_device_ctl_info
+ *      structure that needs to be polled for possible error events.
+ *
+ *      This operation is to acquire the list mutex lock
+ *      (thus preventing insertion or deletion)
+ *      and then call the device's poll function IFF this device is
+ *      running polled and there is a poll function defined.
  */
 static void edac_device_workq_function(struct work_struct *work_req)
 {
@@ -403,8 +413,17 @@ static void edac_device_workq_function(struct work_struct *work_req)
 
         mutex_unlock(&device_ctls_mutex);
 
-        /* Reschedule */
-        queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay);
+        /* Reschedule the workq for the next time period to start again.
+         * If the number of msec is for 1 sec, then adjust to the next
+         * whole one second to save timers firing all over the period
+         * between integral seconds
+         */
+        if (edac_dev->poll_msec == 1000)
+                queue_delayed_work(edac_workqueue, &edac_dev->work,
+                                round_jiffies(edac_dev->delay));
+        else
+                queue_delayed_work(edac_workqueue, &edac_dev->work,
+                                edac_dev->delay);
 }
 
 /*
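(For context, a minimal self-contained sketch of the self-rescheduling poll pattern the hunk above adds, with the common 1000 ms period rounded onto whole-second boundaries via round_jiffies(). The names poll_ctl, poll_fn, ctls_mutex and poll_wq are illustrative stand-ins, not the EDAC symbols.)

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct poll_ctl {
        struct delayed_work work;       /* self-rescheduling work item */
        unsigned long poll_msec;        /* requested period in ms */
        unsigned long delay;            /* the same period in jiffies */
};

static DEFINE_MUTEX(ctls_mutex);                /* stand-in for device_ctls_mutex */
static struct workqueue_struct *poll_wq;        /* assumed created elsewhere */

static void poll_fn(struct work_struct *work_req)
{
        struct delayed_work *d_work =
                container_of(work_req, struct delayed_work, work);
        struct poll_ctl *ctl = container_of(d_work, struct poll_ctl, work);

        mutex_lock(&ctls_mutex);        /* poll under the list mutex, as above */
        /* ... check the device for error events here ... */
        mutex_unlock(&ctls_mutex);

        /* reschedule; round the common 1 s period so such pollers fire
         * together on whole seconds instead of scattering sub-second timers
         */
        if (ctl->poll_msec == 1000)
                queue_delayed_work(poll_wq, &ctl->work,
                                   round_jiffies(ctl->delay));
        else
                queue_delayed_work(poll_wq, &ctl->work, ctl->delay);
}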
@@ -417,11 +436,26 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
 {
         debugf0("%s()\n", __func__);
 
+        /* take the arg 'msec' and set it into the control structure
+         * to be used in the time period calculation,
+         * then calculate the number of jiffies that represents
+         */
         edac_dev->poll_msec = msec;
-        edac_dev->delay = msecs_to_jiffies(msec);       /* Calc delay jiffies */
+        edac_dev->delay = msecs_to_jiffies(msec);
 
         INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
-        queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay);
+
+        /* optimize here for the 1 second case, which will be the normal value,
+         * to fire ON the 1 second time event. This helps reduce all sorts of
+         * timers firing on a sub-second basis, while they are happy
+         * to fire together on the 1 second exactly
+         */
+        if (edac_dev->poll_msec == 1000)
+                queue_delayed_work(edac_workqueue, &edac_dev->work,
+                                round_jiffies(edac_dev->delay));
+        else
+                queue_delayed_work(edac_workqueue, &edac_dev->work,
+                                edac_dev->delay);
 }
 
 /*
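(The setup side of the same illustrative sketch, mirroring the edac_device_workq_setup() hunk above: record the period in both ms and jiffies, initialize the delayed work, and queue the first run with the same whole-second rounding. poll_setup is an assumed name.)

static void poll_setup(struct poll_ctl *ctl, unsigned long msec)
{
        ctl->poll_msec = msec;                  /* remember the period in ms */
        ctl->delay = msecs_to_jiffies(msec);    /* and in jiffies */

        INIT_DELAYED_WORK(&ctl->work, poll_fn);

        /* first run: same whole-second rounding as the reschedule path */
        if (ctl->poll_msec == 1000)
                queue_delayed_work(poll_wq, &ctl->work,
                                   round_jiffies(ctl->delay));
        else
                queue_delayed_work(poll_wq, &ctl->work, ctl->delay);
}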
@@ -441,16 +475,20 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
 
 /*
  * edac_device_reset_delay_period
+ *
+ *      need to stop any outstanding workq queued up at this time
+ *      because we will be resetting the sleep time.
+ *      Then restart the workq on the new delay
  */
-
 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
                                 unsigned long value)
 {
-        mutex_lock(&device_ctls_mutex);
-
-        /* cancel the current workq request */
+        /* cancel the current workq request, without the mutex lock */
         edac_device_workq_teardown(edac_dev);
 
+        /* acquire the mutex before doing the workq setup */
+        mutex_lock(&device_ctls_mutex);
+
         /* restart the workq request, with new delay value */
         edac_device_workq_setup(edac_dev, value);
 
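(Why the teardown now runs before mutex_lock() in the hunk above: the work function executes under device_ctls_mutex, so waiting for a running instance to finish while holding that mutex could deadlock. Below is a sketch of that ordering, continuing the illustrative example; poll_teardown() and poll_reset_period() are assumed names, and the real edac_device_workq_teardown() body is not shown in this diff.)

static void poll_teardown(struct poll_ctl *ctl)
{
        /* assumed teardown behaviour: drop a pending run and wait out a
         * running one; it must NOT be called with ctls_mutex held, since
         * poll_fn() takes that mutex and the flush waits for poll_fn()
         */
        if (!cancel_delayed_work(&ctl->work))
                flush_workqueue(poll_wq);
}

static void poll_reset_period(struct poll_ctl *ctl, unsigned long msec)
{
        poll_teardown(ctl);             /* stop the old timer, lock not held */

        mutex_lock(&ctls_mutex);        /* then serialize the re-setup */
        poll_setup(ctl, msec);
        mutex_unlock(&ctls_mutex);
}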