Diffstat (limited to 'drivers/edac/edac_mc.c')
 drivers/edac/edac_mc.c | 64 ++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 42 insertions(+), 22 deletions(-)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 4471be362599..063a1bffe38b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,6 +214,13 @@ void edac_mc_free(struct mem_ctl_info *mci)
 }
 EXPORT_SYMBOL_GPL(edac_mc_free);
 
+
+/*
+ * find_mci_by_dev
+ *
+ *	scan list of controllers looking for the one that manages
+ *	the 'dev' device
+ */
 static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
 {
 	struct mem_ctl_info *mci;
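For reference, a minimal sketch of how a lookup like find_mci_by_dev() is typically written, assuming the mc_devices list and an mci->dev back-pointer as used elsewhere in edac_mc.c; this is an illustration, not the patched source:

/* Sketch only: walk the global controller list and return the mci whose
 * backing device matches 'dev'.  Callers are assumed to hold mem_ctls_mutex. */
static struct mem_ctl_info *find_mci_by_dev_sketch(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->dev == dev)
			return mci;
	}

	return NULL;	/* no controller manages this device */
}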
@@ -268,12 +275,6 @@ static void edac_mc_workq_function(struct work_struct *work_req)
 	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
 		mci->edac_check(mci);
 
-	/*
-	 * FIXME: temp place holder for PCI checks,
-	 * goes away when we break out PCI
-	 */
-	edac_pci_do_parity_check();
-
 	mutex_unlock(&mem_ctls_mutex);
 
 	/* Reschedule */
@@ -314,36 +315,55 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
 {
 	int status;
 
-	/* if not running POLL, leave now */
-	if (mci->op_state == OP_RUNNING_POLL) {
-		status = cancel_delayed_work(&mci->work);
-		if (status == 0) {
-			debugf0("%s() not canceled, flush the queue\n",
-				__func__);
+	status = cancel_delayed_work(&mci->work);
+	if (status == 0) {
+		debugf0("%s() not canceled, flush the queue\n",
+			__func__);
 
 		/* workq instance might be running, wait for it */
 		flush_workqueue(edac_workqueue);
-		}
 	}
 }
 
 /*
- * edac_reset_delay_period
+ * edac_mc_reset_delay_period(unsigned long value)
+ *
+ *	user space has updated our poll period value, need to
+ *	reset our workq delays
  */
-static void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+void edac_mc_reset_delay_period(int value)
 {
-	/* cancel the current workq request */
-	edac_mc_workq_teardown(mci);
+	struct mem_ctl_info *mci;
+	struct list_head *item;
 
-	/* lock the list of devices for the new setup */
 	mutex_lock(&mem_ctls_mutex);
 
-	/* restart the workq request, with new delay value */
-	edac_mc_workq_setup(mci, value);
+	/* scan the list and turn off all workq timers, doing so under lock
+	 */
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		if (mci->op_state == OP_RUNNING_POLL)
+			cancel_delayed_work(&mci->work);
+	}
+
+	mutex_unlock(&mem_ctls_mutex);
+
+
+	/* re-walk the list, and reset the poll delay */
+	mutex_lock(&mem_ctls_mutex);
+
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		edac_mc_workq_setup(mci, (unsigned long) value);
+	}
 
 	mutex_unlock(&mem_ctls_mutex);
 }
 
+
+
 /* Return 0 on success, 1 on failure.
  * Before calling this function, caller must
  * assign a unique value to mci->mc_idx.
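To show how the newly exported helper might be consumed, a hedged sketch of a caller, assuming a hypothetical handler that receives the updated poll period from user space; only edac_mc_reset_delay_period(int value) itself comes from this patch:

/* Sketch only: hypothetical handler for a poll-period knob (in msec).
 * 'edac_mc_poll_msec' and the 1000 ms floor are assumptions for
 * illustration, not part of the patch. */
static int edac_mc_poll_msec = 1000;	/* assumed current poll period */

static int example_store_poll_msec(int new_msec)
{
	if (new_msec < 1000)		/* assumed sanity floor */
		return -EINVAL;

	edac_mc_poll_msec = new_msec;	/* remember the new period */

	/* cancel and re-arm every controller's polling work with the new delay */
	edac_mc_reset_delay_period(new_msec);

	return 0;
}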