Diffstat (limited to 'drivers/edac/edac_mc.c')
-rw-r--r--	drivers/edac/edac_mc.c	119
1 file changed, 119 insertions, 0 deletions
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d324e1eadd3c..3474ca9d90a4 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -184,6 +184,8 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
 		}
 	}
 
+	mci->op_state = OP_ALLOC;
+
 	return mci;
 }
 EXPORT_SYMBOL_GPL(edac_mc_alloc);
@@ -215,6 +217,107 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
 	return NULL;
 }
 
+/*
+ * Check whether an NMI-type handler has asserted an error and clear the flag
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+	int vreg;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		return 1;
+
+	vreg = atomic_read(&edac_err_assert);
+	if (vreg) {
+		atomic_set(&edac_err_assert, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * edac_mc_workq_function
+ *	performs the operation scheduled by a workq request
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = (struct delayed_work *)work_req;
+	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+#else
+static void edac_mc_workq_function(void *ptr)
+{
+	struct mem_ctl_info *mci = (struct mem_ctl_info *)ptr;
+#endif
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* Only poll controllers that are running polled and have a check */
+	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+		mci->edac_check(mci);
+
+	/*
+	 * FIXME: temp place holder for PCI checks,
+	 * goes away when we break out PCI
+	 */
+	edac_pci_do_parity_check();
+
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* Reschedule; queue_delayed_work() takes its delay in jiffies */
+	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
+}
+
+/*
+ * edac_mc_workq_setup
+ *	initialize a workq item for this mci,
+ *	passing in the new delay period in msec
+ */
+void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+#else
+	INIT_WORK(&mci->work, edac_mc_workq_function, mci);
+#endif
+	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ *	stop the workq processing on this mci
+ */
+void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+	int status;
+
+	status = cancel_delayed_work(&mci->work);
+	if (status == 0) {
+		/* workq instance might be running, wait for it */
+		flush_workqueue(edac_workqueue);
+	}
+}
+
+/*
+ * edac_reset_delay_period
+ *	restart this mci's polling workq with a new delay period (msec)
+ */
+void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+{
+	mutex_lock(&mem_ctls_mutex);
+
+	/* cancel the current workq request */
+	edac_mc_workq_teardown(mci);
+
+	/* restart the workq request, with the new delay value */
+	edac_mc_workq_setup(mci, value);
+
+	mutex_unlock(&mem_ctls_mutex);
+}
+
 /* Return 0 on success, 1 on failure.
  * Before calling this function, caller must
  * assign a unique value to mci->mc_idx.
@@ -351,6 +454,16 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
 		goto fail1;
 	}
 
+	/* If there IS a check routine, then we are running POLLED */
+	if (mci->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		mci->op_state = OP_RUNNING_POLL;
+
+		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+	} else {
+		mci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
 	/* Report action taken */
 	edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
 		mci->mod_name, mci->ctl_name, dev_name(mci));
@@ -386,6 +499,12 @@ struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
 		return NULL;
 	}
 
+	/* marking MCI offline */
+	mci->op_state = OP_OFFLINE;
+
+	/* flush workq processes */
+	edac_mc_workq_teardown(mci);
+
 	edac_remove_sysfs_mci_device(mci);
 	del_mc_from_global_list(mci);
 	mutex_unlock(&mem_ctls_mutex);
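For reference, the core pattern this patch introduces is a delayed work item embedded in each mem_ctl_info that re-queues itself after every poll. Below is a minimal, self-contained sketch of that pattern against the stock >= 2.6.20 workqueue API; the my_dev/my_poll names are hypothetical illustration names, not part of this patch, and to_edac_mem_ctl_work() is assumed to be the usual container_of() wrapper around mci->work.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work work;	/* embedded, like mci->work */
	unsigned int poll_msec;		/* polling period, in msec */
};

/* created elsewhere with create_singlethread_workqueue("my_poll") */
static struct workqueue_struct *my_wq;

static void my_poll(struct work_struct *w)
{
	/* recover the containing object from the work_struct pointer */
	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	struct my_dev *dev = container_of(dw, struct my_dev, work);

	/* ... check the hardware here, as mci->edac_check(mci) does ... */

	/* re-arm: queue_delayed_work() takes its delay in jiffies */
	queue_delayed_work(my_wq, &dev->work, msecs_to_jiffies(dev->poll_msec));
}

static void my_poll_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->work, my_poll);
	queue_delayed_work(my_wq, &dev->work, msecs_to_jiffies(dev->poll_msec));
}

static void my_poll_stop(struct my_dev *dev)
{
	/*
	 * cancel_delayed_work() returns 0 when the callback may already
	 * be executing; flush the queue to wait it out, exactly as
	 * edac_mc_workq_teardown() does above.
	 */
	if (cancel_delayed_work(&dev->work) == 0)
		flush_workqueue(my_wq);
}

Note the teardown ordering in edac_mc_del_mc(): op_state is set to OP_OFFLINE before the workq is torn down, so a poll already in flight sees the instance going away; cancel_delayed_work() alone cannot stop a callback that has started running, hence the flush.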