 drivers/edac/edac_core.h     |  14
 drivers/edac/edac_device.c   |  36
 drivers/edac/edac_mc.c       | 119
 drivers/edac/edac_mc_sysfs.c |  14
 drivers/edac/edac_module.c   |  86
 drivers/edac/edac_module.h   |   5
 6 files changed, 177 insertions(+), 97 deletions(-)
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index f34ebb609d55..b73d659a4bb2 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -382,6 +382,15 @@ struct mem_ctl_info {
 	/* edac sysfs device control */
 	struct kobject edac_mci_kobj;
 	struct completion kobj_complete;
+
+	/* work struct for this MC */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+	struct delayed_work work;
+#else
+	struct work_struct work;
+#endif
+	/* the internal state of this controller instance */
+	int op_state;
 };
 
 /*
@@ -573,6 +582,9 @@ struct edac_device_ctl_info {
 };
 
 /* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+		container_of(w, struct mem_ctl_info, work)
+
 #define to_edac_device_ctl_work(w) \
 		container_of(w,struct edac_device_ctl_info,work)
 
@@ -584,6 +596,8 @@ static inline void edac_device_calc_delay(
 	edac_dev->delay = edac_dev->poll_msec * HZ / 1000;
 }
 
+#define edac_calc_delay(dev) dev->delay = dev->poll_msec * HZ / 1000;
+
 /*
  * The alloc() and free() functions for the 'edac_device' control info
  * structure. A MC driver will allocate one of these for each edac_device
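
For orientation, here is a minimal sketch of how the new to_edac_mem_ctl_work() accessor and op_state field are meant to be used from a poll callback. This is a sketch only, assuming the >= 2.6.20 delayed_work branch; the function name below is illustrative and not part of the patch.

static void example_mc_poll(struct work_struct *work_req)
{
	/* the delayed_work is embedded in mem_ctl_info, so container_of
	 * (via to_edac_mem_ctl_work) recovers the owning controller */
	struct delayed_work *d_work = (struct delayed_work *) work_req;
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	/* only controllers left in polling mode run their check routine */
	if (mci->op_state == OP_RUNNING_POLL && mci->edac_check != NULL)
		mci->edac_check(mci);
}
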
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 52db1b14fff5..3f4c8a28154a 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -332,17 +332,17 @@ EXPORT_SYMBOL(edac_device_find);
 
 
 /*
- * edac_workq_function
+ * edac_device_workq_function
  *	performs the operation scheduled by a workq request
  */
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
-static void edac_workq_function(struct work_struct *work_req)
+static void edac_device_workq_function(struct work_struct *work_req)
 {
 	struct delayed_work *d_work = (struct delayed_work*) work_req;
 	struct edac_device_ctl_info *edac_dev =
 				to_edac_device_ctl_work(d_work);
 #else
-static void edac_workq_function(void *ptr)
+static void edac_device_workq_function(void *ptr)
 {
 	struct edac_device_ctl_info *edac_dev =
 			(struct edac_device_ctl_info *) ptr;
@@ -364,30 +364,31 @@ static void edac_workq_function(void *ptr)
 }
 
 /*
- * edac_workq_setup
+ * edac_device_workq_setup
  *	initialize a workq item for this edac_device instance
  *	passing in the new delay period in msec
  */
-void edac_workq_setup(struct edac_device_ctl_info *edac_dev, unsigned msec)
+void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+				unsigned msec)
 {
 	debugf0("%s()\n", __func__);
 
 	edac_dev->poll_msec = msec;
-	edac_device_calc_delay(edac_dev);	/* Calc delay jiffies */
+	edac_calc_delay(edac_dev);	/* Calc delay jiffies */
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
-	INIT_DELAYED_WORK(&edac_dev->work,edac_workq_function);
+	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
 #else
-	INIT_WORK(&edac_dev->work,edac_workq_function,edac_dev);
+	INIT_WORK(&edac_dev->work, edac_device_workq_function, edac_dev);
 #endif
-	queue_delayed_work(edac_workqueue,&edac_dev->work, edac_dev->delay);
+	queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay);
 }
 
 /*
- * edac_workq_teardown
+ * edac_device_workq_teardown
  *	stop the workq processing on this edac_dev
  */
-void edac_workq_teardown(struct edac_device_ctl_info *edac_dev)
+void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
 {
 	int status;
 
@@ -409,10 +410,10 @@ void edac_device_reset_delay_period(
 	lock_device_list();
 
 	/* cancel the current workq request */
-	edac_workq_teardown(edac_dev);
+	edac_device_workq_teardown(edac_dev);
 
 	/* restart the workq request, with new delay value */
-	edac_workq_setup(edac_dev, value);
+	edac_device_workq_setup(edac_dev, value);
 
 	unlock_device_list();
 }
@@ -479,8 +480,11 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev, int edac_idx)
 		/* This instance is NOW RUNNING */
 		edac_dev->op_state = OP_RUNNING_POLL;
 
-		/* enable workq processing on this instance, default = 1000 msec */
-		edac_workq_setup(edac_dev, 1000);
+		/*
+		 * enable workq processing on this instance,
+		 * default = 1000 msec
+		 */
+		edac_device_workq_setup(edac_dev, 1000);
 	} else {
 		edac_dev->op_state = OP_RUNNING_INTERRUPT;
 	}
@@ -538,7 +542,7 @@ struct edac_device_ctl_info * edac_device_del_device(struct device *dev)
 	edac_dev->op_state = OP_OFFLINE;
 
 	/* clear workq processing on this instance */
-	edac_workq_teardown(edac_dev);
+	edac_device_workq_teardown(edac_dev);
 
 	/* Tear down the sysfs entries for this instance */
 	edac_device_remove_sysfs(edac_dev);
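
The conversion above follows the standard self-rearming delayed-work pattern. For reference, a stand-alone sketch of that pattern on the >= 2.6.20 API (all names here are illustrative, not taken from the patch):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_work;
static unsigned long example_delay;	/* poll period, in jiffies */

static void example_poll(struct work_struct *work_req)
{
	/* ... periodic check goes here ... */

	/* re-arm, so polling continues until explicitly cancelled */
	queue_delayed_work(example_wq, &example_work, example_delay);
}

static int example_poll_start(unsigned msec)
{
	example_wq = create_singlethread_workqueue("example_poll");
	if (example_wq == NULL)
		return -ENOMEM;

	example_delay = msecs_to_jiffies(msec);
	INIT_DELAYED_WORK(&example_work, example_poll);
	queue_delayed_work(example_wq, &example_work, example_delay);
	return 0;
}

static void example_poll_stop(void)
{
	/* if the handler was already running, wait for it to finish */
	if (cancel_delayed_work(&example_work) == 0)
		flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}
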
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d324e1eadd3c..3474ca9d90a4 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -184,6 +184,8 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
 		}
 	}
 
+	mci->op_state = OP_ALLOC;
+
 	return mci;
 }
 EXPORT_SYMBOL_GPL(edac_mc_alloc);
@@ -215,6 +217,107 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
 	return NULL;
 }
 
+/*
+ * handler for EDAC to check if NMI type handler has asserted interrupt
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+	int vreg;
+
+	if(edac_op_state == EDAC_OPSTATE_POLL)
+		return 1;
+
+	vreg = atomic_read(&edac_err_assert);
+	if(vreg) {
+		atomic_set(&edac_err_assert, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * edac_mc_workq_function
+ *	performs the operation scheduled by a workq request
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = (struct delayed_work*) work_req;
+	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+#else
+static void edac_mc_workq_function(void *ptr)
+{
+	struct mem_ctl_info *mci = (struct mem_ctl_info *) ptr;
+#endif
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* Only poll controllers that are running polled and have a check */
+	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+		mci->edac_check(mci);
+
+	/*
+	 * FIXME: temp place holder for PCI checks,
+	 * goes away when we break out PCI
+	 */
+	edac_pci_do_parity_check();
+
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* Reschedule */
+	queue_delayed_work(edac_workqueue, &mci->work, edac_mc_get_poll_msec());
+}
+
+/*
+ * edac_mc_workq_setup
+ *	initialize a workq item for this mci
+ *	passing in the new delay period in msec
+ */
+void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+#else
+	INIT_WORK(&mci->work, edac_mc_workq_function, mci);
+#endif
+	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ *	stop the workq processing on this mci
+ */
+void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+	int status;
+
+	status = cancel_delayed_work(&mci->work);
+	if (status == 0) {
+		/* workq instance might be running, wait for it */
+		flush_workqueue(edac_workqueue);
+	}
+}
+
+/*
+ * edac_reset_delay_period
+ */
+
+void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+{
+	mutex_lock(&mem_ctls_mutex);
+
+	/* cancel the current workq request */
+	edac_mc_workq_teardown(mci);
+
+	/* restart the workq request, with new delay value */
+	edac_mc_workq_setup(mci, value);
+
+	mutex_unlock(&mem_ctls_mutex);
+}
+
 /* Return 0 on success, 1 on failure.
  * Before calling this function, caller must
  * assign a unique value to mci->mc_idx.
@@ -351,6 +454,16 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
 		goto fail1;
 	}
 
+	/* If there IS a check routine, then we are running POLLED */
+	if (mci->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		mci->op_state = OP_RUNNING_POLL;
+
+		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+	} else {
+		mci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
 	/* Report action taken */
 	edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
 		mci->mod_name, mci->ctl_name, dev_name(mci));
@@ -386,6 +499,12 @@ struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
 		return NULL;
 	}
 
+	/* marking MCI offline */
+	mci->op_state = OP_OFFLINE;
+
+	/* flush workq processes */
+	edac_mc_workq_teardown(mci);
+
 	edac_remove_sysfs_mci_device(mci);
 	del_mc_from_global_list(mci);
 	mutex_unlock(&mem_ctls_mutex);
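
From an MC driver's point of view, the new hooks mean that installing a check routine before calling edac_mc_add_mc() is what opts the instance into polling. A sketch of that usage follows; the driver names are hypothetical and the body of the check routine is elided.

static void my_mc_check(struct mem_ctl_info *mci)
{
	/* read the controller's error registers here and report any
	 * events through the EDAC core's error-handling helpers */
}

static int my_mc_register(struct mem_ctl_info *mci, int mc_idx)
{
	/* non-NULL edac_check => edac_mc_add_mc() sets OP_RUNNING_POLL
	 * and arms the shared workqueue at edac_mc_get_poll_msec();
	 * NULL => the instance is marked OP_RUNNING_INTERRUPT and no
	 * poll work is queued */
	mci->edac_check = my_mc_check;

	return edac_mc_add_mc(mci, mc_idx);
}
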
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 6e2785bd011c..6b2217b741fb 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -22,22 +22,28 @@ static int panic_on_ue;
 static int poll_msec = 1000;
 
 /* Getter functions for above */
-int edac_get_log_ue()
+int edac_get_log_ue(void)
 {
 	return log_ue;
 }
 
-int edac_get_log_ce()
+int edac_get_log_ce(void)
 {
 	return log_ce;
 }
 
-int edac_get_panic_on_ue()
+int edac_get_panic_on_ue(void)
 {
 	return panic_on_ue;
 }
 
-int edac_get_poll_msec()
+/* this is temporary */
+int edac_mc_get_poll_msec(void)
+{
+	return edac_get_poll_msec();
+}
+
+int edac_get_poll_msec(void)
 {
 	return poll_msec;
 }
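
The () -> (void) changes above are not cosmetic: in C the two forms declare different things, which matters for type-checking against the extern prototypes in edac_module.h. For example:

int edac_get_log_ue();		/* old style: parameters left unspecified */
int edac_get_log_ue(void);	/* prototype: explicitly takes no parameters */
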
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 2f84f0d035be..dc900ed75178 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -1,6 +1,14 @@
-
-#include <linux/freezer.h>
-#include <linux/kthread.h>
+/*
+ * edac_module.c
+ *
+ * (C) 2007 www.douglaskthompson.com
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Author: Doug Thompson <norsk5@xmission.com>
+ *
+ */
 #include <linux/edac.h>
 
 #include "edac_core.h"
@@ -17,10 +25,6 @@ EXPORT_SYMBOL_GPL(edac_debug_level);
 /* scope is to module level only */
 struct workqueue_struct *edac_workqueue;
 
-/* private to this file */
-static struct task_struct *edac_thread;
-
-
 /*
  * sysfs object: /sys/devices/system/edac
  *	need to export to other files in this modules
@@ -84,63 +88,6 @@ static void edac_unregister_sysfs_edac_name(void)
 	edac_class_valid = 0;
 }
 
-
-/*
- * Check MC status every edac_get_poll_msec().
- * Check PCI status every edac_get_poll_msec() as well.
- *
- * This where the work gets done for edac.
- *
- * SMP safe, doesn't use NMI, and auto-rate-limits.
- */
-static void do_edac_check(void)
-{
-	debugf3("%s()\n", __func__);
-
-	/* perform the poll activities */
-	edac_check_mc_devices();
-	edac_pci_do_parity_check();
-}
-
-/*
- * handler for EDAC to check if NMI type handler has asserted interrupt
- */
-static int edac_assert_error_check_and_clear(void)
-{
-	int vreg;
-
-	if(edac_op_state == EDAC_OPSTATE_POLL)
-		return 1;
-
-	vreg = atomic_read(&edac_err_assert);
-	if(vreg) {
-		atomic_set(&edac_err_assert, 0);
-		return 1;
-	}
-
-	return 0;
-}
-
-/*
- * Action thread for EDAC to perform the POLL operations
- */
-static int edac_kernel_thread(void *arg)
-{
-	int msec;
-
-	while (!kthread_should_stop()) {
-		if(edac_assert_error_check_and_clear())
-			do_edac_check();
-
-		/* goto sleep for the interval */
-		msec = (HZ * edac_get_poll_msec()) / 1000;
-		schedule_timeout_interruptible(msec);
-		try_to_freeze();
-	}
-
-	return 0;
-}
-
 /*
  * edac_workqueue_setup
  *	initialize the edac work queue for polling operations
@@ -221,19 +168,9 @@ static int __init edac_init(void)
 		goto error_pci;
 	}
 
-	/* create our kernel thread */
-	edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
-
-	if (IS_ERR(edac_thread)) {
-		err = PTR_ERR(edac_thread);
-		goto error_work;
-	}
-
 	return 0;
 
 	/* Error teardown stack */
-error_work:
-	edac_workqueue_teardown();
 error_pci:
 	edac_sysfs_pci_teardown();
 error_mem:
@@ -251,7 +188,6 @@ error:
 static void __exit edac_exit(void)
 {
 	debugf0("%s()\n", __func__);
-	kthread_stop(edac_thread);
 
 	/* tear down the various subsystems*/
 	edac_workqueue_teardown();
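
The removed kthread loop is replaced by the edac_workqueue that edac_workqueue_setup()/edac_workqueue_teardown() manage; both functions sit outside the hunks shown here. Given the "single CPU mode" comment in edac_module.h, a plausible shape would be the following sketch (not taken from the patch):

static int edac_workqueue_setup(void)
{
	/* one ordered worker so poll callbacks never run concurrently */
	edac_workqueue = create_singlethread_workqueue("edac-poller");
	if (edac_workqueue == NULL)
		return -ENODEV;

	return 0;
}

static void edac_workqueue_teardown(void)
{
	if (edac_workqueue) {
		/* let any queued poll work drain before destroying it */
		flush_workqueue(edac_workqueue);
		destroy_workqueue(edac_workqueue);
		edac_workqueue = NULL;
	}
}
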
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 2758d03c3e03..22c52e43131d 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -28,6 +28,7 @@ extern int edac_get_log_ue(void);
 extern int edac_get_log_ce(void);
 extern int edac_get_panic_on_ue(void);
 extern int edac_get_poll_msec(void);
+extern int edac_mc_get_poll_msec(void);
 
 extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
 extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
@@ -35,9 +36,9 @@ extern struct sysdev_class *edac_get_edac_class(void);
 
 /* edac core workqueue: single CPU mode */
 extern struct workqueue_struct *edac_workqueue;
-extern void edac_workq_setup(struct edac_device_ctl_info *edac_dev,
+extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
 				unsigned msec);
-extern void edac_workq_teardown(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
 extern void edac_device_reset_delay_period(
 	struct edac_device_ctl_info *edac_dev,
 	unsigned long value);