author		Liu, Jinsong <jinsong.liu@intel.com>	2012-06-12 11:11:16 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-07-19 15:51:40 -0400
commit		1b2a05516e42149a5e9f0f5aeba2c7fa9574b3f4 (patch)
tree		b3ebd18e79b2ef3db67dc9d457898c1d791d0fca
parent		f65c9bb3fb725551d3e405f4d092caf24929cebe (diff)
xen/mce: schedule a workqueue to avoid sleep in atomic context
copy_to_user might sleep and print a stack trace if it is executed in an
atomic spinlock context, like this:

(XEN) CMCI: send CMCI to DOM0 through virq
BUG: sleeping function called from invalid context at /home/konradinux/kernel.h:199
in_atomic(): 1, irqs_disabled(): 0, pid: 4581, name: mcelog
Pid: 4581, comm: mcelog Tainted: G           O 3.5.0-rc1upstream-00003-g149000b-dirty #1
 [<ffffffff8109ad9a>] __might_sleep+0xda/0x100
 [<ffffffff81329b0b>] xen_mce_chrdev_read+0xab/0x140
 [<ffffffff81148945>] vfs_read+0xc5/0x190
 [<ffffffff81148b0c>] sys_read+0x4c/0x90
 [<ffffffff815bd039>] system_call_fastpath+0x16

This patch schedules a workqueue from the IRQ handler to poll the data, and
uses a mutex instead of a spinlock, so copy_to_user is no longer called from
atomic context.

Reported-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Suggested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
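For background (not part of the patch itself), a minimal sketch of the deferral
pattern the commit adopts: the hard-IRQ handler only queues a work item, and the
work function runs in process context, where taking a mutex and calling functions
that may sleep (such as copy_to_user) is legal. The demo_* names are illustrative
only and do not appear in the driver.

/* Sketch of the IRQ-defers-to-workqueue pattern; names are hypothetical. */
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_lock);

/* Runs in process context: sleeping under the mutex is allowed here. */
static void demo_work_fn(struct work_struct *work)
{
	mutex_lock(&demo_lock);
	/* ... drain the event queue, possibly copying data to user space ... */
	mutex_unlock(&demo_lock);
}
static DECLARE_WORK(demo_work, demo_work_fn);

/* Runs in hard-IRQ (atomic) context: defer everything that may sleep. */
static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	schedule_work(&demo_work);
	return IRQ_HANDLED;
}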
-rw-r--r--	drivers/xen/mcelog.c	18
1 files changed, 11 insertions, 7 deletions
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 72e87d2f1929..804aa3c181c2 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -55,7 +55,7 @@ static struct mc_info g_mi;
 static struct mcinfo_logical_cpu *g_physinfo;
 static uint32_t ncpus;
 
-static DEFINE_SPINLOCK(mcelog_lock);
+static DEFINE_MUTEX(mcelog_lock);
 
 static struct xen_mce_log xen_mcelog = {
 	.signature	= XEN_MCE_LOG_SIGNATURE,
@@ -106,7 +106,7 @@ static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
 	unsigned num;
 	int i, err;
 
-	spin_lock(&mcelog_lock);
+	mutex_lock(&mcelog_lock);
 
 	num = xen_mcelog.next;
 
@@ -130,7 +130,7 @@ static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
 	err = -EFAULT;
 
 out:
-	spin_unlock(&mcelog_lock);
+	mutex_unlock(&mcelog_lock);
 
 	return err ? err : buf - ubuf;
 }
@@ -310,12 +310,11 @@ static int mc_queue_handle(uint32_t flags)
 }
 
 /* virq handler for machine check error info*/
-static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
+static void xen_mce_work_fn(struct work_struct *work)
 {
 	int err;
-	unsigned long tmp;
 
-	spin_lock_irqsave(&mcelog_lock, tmp);
+	mutex_lock(&mcelog_lock);
 
 	/* urgent mc_info */
 	err = mc_queue_handle(XEN_MC_URGENT);
@@ -330,8 +329,13 @@ static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
 		pr_err(XEN_MCELOG
 		       "Failed to handle nonurgent mc_info queue.\n");
 
-	spin_unlock_irqrestore(&mcelog_lock, tmp);
+	mutex_unlock(&mcelog_lock);
+}
+static DECLARE_WORK(xen_mce_work, xen_mce_work_fn);
 
+static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
+{
+	schedule_work(&xen_mce_work);
 	return IRQ_HANDLED;
 }
 