author      liu chuansheng <chuansheng.liu@intel.com>    2012-07-06 12:50:08 -0400
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2012-07-06 12:50:08 -0400
commit      5c53d819c71c63fdc91f30a59164583f68e2d63a (patch)
tree        e54c65ff9106c7ac29e9636310722db9d7bb170f /kernel/printk.c
parent      6887a4131da3adaab011613776d865f4bcfb5678 (diff)
printk: replacing the raw_spin_lock/unlock with raw_spin_lock/unlock_irq
In devkmsg_read/writev/llseek/poll/open() and related functions, logbuf_lock is taken with plain raw_spin_lock/unlock, which leaves a potential deadlock. Consider CPU1, where thread1 is running cat /dev/kmsg:

        raw_spin_lock(&logbuf_lock);
        while (user->seq == log_next_seq) {

If an interrupt arrives on CPU1 while thread1 holds the lock here, and the interrupt handler calls printk(), which also needs logbuf_lock, the CPU spins on a lock it already holds and deadlocks.

So use raw_spin_lock/unlock_irq here instead.

Acked-by: Kay Sievers <kay@vrfy.org>
Signed-off-by: liu chuansheng <chuansheng.liu@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
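For illustration only (not part of the patch): the minimal sketch below uses a made-up lock named example_lock and two stand-in functions to show the difference between the two calls. raw_spin_lock_irq() additionally disables local interrupts for as long as the lock is held, so an interrupt handler on the same CPU can never run and contend for a lock that CPU already owns.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);   /* stands in for logbuf_lock */

static void broken_read_path(void)
{
        raw_spin_lock(&example_lock);
        /*
         * If an interrupt fires on this CPU now and its handler calls
         * printk(), which needs the same lock, the handler spins on a
         * lock this CPU already holds: the CPU never makes progress.
         */
        raw_spin_unlock(&example_lock);
}

static void fixed_read_path(void)
{
        raw_spin_lock_irq(&example_lock);    /* local IRQs off while held */
        /* an interrupt handler on this CPU cannot preempt us here */
        raw_spin_unlock_irq(&example_lock);  /* local IRQs back on */
}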
Diffstat (limited to 'kernel/printk.c')
-rw-r--r--  kernel/printk.c | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index dba18211685e..12886cd19cd9 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -430,20 +430,20 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
         ret = mutex_lock_interruptible(&user->lock);
         if (ret)
                 return ret;
-        raw_spin_lock(&logbuf_lock);
+        raw_spin_lock_irq(&logbuf_lock);
         while (user->seq == log_next_seq) {
                 if (file->f_flags & O_NONBLOCK) {
                         ret = -EAGAIN;
-                        raw_spin_unlock(&logbuf_lock);
+                        raw_spin_unlock_irq(&logbuf_lock);
                         goto out;
                 }
 
-                raw_spin_unlock(&logbuf_lock);
+                raw_spin_unlock_irq(&logbuf_lock);
                 ret = wait_event_interruptible(log_wait,
                                                user->seq != log_next_seq);
                 if (ret)
                         goto out;
-                raw_spin_lock(&logbuf_lock);
+                raw_spin_lock_irq(&logbuf_lock);
         }
 
         if (user->seq < log_first_seq) {
@@ -451,7 +451,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                 user->idx = log_first_idx;
                 user->seq = log_first_seq;
                 ret = -EPIPE;
-                raw_spin_unlock(&logbuf_lock);
+                raw_spin_unlock_irq(&logbuf_lock);
                 goto out;
         }
 
@@ -501,7 +501,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 
         user->idx = log_next(user->idx);
         user->seq++;
-        raw_spin_unlock(&logbuf_lock);
+        raw_spin_unlock_irq(&logbuf_lock);
 
         if (len > count) {
                 ret = -EINVAL;
@@ -528,7 +528,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
         if (offset)
                 return -ESPIPE;
 
-        raw_spin_lock(&logbuf_lock);
+        raw_spin_lock_irq(&logbuf_lock);
         switch (whence) {
         case SEEK_SET:
                 /* the first record */
@@ -552,7 +552,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
         default:
                 ret = -EINVAL;
         }
-        raw_spin_unlock(&logbuf_lock);
+        raw_spin_unlock_irq(&logbuf_lock);
         return ret;
 }
 
@@ -566,14 +566,14 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 
         poll_wait(file, &log_wait, wait);
 
-        raw_spin_lock(&logbuf_lock);
+        raw_spin_lock_irq(&logbuf_lock);
         if (user->seq < log_next_seq) {
                 /* return error when data has vanished underneath us */
                 if (user->seq < log_first_seq)
                         ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
                 ret = POLLIN|POLLRDNORM;
         }
-        raw_spin_unlock(&logbuf_lock);
+        raw_spin_unlock_irq(&logbuf_lock);
 
         return ret;
 }
@@ -597,10 +597,10 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 
         mutex_init(&user->lock);
 
-        raw_spin_lock(&logbuf_lock);
+        raw_spin_lock_irq(&logbuf_lock);
         user->idx = log_first_idx;
         user->seq = log_first_seq;
-        raw_spin_unlock(&logbuf_lock);
+        raw_spin_unlock_irq(&logbuf_lock);
 
         file->private_data = user;
         return 0;