author	Huang Ying <ying.huang@intel.com>	2011-02-21 00:54:41 -0500
committer	Len Brown <len.brown@intel.com>	2011-03-21 22:59:06 -0400
commit	885b976fada5bc6595a9fd3e67e3cb1a3d11f50b (patch)
tree	8b598bd3b265c0bbe8237e129410fdc80fe7847a /drivers/acpi/apei
parent	dd9c1549edef02290edced639f67b54a25abbe0e (diff)
ACPI, APEI, Add ERST record ID cache
The APEI ERST firmware interface and implementation was not designed
with multiple users in mind. For example, if there are four records in
storage with IDs 1, 2, 3 and 4, and two ERST readers enumerate the
records via GET_NEXT_RECORD_ID as follows,

	reader 1		reader 2
	1
				2
	3
				4
	-1
				-1

where -1 signals there are no more record IDs, then reader 1 has no
chance to check records 2 and 4, while reader 2 has no chance to check
records 1 and 3. Worse, any further GET_NEXT_RECORD_ID will return -1,
that is, other readers will have no chance to check any record at all,
even though the records have not been cleared by anyone. This makes the
raw GET_NEXT_RECORD_ID unsuitable for use by multiple users.

To solve the issue, an in-memory ERST record ID cache is designed and
implemented. When record IDs are enumerated, each ID returned by
GET_NEXT_RECORD_ID is added to the cache in addition to being returned
to the caller, so other readers can consult the cache to see all
available record IDs.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
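For reference, a minimal sketch of how a consumer is expected to drive
the new begin/next/end API, mirroring the erst-dbg.c changes below. The
enumerate_erst_records() wrapper and its pr_info() reporting are
illustrative only and not part of this patch; the erst_* calls are the
interface it adds.

/*
 * Hypothetical example (not part of this patch): enumerate every
 * available ERST record ID via the record ID cache.
 */
static int enumerate_erst_records(void)
{
	int rc, pos;
	u64 record_id;

	/* take a reference on the record ID cache */
	rc = erst_get_record_id_begin(&pos);
	if (rc)
		return rc;

	for (;;) {
		rc = erst_get_record_id_next(&pos, &record_id);
		if (rc)
			break;
		/* cache and storage are exhausted */
		if (record_id == APEI_ERST_INVALID_RECORD_ID)
			break;
		pr_info("ERST record ID: 0x%llx\n",
			(unsigned long long)record_id);
	}

	/* drop the reference so the cache can be compacted */
	erst_get_record_id_end();

	return rc;
}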
Diffstat (limited to 'drivers/acpi/apei')
 drivers/acpi/apei/erst-dbg.c |  24
 drivers/acpi/apei/erst.c     | 235
 2 files changed, 212 insertions(+), 47 deletions(-)
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index de73caf3cebc..a4cfb64c86a1 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -43,12 +43,27 @@ static DEFINE_MUTEX(erst_dbg_mutex);
 
 static int erst_dbg_open(struct inode *inode, struct file *file)
 {
+	int rc, *pos;
+
 	if (erst_disable)
 		return -ENODEV;
 
+	pos = (int *)&file->private_data;
+
+	rc = erst_get_record_id_begin(pos);
+	if (rc)
+		return rc;
+
 	return nonseekable_open(inode, file);
 }
 
+static int erst_dbg_release(struct inode *inode, struct file *file)
+{
+	erst_get_record_id_end();
+
+	return 0;
+}
+
 static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
 	int rc;
@@ -79,18 +94,20 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
 			     size_t usize, loff_t *off)
 {
-	int rc;
+	int rc, *pos;
 	ssize_t len = 0;
 	u64 id;
 
-	if (*off != 0)
+	if (*off)
 		return -EINVAL;
 
 	if (mutex_lock_interruptible(&erst_dbg_mutex) != 0)
 		return -EINTR;
 
+	pos = (int *)&filp->private_data;
+
 retry_next:
-	rc = erst_get_next_record_id(&id);
+	rc = erst_get_record_id_next(pos, &id);
 	if (rc)
 		goto out;
 	/* no more record */
@@ -181,6 +198,7 @@ out:
 static const struct file_operations erst_dbg_ops = {
 	.owner		= THIS_MODULE,
 	.open		= erst_dbg_open,
+	.release	= erst_dbg_release,
 	.read		= erst_dbg_read,
 	.write		= erst_dbg_write,
 	.unlocked_ioctl	= erst_dbg_ioctl,
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cf6db6b7662a..8ff8c32fef58 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -429,6 +429,22 @@ ssize_t erst_get_record_count(void)
 }
 EXPORT_SYMBOL_GPL(erst_get_record_count);
 
+#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024
+
+struct erst_record_id_cache {
+	struct mutex lock;
+	u64 *entries;
+	int len;
+	int size;
+	int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+	.refcount = 0,
+};
+
 static int __erst_get_next_record_id(u64 *record_id)
 {
 	struct apei_exec_context ctx;
@@ -443,26 +459,179 @@ static int __erst_get_next_record_id(u64 *record_id)
 	return 0;
 }
 
+int erst_get_record_id_begin(int *pos)
+{
+	int rc;
+
+	if (erst_disable)
+		return -ENODEV;
+
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
+	erst_record_id_cache.refcount++;
+	mutex_unlock(&erst_record_id_cache.lock);
+
+	*pos = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+	u64 id, prev_id, first_id;
+	int i, rc;
+	u64 *entries;
+	unsigned long flags;
+
+	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+	raw_spin_lock_irqsave(&erst_lock, flags);
+	rc = __erst_get_next_record_id(&id);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	if (rc == -ENOENT)
+		return 0;
+	if (rc)
+		return rc;
+	if (id == APEI_ERST_INVALID_RECORD_ID)
+		return 0;
+	/* can not skip current ID, or loop back to first ID */
+	if (id == prev_id || id == first_id)
+		return 0;
+	if (first_id == APEI_ERST_INVALID_RECORD_ID)
+		first_id = id;
+	prev_id = id;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == id)
+			break;
+	}
+	/* record id already in cache, try next */
+	if (i < erst_record_id_cache.len)
+		goto retry;
+	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+		int new_size, alloc_size;
+		u64 *new_entries;
+
+		new_size = erst_record_id_cache.size * 2;
+		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+				     ERST_RECORD_ID_CACHE_SIZE_MAX);
+		if (new_size <= erst_record_id_cache.size) {
+			if (printk_ratelimit())
+				pr_warning(FW_WARN ERST_PFX
+					   "too many record ID!\n");
+			return 0;
+		}
+		alloc_size = new_size * sizeof(entries[0]);
+		if (alloc_size < PAGE_SIZE)
+			new_entries = kmalloc(alloc_size, GFP_KERNEL);
+		else
+			new_entries = vmalloc(alloc_size);
+		if (!new_entries)
+			return -ENOMEM;
+		memcpy(new_entries, entries,
+		       erst_record_id_cache.len * sizeof(entries[0]));
+		if (erst_record_id_cache.size < PAGE_SIZE)
+			kfree(entries);
+		else
+			vfree(entries);
+		erst_record_id_cache.entries = entries = new_entries;
+		erst_record_id_cache.size = new_size;
+	}
+	entries[i] = id;
+	erst_record_id_cache.len++;
+
+	return 1;
+}
+
 /*
  * Get the record ID of an existing error record on the persistent
  * storage. If there is no error record on the persistent storage, the
  * returned record_id is APEI_ERST_INVALID_RECORD_ID.
  */
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
 {
-	int rc;
-	unsigned long flags;
+	int rc = 0;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(record_id);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	/* must be enclosed by erst_get_record_id_begin/end */
+	BUG_ON(!erst_record_id_cache.refcount);
+	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	entries = erst_record_id_cache.entries;
+	for (; *pos < erst_record_id_cache.len; (*pos)++)
+		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+			break;
+	/* found next record id in cache */
+	if (*pos < erst_record_id_cache.len) {
+		*record_id = entries[*pos];
+		(*pos)++;
+		goto out_unlock;
+	}
+
+	/* Try to add one more record ID to cache */
+	rc = __erst_record_id_cache_add_one();
+	if (rc < 0)
+		goto out_unlock;
+	/* successfully add one new ID */
+	if (rc == 1) {
+		*record_id = erst_record_id_cache.entries[*pos];
+		(*pos)++;
+		rc = 0;
+	} else {
+		*pos = -1;
+		*record_id = APEI_ERST_INVALID_RECORD_ID;
+	}
+out_unlock:
+	mutex_unlock(&erst_record_id_cache.lock);
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+	int i, wpos = 0;
+	u64 *entries;
+
+	if (erst_record_id_cache.refcount)
+		return;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+			continue;
+		if (wpos != i)
+			memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+		wpos++;
+	}
+	erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+	/*
+	 * erst_disable != 0 should be detected by invoker via the
+	 * return value of erst_get_record_id_begin/next, so this
+	 * function should not be called for erst_disable != 0.
+	 */
+	BUG_ON(erst_disable);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	erst_record_id_cache.refcount--;
+	BUG_ON(erst_record_id_cache.refcount < 0);
+	__erst_record_id_cache_compact();
+	mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
 
 static int __erst_write_to_storage(u64 offset)
 {
@@ -703,56 +872,34 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record,
 }
 EXPORT_SYMBOL_GPL(erst_read);
 
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
-	int rc;
-	ssize_t len;
-	unsigned long flags;
-	u64 record_id;
-
-	if (erst_disable)
-		return -ENODEV;
-
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(&record_id);
-	if (rc) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return rc;
-	}
-	/* no more record */
-	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return 0;
-	}
-
-	len = __erst_read(record_id, record, buflen);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
 int erst_clear(u64 record_id)
 {
-	int rc;
+	int rc, i;
 	unsigned long flags;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
 	raw_spin_lock_irqsave(&erst_lock, flags);
 	if (erst_erange.attr & ERST_RANGE_NVRAM)
 		rc = __erst_clear_from_nvram(record_id);
 	else
 		rc = __erst_clear_from_storage(record_id);
 	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
+	if (rc)
+		goto out;
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == record_id)
+			entries[i] = APEI_ERST_INVALID_RECORD_ID;
+	}
+	__erst_record_id_cache_compact();
+out:
+	mutex_unlock(&erst_record_id_cache.lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(erst_clear);