author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/acpi/apei/erst.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/acpi/apei/erst.c')
 -rw-r--r--  drivers/acpi/apei/erst.c | 424
 1 file changed, 368 insertions(+), 56 deletions(-)
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 1211c03149e8..e6cef8e1b534 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -34,6 +34,7 @@
 #include <linux/cper.h>
 #include <linux/nmi.h>
 #include <linux/hardirq.h>
+#include <linux/pstore.h>
 #include <acpi/apei.h>
 
 #include "apei-internal.h"
@@ -53,7 +54,7 @@
 				     sizeof(struct acpi_table_erst)))
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 milliseconds */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 #define FIRMWARE_MAX_STALL	50			/* 50us */
 
@@ -86,7 +87,7 @@ static struct erst_erange {
  * It is used to provide exclusive accessing for ERST Error Log
  * Address Range too.
  */
-static DEFINE_SPINLOCK(erst_lock);
+static DEFINE_RAW_SPINLOCK(erst_lock);
 
 static inline int erst_errno(int command_status)
 {
@@ -421,14 +422,30 @@ ssize_t erst_get_record_count(void)
 	if (erst_disable)
 		return -ENODEV;
 
-	spin_lock_irqsave(&erst_lock, flags);
+	raw_spin_lock_irqsave(&erst_lock, flags);
 	count = __erst_get_record_count();
-	spin_unlock_irqrestore(&erst_lock, flags);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
 
 	return count;
 }
 EXPORT_SYMBOL_GPL(erst_get_record_count);
 
+#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024
+
+struct erst_record_id_cache {
+	struct mutex lock;
+	u64 *entries;
+	int len;
+	int size;
+	int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+	.refcount = 0,
+};
+
 static int __erst_get_next_record_id(u64 *record_id)
 {
 	struct apei_exec_context ctx;
@@ -443,26 +460,179 @@ static int __erst_get_next_record_id(u64 *record_id)
 	return 0;
 }
 
+int erst_get_record_id_begin(int *pos)
+{
+	int rc;
+
+	if (erst_disable)
+		return -ENODEV;
+
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
+	erst_record_id_cache.refcount++;
+	mutex_unlock(&erst_record_id_cache.lock);
+
+	*pos = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+	u64 id, prev_id, first_id;
+	int i, rc;
+	u64 *entries;
+	unsigned long flags;
+
+	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+	raw_spin_lock_irqsave(&erst_lock, flags);
+	rc = __erst_get_next_record_id(&id);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	if (rc == -ENOENT)
+		return 0;
+	if (rc)
+		return rc;
+	if (id == APEI_ERST_INVALID_RECORD_ID)
+		return 0;
+	/* can not skip current ID, or loop back to first ID */
+	if (id == prev_id || id == first_id)
+		return 0;
+	if (first_id == APEI_ERST_INVALID_RECORD_ID)
+		first_id = id;
+	prev_id = id;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == id)
+			break;
+	}
+	/* record id already in cache, try next */
+	if (i < erst_record_id_cache.len)
+		goto retry;
+	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+		int new_size, alloc_size;
+		u64 *new_entries;
+
+		new_size = erst_record_id_cache.size * 2;
+		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+				     ERST_RECORD_ID_CACHE_SIZE_MAX);
+		if (new_size <= erst_record_id_cache.size) {
+			if (printk_ratelimit())
+				pr_warning(FW_WARN ERST_PFX
+					   "too many record ID!\n");
+			return 0;
+		}
+		alloc_size = new_size * sizeof(entries[0]);
+		if (alloc_size < PAGE_SIZE)
+			new_entries = kmalloc(alloc_size, GFP_KERNEL);
+		else
+			new_entries = vmalloc(alloc_size);
+		if (!new_entries)
+			return -ENOMEM;
+		memcpy(new_entries, entries,
+		       erst_record_id_cache.len * sizeof(entries[0]));
+		if (erst_record_id_cache.size < PAGE_SIZE)
+			kfree(entries);
+		else
+			vfree(entries);
+		erst_record_id_cache.entries = entries = new_entries;
+		erst_record_id_cache.size = new_size;
+	}
+	entries[i] = id;
+	erst_record_id_cache.len++;
+
+	return 1;
+}
+
 /*
  * Get the record ID of an existing error record on the persistent
  * storage. If there is no error record on the persistent storage, the
  * returned record_id is APEI_ERST_INVALID_RECORD_ID.
  */
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
 {
-	int rc;
-	unsigned long flags;
+	int rc = 0;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
-	spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(record_id);
-	spin_unlock_irqrestore(&erst_lock, flags);
+	/* must be enclosed by erst_get_record_id_begin/end */
+	BUG_ON(!erst_record_id_cache.refcount);
+	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	entries = erst_record_id_cache.entries;
+	for (; *pos < erst_record_id_cache.len; (*pos)++)
+		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+			break;
+	/* found next record id in cache */
+	if (*pos < erst_record_id_cache.len) {
+		*record_id = entries[*pos];
+		(*pos)++;
+		goto out_unlock;
+	}
+
+	/* Try to add one more record ID to cache */
+	rc = __erst_record_id_cache_add_one();
+	if (rc < 0)
+		goto out_unlock;
+	/* successfully add one new ID */
+	if (rc == 1) {
+		*record_id = erst_record_id_cache.entries[*pos];
+		(*pos)++;
+		rc = 0;
+	} else {
+		*pos = -1;
+		*record_id = APEI_ERST_INVALID_RECORD_ID;
+	}
+out_unlock:
+	mutex_unlock(&erst_record_id_cache.lock);
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+	int i, wpos = 0;
+	u64 *entries;
+
+	if (erst_record_id_cache.refcount)
+		return;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+			continue;
+		if (wpos != i)
+			memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+		wpos++;
+	}
+	erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+	/*
+	 * erst_disable != 0 should be detected by invoker via the
+	 * return value of erst_get_record_id_begin/next, so this
+	 * function should not be called for erst_disable != 0.
+	 */
+	BUG_ON(erst_disable);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	erst_record_id_cache.refcount--;
+	BUG_ON(erst_record_id_cache.refcount < 0);
+	__erst_record_id_cache_compact();
+	mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
 
 static int __erst_write_to_storage(u64 offset)
 {
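Usage sketch (editorial note, not part of the patch): the record ID cache added above replaces the old one-shot erst_get_next_record_id() with a begin/next/end style iterator, so that several users can walk the stored records concurrently and records can be cleared while a walk is in progress. The function name erst_for_each_record_id below is hypothetical and only illustrates the calling convention; the pstore callbacks added further down in this diff follow the same pattern.

	/* Hypothetical consumer (sketch): visit every record currently in ERST. */
	static int erst_for_each_record_id(void (*fn)(u64 record_id))
	{
		int pos, rc;
		u64 record_id;

		rc = erst_get_record_id_begin(&pos);	/* takes a reference on the ID cache */
		if (rc)
			return rc;
		for (;;) {
			rc = erst_get_record_id_next(&pos, &record_id);
			if (rc)				/* hard error */
				break;
			if (record_id == APEI_ERST_INVALID_RECORD_ID)
				break;			/* no more records */
			fn(record_id);			/* e.g. pass to erst_read() or erst_clear() */
		}
		erst_get_record_id_end();		/* drops the reference; cache may be compacted */
		return rc;
	}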
@@ -624,17 +794,17 @@ int erst_write(const struct cper_record_header *record)
 		return -EINVAL;
 
 	if (erst_erange.attr & ERST_RANGE_NVRAM) {
-		if (!spin_trylock_irqsave(&erst_lock, flags))
+		if (!raw_spin_trylock_irqsave(&erst_lock, flags))
 			return -EBUSY;
 		rc = __erst_write_to_nvram(record);
-		spin_unlock_irqrestore(&erst_lock, flags);
+		raw_spin_unlock_irqrestore(&erst_lock, flags);
 		return rc;
 	}
 
 	if (record->record_length > erst_erange.size)
 		return -EINVAL;
 
-	if (!spin_trylock_irqsave(&erst_lock, flags))
+	if (!raw_spin_trylock_irqsave(&erst_lock, flags))
 		return -EBUSY;
 	memcpy(erst_erange.vaddr, record, record->record_length);
 	rcd_erange = erst_erange.vaddr;
@@ -642,7 +812,7 @@ int erst_write(const struct cper_record_header *record)
 	memcpy(&rcd_erange->persistence_information, "ER", 2);
 
 	rc = __erst_write_to_storage(0);
-	spin_unlock_irqrestore(&erst_lock, flags);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
 
 	return rc;
 }
@@ -696,63 +866,41 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record,
 	if (erst_disable)
 		return -ENODEV;
 
-	spin_lock_irqsave(&erst_lock, flags);
+	raw_spin_lock_irqsave(&erst_lock, flags);
 	len = __erst_read(record_id, record, buflen);
-	spin_unlock_irqrestore(&erst_lock, flags);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
 	return len;
 }
 EXPORT_SYMBOL_GPL(erst_read);
 
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
-	int rc;
-	ssize_t len;
-	unsigned long flags;
-	u64 record_id;
-
-	if (erst_disable)
-		return -ENODEV;
-
-	spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(&record_id);
-	if (rc) {
-		spin_unlock_irqrestore(&erst_lock, flags);
-		return rc;
-	}
-	/* no more record */
-	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-		spin_unlock_irqrestore(&erst_lock, flags);
-		return 0;
-	}
-
-	len = __erst_read(record_id, record, buflen);
-	spin_unlock_irqrestore(&erst_lock, flags);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
 int erst_clear(u64 record_id)
 {
-	int rc;
+	int rc, i;
 	unsigned long flags;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
-	spin_lock_irqsave(&erst_lock, flags);
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
+	raw_spin_lock_irqsave(&erst_lock, flags);
 	if (erst_erange.attr & ERST_RANGE_NVRAM)
 		rc = __erst_clear_from_nvram(record_id);
 	else
 		rc = __erst_clear_from_storage(record_id);
-	spin_unlock_irqrestore(&erst_lock, flags);
-
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	if (rc)
+		goto out;
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == record_id)
+			entries[i] = APEI_ERST_INVALID_RECORD_ID;
+	}
+	__erst_record_id_cache_compact();
+out:
+	mutex_unlock(&erst_record_id_cache.lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(erst_clear);
@@ -781,6 +929,157 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
 	return 0;
 }
 
+static int erst_open_pstore(struct pstore_info *psi);
+static int erst_close_pstore(struct pstore_info *psi);
+static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
+			   struct timespec *time);
+static u64 erst_writer(enum pstore_type_id type, size_t size);
+
+static struct pstore_info erst_info = {
+	.owner		= THIS_MODULE,
+	.name		= "erst",
+	.open		= erst_open_pstore,
+	.close		= erst_close_pstore,
+	.read		= erst_reader,
+	.write		= erst_writer,
+	.erase		= erst_clear
+};
+
+#define CPER_CREATOR_PSTORE						\
+	UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c,	\
+		0x64, 0x90, 0xb8, 0x9d)
+#define CPER_SECTION_TYPE_DMESG						\
+	UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54,	\
+		0x94, 0x19, 0xeb, 0x12)
+#define CPER_SECTION_TYPE_MCE						\
+	UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96,	\
+		0x04, 0x4a, 0x38, 0xfc)
+
+struct cper_pstore_record {
+	struct cper_record_header hdr;
+	struct cper_section_descriptor sec_hdr;
+	char data[];
+} __packed;
+
+static int reader_pos;
+
+static int erst_open_pstore(struct pstore_info *psi)
+{
+	int rc;
+
+	if (erst_disable)
+		return -ENODEV;
+
+	rc = erst_get_record_id_begin(&reader_pos);
+
+	return rc;
+}
+
+static int erst_close_pstore(struct pstore_info *psi)
+{
+	erst_get_record_id_end();
+
+	return 0;
+}
+
+static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
+			   struct timespec *time)
+{
+	int rc;
+	ssize_t len = 0;
+	u64 record_id;
+	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
+					(erst_info.buf - sizeof(*rcd));
+
+	if (erst_disable)
+		return -ENODEV;
+
+skip:
+	rc = erst_get_record_id_next(&reader_pos, &record_id);
+	if (rc)
+		goto out;
+
+	/* no more record */
+	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+		rc = -1;
+		goto out;
+	}
+
+	len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
+			erst_info.bufsize);
+	/* The record may be cleared by others, try read next record */
+	if (len == -ENOENT)
+		goto skip;
+	else if (len < 0) {
+		rc = -1;
+		goto out;
+	}
+	if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
+		goto skip;
+
+	*id = record_id;
+	if (uuid_le_cmp(rcd->sec_hdr.section_type,
+			CPER_SECTION_TYPE_DMESG) == 0)
+		*type = PSTORE_TYPE_DMESG;
+	else if (uuid_le_cmp(rcd->sec_hdr.section_type,
+			     CPER_SECTION_TYPE_MCE) == 0)
+		*type = PSTORE_TYPE_MCE;
+	else
+		*type = PSTORE_TYPE_UNKNOWN;
+
+	if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
+		time->tv_sec = rcd->hdr.timestamp;
+	else
+		time->tv_sec = 0;
+	time->tv_nsec = 0;
+
+out:
+	return (rc < 0) ? rc : (len - sizeof(*rcd));
+}
+
+static u64 erst_writer(enum pstore_type_id type, size_t size)
+{
+	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
+					(erst_info.buf - sizeof(*rcd));
+
+	memset(rcd, 0, sizeof(*rcd));
+	memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+	rcd->hdr.revision = CPER_RECORD_REV;
+	rcd->hdr.signature_end = CPER_SIG_END;
+	rcd->hdr.section_count = 1;
+	rcd->hdr.error_severity = CPER_SEV_FATAL;
+	/* timestamp valid. platform_id, partition_id are invalid */
+	rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
+	rcd->hdr.timestamp = get_seconds();
+	rcd->hdr.record_length = sizeof(*rcd) + size;
+	rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
+	rcd->hdr.notification_type = CPER_NOTIFY_MCE;
+	rcd->hdr.record_id = cper_next_record_id();
+	rcd->hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+	rcd->sec_hdr.section_offset = sizeof(*rcd);
+	rcd->sec_hdr.section_length = size;
+	rcd->sec_hdr.revision = CPER_SEC_REV;
+	/* fru_id and fru_text is invalid */
+	rcd->sec_hdr.validation_bits = 0;
+	rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
+	switch (type) {
+	case PSTORE_TYPE_DMESG:
+		rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
+		break;
+	case PSTORE_TYPE_MCE:
+		rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
+		break;
+	default:
+		return -EINVAL;
+	}
+	rcd->sec_hdr.section_severity = CPER_SEV_FATAL;
+
+	erst_write(&rcd->hdr);
+
+	return rcd->hdr.record_id;
+}
+
 static int __init erst_init(void)
 {
 	int rc = 0;
@@ -788,6 +1087,7 @@ static int __init erst_init(void)
 	struct apei_exec_context ctx;
 	struct apei_resources erst_resources;
 	struct resource *r;
+	char *buf;
 
 	if (acpi_disabled)
 		goto err;
@@ -854,6 +1154,18 @@ static int __init erst_init(void)
 	if (!erst_erange.vaddr)
 		goto err_release_erange;
 
+	buf = kmalloc(erst_erange.size, GFP_KERNEL);
+	mutex_init(&erst_info.buf_mutex);
+	if (buf) {
+		erst_info.buf = buf + sizeof(struct cper_pstore_record);
+		erst_info.bufsize = erst_erange.size -
+				    sizeof(struct cper_pstore_record);
+		if (pstore_register(&erst_info)) {
+			pr_info(ERST_PFX "Could not register with persistent store\n");
+			kfree(buf);
+		}
+	}
+
 	pr_info(ERST_PFX
 	"Error Record Serialization Table (ERST) support is initialized.\n");
 
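Buffer layout sketch (editorial note, not part of the patch): erst_init() now registers ERST with pstore using a buffer carved out of a single allocation of erst_erange.size bytes. The first sizeof(struct cper_pstore_record) bytes are reserved for the CPER record header plus one section descriptor, so erst_writer() can build those headers in place in front of the payload and erst_reader() can strip them again (it returns len - sizeof(*rcd)). The fragment below only restates that split, using the same names as the hunk above.

	char *buf = kmalloc(erst_erange.size, GFP_KERNEL);	/* headers + payload */
	struct cper_pstore_record *rcd = (struct cper_pstore_record *)buf;

	erst_info.buf     = buf + sizeof(*rcd);			/* == rcd->data, the area pstore fills */
	erst_info.bufsize = erst_erange.size - sizeof(*rcd);	/* payload capacity */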