author		Len Brown <len.brown@intel.com>	2011-03-22 01:41:47 -0400
committer	Len Brown <len.brown@intel.com>	2011-03-22 01:41:47 -0400
commit		25076246e80c0c48cc4c9115335b83343b9dc727 (patch)
tree		c7b462c6b4f67227722135a7a419ad110a6fd93e /drivers/acpi
parent		05534c9ffc9d5d950b14de8ba49a7609dc59b0b8 (diff)
parent		c413d7682020a127f54744a1b30f597692aea1fd (diff)
Merge branch 'apei-release' into release
Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/apei/Kconfig	7
-rw-r--r--	drivers/acpi/apei/cper.c	18
-rw-r--r--	drivers/acpi/apei/erst-dbg.c	24
-rw-r--r--	drivers/acpi/apei/erst.c	235
4 files changed, 233 insertions(+), 51 deletions(-)
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index fca34ccfd294..9ecf6feae830 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -21,6 +21,13 @@ config ACPI_APEI_GHES
 	  by firmware to produce more valuable hardware error
 	  information for Linux.
 
+config ACPI_APEI_PCIEAER
+	bool "APEI PCIe AER logging/recovering support"
+	depends on ACPI_APEI && PCIEAER
+	help
+	  PCIe AER errors may be reported via APEI firmware first mode.
+	  Turn on this option to enable the corresponding support.
+
 config ACPI_APEI_EINJ
 	tristate "APEI Error INJection (EINJ)"
 	depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 31464a006d76..5d4189464d63 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -29,6 +29,7 @@
 #include <linux/time.h>
 #include <linux/cper.h>
 #include <linux/acpi.h>
+#include <linux/aer.h>
 
 /*
  * CPER record ID need to be unique even after reboot, because record
@@ -70,8 +71,8 @@ static const char *cper_severity_str(unsigned int severity)
  * If the output length is longer than 80, multiple line will be
  * printed, with @pfx is printed at the beginning of each line.
  */
-static void cper_print_bits(const char *pfx, unsigned int bits,
-			    const char *strs[], unsigned int strs_size)
+void cper_print_bits(const char *pfx, unsigned int bits,
+		     const char *strs[], unsigned int strs_size)
 {
 	int i, len = 0;
 	const char *str;
@@ -81,6 +82,8 @@ static void cper_print_bits(const char *pfx, unsigned int bits,
 		if (!(bits & (1U << i)))
 			continue;
 		str = strs[i];
+		if (!str)
+			continue;
 		if (len && len + strlen(str) + 2 > 80) {
 			printk("%s\n", buf);
 			len = 0;
@@ -243,7 +246,8 @@ static const char *cper_pcie_port_type_strs[] = {
243 "root complex event collector", 246 "root complex event collector",
244}; 247};
245 248
246static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie) 249static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
250 const struct acpi_hest_generic_data *gdata)
247{ 251{
248 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) 252 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
249 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, 253 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -276,6 +280,12 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
 		printk(
 	"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
 	pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+	if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+		struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
+		cper_print_aer(pfx, gdata->error_severity, aer_regs);
+	}
+#endif
 }
 
 static const char *apei_estatus_section_flag_strs[] = {
280 290
281static const char *apei_estatus_section_flag_strs[] = { 291static const char *apei_estatus_section_flag_strs[] = {
@@ -322,7 +332,7 @@ static void apei_estatus_print_section(
 		struct cper_sec_pcie *pcie = (void *)(gdata + 1);
 		printk("%s""section_type: PCIe error\n", pfx);
 		if (gdata->error_data_length >= sizeof(*pcie))
-			cper_print_pcie(pfx, pcie);
+			cper_print_pcie(pfx, pcie, gdata);
 		else
 			goto err_section_too_small;
 	} else
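
For context on the cper_print_bits() change above (the `static` qualifier is dropped so the AER code can reuse it, and a NULL-name guard is added), here is a minimal user-space sketch of the same decode-and-wrap pattern. Everything below (print_bits, the flags table, the "APEI: " prefix) is illustrative only, not the kernel's code.

/* User-space sketch of the cper_print_bits() decoding pattern: walk a status
 * bitmask, skip bits that have no name (the new NULL check above), and wrap
 * the output at roughly 80 columns. */
#include <stdio.h>
#include <string.h>

static void print_bits(const char *pfx, unsigned int bits,
		       const char *strs[], unsigned int strs_size)
{
	unsigned int i;
	int len = 0;
	char buf[84];

	for (i = 0; i < strs_size; i++) {
		const char *str = strs[i];

		if (!(bits & (1U << i)))
			continue;
		if (!str)			/* unnamed bit: skip it */
			continue;
		if (len && len + (int)strlen(str) + 2 > 80) {
			printf("%s\n", buf);	/* line full: flush and restart */
			len = 0;
		}
		if (!len)
			len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
		else
			len += snprintf(buf + len, sizeof(buf) - len, ", %s", str);
	}
	if (len)
		printf("%s\n", buf);
}

int main(void)
{
	/* Example bit names; a real table would come from the CPER spec. */
	const char *flags[] = { "primary", NULL, "reset", "overflow" };

	print_bits("APEI: ", 0x0d, flags, 4);	/* bits 0, 2, 3 set */
	return 0;
}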
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index de73caf3cebc..a4cfb64c86a1 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -43,12 +43,27 @@ static DEFINE_MUTEX(erst_dbg_mutex);
 
 static int erst_dbg_open(struct inode *inode, struct file *file)
 {
+	int rc, *pos;
+
 	if (erst_disable)
 		return -ENODEV;
 
+	pos = (int *)&file->private_data;
+
+	rc = erst_get_record_id_begin(pos);
+	if (rc)
+		return rc;
+
 	return nonseekable_open(inode, file);
 }
 
+static int erst_dbg_release(struct inode *inode, struct file *file)
+{
+	erst_get_record_id_end();
+
+	return 0;
+}
+
 static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
 	int rc;
52static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg) 67static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
53{ 68{
54 int rc; 69 int rc;
@@ -79,18 +94,20 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
 			     size_t usize, loff_t *off)
 {
-	int rc;
+	int rc, *pos;
 	ssize_t len = 0;
 	u64 id;
 
-	if (*off != 0)
+	if (*off)
 		return -EINVAL;
 
 	if (mutex_lock_interruptible(&erst_dbg_mutex) != 0)
 		return -EINTR;
 
+	pos = (int *)&filp->private_data;
+
 retry_next:
-	rc = erst_get_next_record_id(&id);
+	rc = erst_get_record_id_next(pos, &id);
 	if (rc)
 		goto out;
 	/* no more record */
@@ -181,6 +198,7 @@ out:
 static const struct file_operations erst_dbg_ops = {
 	.owner		= THIS_MODULE,
 	.open		= erst_dbg_open,
+	.release	= erst_dbg_release,
 	.read		= erst_dbg_read,
 	.write		= erst_dbg_write,
 	.unlocked_ioctl	= erst_dbg_ioctl,
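
The erst-dbg changes above switch the debugfs reader over to the begin/next/end record-ID protocol introduced in erst.c below. As a rough illustration of that calling pattern, here is a user-space mock: the three stub functions only simulate the ID cache (the real implementations live in drivers/acpi/apei/erst.c), while the loop in main() mirrors how erst_dbg_read() walks the IDs.

/* User-space mock of the ERST record-ID iteration protocol. */
#include <stdio.h>
#include <stdint.h>

#define APEI_ERST_INVALID_RECORD_ID	((uint64_t)0xffffffffffffffffULL)

/* Mocked cache contents standing in for the firmware-backed store. */
static const uint64_t mock_ids[] = { 0x1001, 0x1002, 0x1003 };
static const int mock_len = 3;

static int erst_get_record_id_begin(int *pos)
{
	*pos = 0;			/* the real code also takes a refcount */
	return 0;
}

static int erst_get_record_id_next(int *pos, uint64_t *record_id)
{
	if (*pos < 0 || *pos >= mock_len) {
		*record_id = APEI_ERST_INVALID_RECORD_ID;
		*pos = -1;		/* cache exhausted */
	} else {
		*record_id = mock_ids[(*pos)++];
	}
	return 0;
}

static void erst_get_record_id_end(void)
{
	/* the real code drops the refcount and compacts the cache */
}

int main(void)
{
	int pos;
	uint64_t id;

	if (erst_get_record_id_begin(&pos))
		return 1;
	for (;;) {
		if (erst_get_record_id_next(&pos, &id))
			break;
		if (id == APEI_ERST_INVALID_RECORD_ID)
			break;		/* no more records */
		printf("record id: 0x%llx\n", (unsigned long long)id);
	}
	erst_get_record_id_end();
	return 0;
}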
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cf6db6b7662a..8ff8c32fef58 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -429,6 +429,22 @@ ssize_t erst_get_record_count(void)
 }
 EXPORT_SYMBOL_GPL(erst_get_record_count);
 
+#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024
+
+struct erst_record_id_cache {
+	struct mutex lock;
+	u64 *entries;
+	int len;
+	int size;
+	int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+	.refcount = 0,
+};
+
 static int __erst_get_next_record_id(u64 *record_id)
 {
 	struct apei_exec_context ctx;
@@ -443,26 +459,179 @@ static int __erst_get_next_record_id(u64 *record_id)
 	return 0;
 }
 
+int erst_get_record_id_begin(int *pos)
+{
+	int rc;
+
+	if (erst_disable)
+		return -ENODEV;
+
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
+	erst_record_id_cache.refcount++;
+	mutex_unlock(&erst_record_id_cache.lock);
+
+	*pos = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+	u64 id, prev_id, first_id;
+	int i, rc;
+	u64 *entries;
+	unsigned long flags;
+
+	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+	raw_spin_lock_irqsave(&erst_lock, flags);
+	rc = __erst_get_next_record_id(&id);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	if (rc == -ENOENT)
+		return 0;
+	if (rc)
+		return rc;
+	if (id == APEI_ERST_INVALID_RECORD_ID)
+		return 0;
+	/* can not skip current ID, or loop back to first ID */
+	if (id == prev_id || id == first_id)
+		return 0;
+	if (first_id == APEI_ERST_INVALID_RECORD_ID)
+		first_id = id;
+	prev_id = id;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == id)
+			break;
+	}
+	/* record id already in cache, try next */
+	if (i < erst_record_id_cache.len)
+		goto retry;
+	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+		int new_size, alloc_size;
+		u64 *new_entries;
+
+		new_size = erst_record_id_cache.size * 2;
+		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+				     ERST_RECORD_ID_CACHE_SIZE_MAX);
+		if (new_size <= erst_record_id_cache.size) {
+			if (printk_ratelimit())
+				pr_warning(FW_WARN ERST_PFX
+					   "too many record ID!\n");
+			return 0;
+		}
+		alloc_size = new_size * sizeof(entries[0]);
+		if (alloc_size < PAGE_SIZE)
+			new_entries = kmalloc(alloc_size, GFP_KERNEL);
+		else
+			new_entries = vmalloc(alloc_size);
+		if (!new_entries)
+			return -ENOMEM;
+		memcpy(new_entries, entries,
+		       erst_record_id_cache.len * sizeof(entries[0]));
+		if (erst_record_id_cache.size < PAGE_SIZE)
+			kfree(entries);
+		else
+			vfree(entries);
+		erst_record_id_cache.entries = entries = new_entries;
+		erst_record_id_cache.size = new_size;
+	}
+	entries[i] = id;
+	erst_record_id_cache.len++;
+
+	return 1;
+}
+
 /*
  * Get the record ID of an existing error record on the persistent
  * storage. If there is no error record on the persistent storage, the
  * returned record_id is APEI_ERST_INVALID_RECORD_ID.
  */
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
 {
-	int rc;
-	unsigned long flags;
+	int rc = 0;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(record_id);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	/* must be enclosed by erst_get_record_id_begin/end */
+	BUG_ON(!erst_record_id_cache.refcount);
+	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	entries = erst_record_id_cache.entries;
+	for (; *pos < erst_record_id_cache.len; (*pos)++)
+		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+			break;
+	/* found next record id in cache */
+	if (*pos < erst_record_id_cache.len) {
+		*record_id = entries[*pos];
+		(*pos)++;
+		goto out_unlock;
+	}
+
+	/* Try to add one more record ID to cache */
+	rc = __erst_record_id_cache_add_one();
+	if (rc < 0)
+		goto out_unlock;
+	/* successfully add one new ID */
+	if (rc == 1) {
+		*record_id = erst_record_id_cache.entries[*pos];
+		(*pos)++;
+		rc = 0;
+	} else {
+		*pos = -1;
+		*record_id = APEI_ERST_INVALID_RECORD_ID;
+	}
+out_unlock:
+	mutex_unlock(&erst_record_id_cache.lock);
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+	int i, wpos = 0;
+	u64 *entries;
+
+	if (erst_record_id_cache.refcount)
+		return;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+			continue;
+		if (wpos != i)
+			memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+		wpos++;
+	}
+	erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+	/*
+	 * erst_disable != 0 should be detected by invoker via the
+	 * return value of erst_get_record_id_begin/next, so this
+	 * function should not be called for erst_disable != 0.
+	 */
+	BUG_ON(erst_disable);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	erst_record_id_cache.refcount--;
+	BUG_ON(erst_record_id_cache.refcount < 0);
+	__erst_record_id_cache_compact();
+	mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
 
 static int __erst_write_to_storage(u64 offset)
 {
@@ -703,56 +872,34 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record,
 }
 EXPORT_SYMBOL_GPL(erst_read);
 
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
-	int rc;
-	ssize_t len;
-	unsigned long flags;
-	u64 record_id;
-
-	if (erst_disable)
-		return -ENODEV;
-
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(&record_id);
-	if (rc) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return rc;
-	}
-	/* no more record */
-	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return 0;
-	}
-
-	len = __erst_read(record_id, record, buflen);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
 int erst_clear(u64 record_id)
 {
-	int rc;
+	int rc, i;
 	unsigned long flags;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
 	raw_spin_lock_irqsave(&erst_lock, flags);
 	if (erst_erange.attr & ERST_RANGE_NVRAM)
 		rc = __erst_clear_from_nvram(record_id);
 	else
 		rc = __erst_clear_from_storage(record_id);
 	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
+	if (rc)
+		goto out;
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == record_id)
+			entries[i] = APEI_ERST_INVALID_RECORD_ID;
+	}
+	__erst_record_id_cache_compact();
+out:
+	mutex_unlock(&erst_record_id_cache.lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(erst_clear);
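
As a rough illustration of how erst_clear() cooperates with the record-ID cache above, the sketch below (user-space, made-up IDs) marks a cleared ID invalid and then runs the same forward-copy compaction as __erst_record_id_cache_compact(). It is a simplified analogue of the kernel code, not the kernel code itself.

/* User-space analogue of erst_clear()'s cache maintenance. */
#include <stdio.h>
#include <stdint.h>

#define APEI_ERST_INVALID_RECORD_ID	((uint64_t)0xffffffffffffffffULL)

static uint64_t entries[] = { 0x1001, 0x1002, 0x1003, 0x1004 };
static int cache_len = 4;

/* Mirror of __erst_record_id_cache_compact(): copy valid entries forward. */
static void cache_compact(void)
{
	int i, wpos = 0;

	for (i = 0; i < cache_len; i++) {
		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
			continue;
		if (wpos != i)
			entries[wpos] = entries[i];
		wpos++;
	}
	cache_len = wpos;
}

/* Mirror of the cache update done at the end of erst_clear(). */
static void cache_clear_id(uint64_t record_id)
{
	int i;

	for (i = 0; i < cache_len; i++)
		if (entries[i] == record_id)
			entries[i] = APEI_ERST_INVALID_RECORD_ID;
	cache_compact();
}

int main(void)
{
	int i;

	cache_clear_id(0x1002);
	for (i = 0; i < cache_len; i++)
		printf("entries[%d] = 0x%llx\n", i,
		       (unsigned long long)entries[i]);
	return 0;	/* prints 0x1001, 0x1003, 0x1004 */
}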