Diffstat (limited to 'drivers/acpi/apei/erst.c'):

 drivers/acpi/apei/erst.c | 235 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 191 insertions(+), 44 deletions(-)
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cf6db6b7662a..8ff8c32fef58 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -429,6 +429,22 @@ ssize_t erst_get_record_count(void)
 }
 EXPORT_SYMBOL_GPL(erst_get_record_count);
 
+#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024
+
+struct erst_record_id_cache {
+	struct mutex lock;
+	u64 *entries;
+	int len;
+	int size;
+	int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+	.refcount = 0,
+};
+
 static int __erst_get_next_record_id(u64 *record_id)
 {
 	struct apei_exec_context ctx;
@@ -443,26 +459,179 @@ static int __erst_get_next_record_id(u64 *record_id)
 	return 0;
 }
 
+int erst_get_record_id_begin(int *pos)
+{
+	int rc;
+
+	if (erst_disable)
+		return -ENODEV;
+
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
+	erst_record_id_cache.refcount++;
+	mutex_unlock(&erst_record_id_cache.lock);
+
+	*pos = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+	u64 id, prev_id, first_id;
+	int i, rc;
+	u64 *entries;
+	unsigned long flags;
+
+	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+	raw_spin_lock_irqsave(&erst_lock, flags);
+	rc = __erst_get_next_record_id(&id);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	if (rc == -ENOENT)
+		return 0;
+	if (rc)
+		return rc;
+	if (id == APEI_ERST_INVALID_RECORD_ID)
+		return 0;
+	/* cannot skip the current ID, or loop back to the first ID */
+	if (id == prev_id || id == first_id)
+		return 0;
+	if (first_id == APEI_ERST_INVALID_RECORD_ID)
+		first_id = id;
+	prev_id = id;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == id)
+			break;
+	}
+	/* record ID already in cache, try next */
+	if (i < erst_record_id_cache.len)
+		goto retry;
+	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+		int new_size, alloc_size;
+		u64 *new_entries;
+
+		new_size = erst_record_id_cache.size * 2;
+		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+				     ERST_RECORD_ID_CACHE_SIZE_MAX);
+		if (new_size <= erst_record_id_cache.size) {
+			if (printk_ratelimit())
+				pr_warning(FW_WARN ERST_PFX
+					   "too many record IDs!\n");
+			return 0;
+		}
+		alloc_size = new_size * sizeof(entries[0]);
+		if (alloc_size < PAGE_SIZE)
+			new_entries = kmalloc(alloc_size, GFP_KERNEL);
+		else
+			new_entries = vmalloc(alloc_size);
+		if (!new_entries)
+			return -ENOMEM;
+		memcpy(new_entries, entries,
+		       erst_record_id_cache.len * sizeof(entries[0]));
+		if (erst_record_id_cache.size < PAGE_SIZE)
+			kfree(entries);
+		else
+			vfree(entries);
+		erst_record_id_cache.entries = entries = new_entries;
+		erst_record_id_cache.size = new_size;
+	}
+	entries[i] = id;
+	erst_record_id_cache.len++;
+
+	return 1;
+}
+
 /*
  * Get the record ID of an existing error record on the persistent
  * storage. If there is no error record on the persistent storage, the
  * returned record_id is APEI_ERST_INVALID_RECORD_ID.
  */
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
 {
-	int rc;
-	unsigned long flags;
+	int rc = 0;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(record_id);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	/* must be enclosed by erst_get_record_id_begin/end */
+	BUG_ON(!erst_record_id_cache.refcount);
+	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	entries = erst_record_id_cache.entries;
+	for (; *pos < erst_record_id_cache.len; (*pos)++)
+		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+			break;
+	/* found next record ID in cache */
+	if (*pos < erst_record_id_cache.len) {
+		*record_id = entries[*pos];
+		(*pos)++;
+		goto out_unlock;
+	}
+
+	/* try to add one more record ID to the cache */
+	rc = __erst_record_id_cache_add_one();
+	if (rc < 0)
+		goto out_unlock;
+	/* successfully added one new ID */
+	if (rc == 1) {
+		*record_id = erst_record_id_cache.entries[*pos];
+		(*pos)++;
+		rc = 0;
+	} else {
+		*pos = -1;
+		*record_id = APEI_ERST_INVALID_RECORD_ID;
+	}
+out_unlock:
+	mutex_unlock(&erst_record_id_cache.lock);
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+	int i, wpos = 0;
+	u64 *entries;
+
+	if (erst_record_id_cache.refcount)
+		return;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+			continue;
+		if (wpos != i)
+			memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+		wpos++;
+	}
+	erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+	/*
+	 * erst_disable != 0 should be detected by the invoker via the
+	 * return value of erst_get_record_id_begin/next, so this
+	 * function should not be called for erst_disable != 0.
+	 */
+	BUG_ON(erst_disable);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	erst_record_id_cache.refcount--;
+	BUG_ON(erst_record_id_cache.refcount < 0);
+	__erst_record_id_cache_compact();
+	mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
 
 static int __erst_write_to_storage(u64 offset)
 {
@@ -703,56 +872,34 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record,
 }
 EXPORT_SYMBOL_GPL(erst_read);
 
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
-	int rc;
-	ssize_t len;
-	unsigned long flags;
-	u64 record_id;
-
-	if (erst_disable)
-		return -ENODEV;
-
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(&record_id);
-	if (rc) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return rc;
-	}
-	/* no more record */
-	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return 0;
-	}
-
-	len = __erst_read(record_id, record, buflen);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
 int erst_clear(u64 record_id)
 {
-	int rc;
+	int rc, i;
 	unsigned long flags;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
 	raw_spin_lock_irqsave(&erst_lock, flags);
 	if (erst_erange.attr & ERST_RANGE_NVRAM)
 		rc = __erst_clear_from_nvram(record_id);
 	else
 		rc = __erst_clear_from_storage(record_id);
 	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
+	if (rc)
+		goto out;
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == record_id)
+			entries[i] = APEI_ERST_INVALID_RECORD_ID;
+	}
+	__erst_record_id_cache_compact();
+out:
+	mutex_unlock(&erst_record_id_cache.lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(erst_clear);
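
For reference, a minimal usage sketch of the begin/next/end iteration API
introduced by this patch (not part of the patch; the function name
erst_walk_record_ids is hypothetical and per-record processing is elided).
The caller must be in a context that may sleep, since the record ID cache is
protected by a mutex:

#include <linux/types.h>
#include <acpi/apei.h>	/* ERST iteration API, APEI_ERST_INVALID_RECORD_ID */

static void erst_walk_record_ids(void)
{
	int rc, pos;
	u64 record_id;

	/* takes a cache reference; may fail with -ENODEV or -EINTR */
	rc = erst_get_record_id_begin(&pos);
	if (rc)
		return;
	for (;;) {
		rc = erst_get_record_id_next(&pos, &record_id);
		if (rc)
			break;		/* error while filling the cache */
		if (record_id == APEI_ERST_INVALID_RECORD_ID)
			break;		/* no more records */
		/* ... process record_id, e.g. via erst_read() ... */
	}
	/* drops the reference; the cache is compacted once unused */
	erst_get_record_id_end();
}

While such an iteration is in flight, erst_clear() only marks a removed ID as
APEI_ERST_INVALID_RECORD_ID in the cache (and _next skips such entries);
actual compaction is deferred until the last erst_get_record_id_end() drops
the refcount to zero.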