diff options
author | Takashi Iwai <tiwai@suse.de> | 2015-06-22 05:32:41 -0400 |
---|---|---|
committer | Takashi Iwai <tiwai@suse.de> | 2015-06-22 05:32:41 -0400 |
commit | 57fa8a1e22c5833fb2cae96af68fc39ec21cb017 (patch) | |
tree | b0bb4e4a6e04a24119da30253add9fe9ffbc8d22 /arch/sparc/kernel/mdesc.c | |
parent | f267f9dff8ba00a8b11f340da3634858ad50ebab (diff) | |
parent | c99d49a8f81fb35e67b0ffa45f320a75e0b5639d (diff) |
Merge tag 'asoc-v4.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next
ASoC: Further updates for v4.2
There's a bunch of additional updates and fixes that came in since my
original pull request here, including DT support for rt5645 and fairly
large series of cleanups and improvements to tas2552 and rcar.
Diffstat (limited to 'arch/sparc/kernel/mdesc.c')
-rw-r--r-- | arch/sparc/kernel/mdesc.c | 136 |
1 file changed, 110 insertions, 26 deletions
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 26c80e18d7b1..6f80936e0eea 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c | |||
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) | |||
614 | } | 614 | } |
615 | } | 615 | } |
616 | 616 | ||
617 | static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) | 617 | static void find_back_node_value(struct mdesc_handle *hp, u64 node, |
618 | char *srch_val, | ||
619 | void (*func)(struct mdesc_handle *, u64, int), | ||
620 | u64 val, int depth) | ||
618 | { | 621 | { |
619 | u64 a; | 622 | u64 arc; |
620 | |||
621 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { | ||
622 | u64 t = mdesc_arc_target(hp, a); | ||
623 | const char *name; | ||
624 | const u64 *id; | ||
625 | 623 | ||
626 | name = mdesc_node_name(hp, t); | 624 | /* Since we have an estimate of recursion depth, do a sanity check. */ |
627 | if (!strcmp(name, "cpu")) { | 625 | if (depth == 0) |
628 | id = mdesc_get_property(hp, t, "id", NULL); | 626 | return; |
629 | if (*id < NR_CPUS) | ||
630 | cpu_data(*id).core_id = core_id; | ||
631 | } else { | ||
632 | u64 j; | ||
633 | 627 | ||
634 | mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { | 628 | mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) { |
635 | u64 n = mdesc_arc_target(hp, j); | 629 | u64 n = mdesc_arc_target(hp, arc); |
636 | const char *n_name; | 630 | const char *name = mdesc_node_name(hp, n); |
637 | 631 | ||
638 | n_name = mdesc_node_name(hp, n); | 632 | if (!strcmp(srch_val, name)) |
639 | if (strcmp(n_name, "cpu")) | 633 | (*func)(hp, n, val); |
640 | continue; | ||
641 | 634 | ||
642 | id = mdesc_get_property(hp, n, "id", NULL); | 635 | find_back_node_value(hp, n, srch_val, func, val, depth-1); |
643 | if (*id < NR_CPUS) | ||
644 | cpu_data(*id).core_id = core_id; | ||
645 | } | ||
646 | } | ||
647 | } | 636 | } |
648 | } | 637 | } |
649 | 638 | ||
639 | static void __mark_core_id(struct mdesc_handle *hp, u64 node, | ||
640 | int core_id) | ||
641 | { | ||
642 | const u64 *id = mdesc_get_property(hp, node, "id", NULL); | ||
643 | |||
644 | if (*id < num_possible_cpus()) | ||
645 | cpu_data(*id).core_id = core_id; | ||
646 | } | ||
647 | |||
648 | static void __mark_sock_id(struct mdesc_handle *hp, u64 node, | ||
649 | int sock_id) | ||
650 | { | ||
651 | const u64 *id = mdesc_get_property(hp, node, "id", NULL); | ||
652 | |||
653 | if (*id < num_possible_cpus()) | ||
654 | cpu_data(*id).sock_id = sock_id; | ||
655 | } | ||
656 | |||
657 | static void mark_core_ids(struct mdesc_handle *hp, u64 mp, | ||
658 | int core_id) | ||
659 | { | ||
660 | find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); | ||
661 | } | ||
662 | |||
663 | static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, | ||
664 | int sock_id) | ||
665 | { | ||
666 | find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); | ||
667 | } | ||
668 | |||
650 | static void set_core_ids(struct mdesc_handle *hp) | 669 | static void set_core_ids(struct mdesc_handle *hp) |
651 | { | 670 | { |
652 | int idx; | 671 | int idx; |
653 | u64 mp; | 672 | u64 mp; |
654 | 673 | ||
655 | idx = 1; | 674 | idx = 1; |
675 | |||
676 | /* Identify unique cores by looking for cpus backpointed to by | ||
677 | * level 1 instruction caches. | ||
678 | */ | ||
656 | mdesc_for_each_node_by_name(hp, mp, "cache") { | 679 | mdesc_for_each_node_by_name(hp, mp, "cache") { |
657 | const u64 *level; | 680 | const u64 *level; |
658 | const char *type; | 681 | const char *type; |
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp) | |||
667 | continue; | 690 | continue; |
668 | 691 | ||
669 | mark_core_ids(hp, mp, idx); | 692 | mark_core_ids(hp, mp, idx); |
693 | idx++; | ||
694 | } | ||
695 | } | ||
696 | |||
697 | static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) | ||
698 | { | ||
699 | u64 mp; | ||
700 | int idx = 1; | ||
701 | int fnd = 0; | ||
702 | |||
703 | /* Identify unique sockets by looking for cpus backpointed to by | ||
704 | * shared level n caches. | ||
705 | */ | ||
706 | mdesc_for_each_node_by_name(hp, mp, "cache") { | ||
707 | const u64 *cur_lvl; | ||
708 | |||
709 | cur_lvl = mdesc_get_property(hp, mp, "level", NULL); | ||
710 | if (*cur_lvl != level) | ||
711 | continue; | ||
712 | |||
713 | mark_sock_ids(hp, mp, idx); | ||
714 | idx++; | ||
715 | fnd = 1; | ||
716 | } | ||
717 | return fnd; | ||
718 | } | ||
719 | |||
720 | static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp) | ||
721 | { | ||
722 | int idx = 1; | ||
670 | 723 | ||
724 | mdesc_for_each_node_by_name(hp, mp, "socket") { | ||
725 | u64 a; | ||
726 | |||
727 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { | ||
728 | u64 t = mdesc_arc_target(hp, a); | ||
729 | const char *name; | ||
730 | const u64 *id; | ||
731 | |||
732 | name = mdesc_node_name(hp, t); | ||
733 | if (strcmp(name, "cpu")) | ||
734 | continue; | ||
735 | |||
736 | id = mdesc_get_property(hp, t, "id", NULL); | ||
737 | if (*id < num_possible_cpus()) | ||
738 | cpu_data(*id).sock_id = idx; | ||
739 | } | ||
671 | idx++; | 740 | idx++; |
672 | } | 741 | } |
673 | } | 742 | } |
674 | 743 | ||
744 | static void set_sock_ids(struct mdesc_handle *hp) | ||
745 | { | ||
746 | u64 mp; | ||
747 | |||
748 | /* If machine description exposes sockets data use it. | ||
749 | * Otherwise fallback to use shared L3 or L2 caches. | ||
750 | */ | ||
751 | mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); | ||
752 | if (mp != MDESC_NODE_NULL) | ||
753 | return set_sock_ids_by_socket(hp, mp); | ||
754 | |||
755 | if (!set_sock_ids_by_cache(hp, 3)) | ||
756 | set_sock_ids_by_cache(hp, 2); | ||
757 | } | ||
758 | |||
675 | static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) | 759 | static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) |
676 | { | 760 | { |
677 | u64 a; | 761 | u64 a; |
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) | |||
707 | continue; | 791 | continue; |
708 | 792 | ||
709 | mark_proc_ids(hp, mp, idx); | 793 | mark_proc_ids(hp, mp, idx); |
710 | |||
711 | idx++; | 794 | idx++; |
712 | } | 795 | } |
713 | } | 796 | } |
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask) | |||
900 | 983 | ||
901 | set_core_ids(hp); | 984 | set_core_ids(hp); |
902 | set_proc_ids(hp); | 985 | set_proc_ids(hp); |
986 | set_sock_ids(hp); | ||
903 | 987 | ||
904 | mdesc_release(hp); | 988 | mdesc_release(hp); |
905 | 989 | ||