aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorMike Travis <travis@sgi.com>2016-04-29 17:54:07 -0400
committerIngo Molnar <mingo@kernel.org>2016-05-04 02:48:47 -0400
commitc443c03dd0d97620022483be6705ff611695a29c (patch)
tree3e682a202887ce48c06bba4f691242505c7191e3 /arch/x86
parent7563421b13da21dd7a947f658b5299e65ed95cbe (diff)
x86/platform/UV: Prep for UV4 MMR updates
Cleanup patch to rearrange code and modify some defines so that the next patch, which introduces the new UV4 MMR definitions, can be merged cleanly. * Clean up the M/N related address constants (M is # of address bits per blade, N is the # of blade selection bits per SSI/partition). * Fix the lookup of the alias overlay addresses and NMI definitions to allow for flexibility in newer UV architecture types. Tested-by: John Estabrook <estabrook@sgi.com> Tested-by: Gary Kroening <gfk@sgi.com> Tested-by: Nathan Zimmer <nzimmer@sgi.com> Signed-off-by: Mike Travis <travis@sgi.com> Reviewed-by: Dimitri Sivanich <sivanich@sgi.com> Cc: Andrew Banman <abanman@sgi.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Len Brown <len.brown@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Russ Anderson <rja@sgi.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20160429215403.401604203@asylum.americas.sgi.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h5
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c208
2 files changed, 129 insertions, 84 deletions
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 6660c09af3f8..ffbc1c70d7af 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -635,9 +635,14 @@ extern void uv_nmi_setup(void);
635/* Newer SMM NMI handler, not present in all systems */ 635/* Newer SMM NMI handler, not present in all systems */
636#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0 636#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
637#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS 637#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
638
639#ifdef UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
640#define UVH_NMI_MMRX_SHIFT UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
641#else
638#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? \ 642#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? \
639 UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\ 643 UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
640 UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT) 644 UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
645#endif
641#define UVH_NMI_MMRX_TYPE "EXTIO_INT0" 646#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
642 647
643/* Non-zero indicates newer SMM NMI handler present */ 648/* Non-zero indicates newer SMM NMI handler present */
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 85c38494c188..3853c7b82399 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -460,45 +460,38 @@ static __init int boot_pnode_to_blade(int pnode)
460 BUG(); 460 BUG();
461} 461}
462 462
463struct redir_addr { 463#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3
464 unsigned long redirect;
465 unsigned long alias;
466};
467
468#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 464#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
469 465
470static __initdata struct redir_addr redir_addrs[] = {
471 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
472 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
473 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
474};
475
476static unsigned char get_n_lshift(int m_val)
477{
478 union uv3h_gr0_gam_gr_config_u m_gr_config;
479
480 if (is_uv1_hub())
481 return m_val;
482
483 if (is_uv2_hub())
484 return m_val == 40 ? 40 : 39;
485
486 m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
487 return m_gr_config.s3.m_skt;
488}
489
490static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) 466static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
491{ 467{
492 union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias; 468 union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
493 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; 469 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
470 unsigned long m_redirect;
471 unsigned long m_overlay;
494 int i; 472 int i;
495 473
496 for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { 474 for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) {
497 alias.v = uv_read_local_mmr(redir_addrs[i].alias); 475 switch (i) {
476 case 0:
477 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
478 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
479 break;
480 case 1:
481 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
482 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
483 break;
484 case 2:
485 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
486 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
487 break;
488 }
489 alias.v = uv_read_local_mmr(m_overlay);
498 if (alias.s.enable && alias.s.base == 0) { 490 if (alias.s.enable && alias.s.base == 0) {
499 *size = (1UL << alias.s.m_alias); 491 *size = (1UL << alias.s.m_alias);
500 redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); 492 redirect.v = uv_read_local_mmr(m_redirect);
501 *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; 493 *base = (unsigned long)redirect.s.dest_base
494 << DEST_SHIFT;
502 return; 495 return;
503 } 496 }
504 } 497 }
@@ -561,6 +554,8 @@ static __init void map_gru_high(int max_pnode)
561{ 554{
562 union uvh_rh_gam_gru_overlay_config_mmr_u gru; 555 union uvh_rh_gam_gru_overlay_config_mmr_u gru;
563 int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT; 556 int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
557 unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK;
558 unsigned long base;
564 559
565 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); 560 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
566 if (!gru.s.enable) { 561 if (!gru.s.enable) {
@@ -572,8 +567,9 @@ static __init void map_gru_high(int max_pnode)
572 map_gru_distributed(gru.v); 567 map_gru_distributed(gru.v);
573 return; 568 return;
574 } 569 }
575 map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); 570 base = (gru.v & mask) >> shift;
576 gru_start_paddr = ((u64)gru.s.base << shift); 571 map_high("GRU", base, shift, shift, max_pnode, map_wb);
572 gru_start_paddr = ((u64)base << shift);
577 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); 573 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
578} 574}
579 575
@@ -888,16 +884,89 @@ void uv_cpu_init(void)
888 set_x2apic_extra_bits(uv_hub_info->pnode); 884 set_x2apic_extra_bits(uv_hub_info->pnode);
889} 885}
890 886
891void __init uv_system_init(void) 887struct mn {
888 unsigned char m_val;
889 unsigned char n_val;
890 unsigned char m_shift;
891 unsigned char n_lshift;
892};
893
894static void get_mn(struct mn *mnp)
892{ 895{
893 union uvh_rh_gam_config_mmr_u m_n_config; 896 union uvh_rh_gam_config_mmr_u m_n_config;
897 union uv3h_gr0_gam_gr_config_u m_gr_config;
898
899 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
900 mnp->n_val = m_n_config.s.n_skt;
901 if (is_uv4_hub()) {
902 mnp->m_val = 0;
903 mnp->n_lshift = 0;
904 } else if (is_uv3_hub()) {
905 mnp->m_val = m_n_config.s3.m_skt;
906 m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
907 mnp->n_lshift = m_gr_config.s3.m_skt;
908 } else if (is_uv2_hub()) {
909 mnp->m_val = m_n_config.s2.m_skt;
910 mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
911 } else if (is_uv1_hub()) {
912 mnp->m_val = m_n_config.s1.m_skt;
913 mnp->n_lshift = mnp->m_val;
914 }
915 mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
916}
917
918void __init uv_init_hub_info(struct uv_hub_info_s *hub_info)
919{
920 struct mn mn = {0}; /* avoid unitialized warnings */
894 union uvh_node_id_u node_id; 921 union uvh_node_id_u node_id;
895 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 922
896 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; 923 get_mn(&mn);
897 int gnode_extra, min_pnode = 999999, max_pnode = -1; 924 hub_info->m_val = mn.m_val;
898 unsigned long mmr_base, present, paddr; 925 hub_info->n_val = mn.n_val;
899 unsigned short pnode_mask; 926 hub_info->m_shift = mn.m_shift;
900 unsigned char n_lshift; 927 hub_info->n_lshift = mn.n_lshift;
928
929 hub_info->hub_revision = uv_hub_info->hub_revision;
930 hub_info->pnode_mask = (1 << mn.n_val) - 1;
931 hub_info->gpa_mask = (1UL << (mn.m_val + mn.n_val)) - 1;
932
933 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
934 hub_info->gnode_extra =
935 (node_id.s.node_id & ~((1 << mn.n_val) - 1)) >> 1;
936
937 hub_info->gnode_upper =
938 ((unsigned long)hub_info->gnode_extra << mn.m_val);
939
940 hub_info->global_mmr_base =
941 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
942 ~UV_MMR_ENABLE;
943
944 get_lowmem_redirect(
945 &hub_info->lowmem_remap_base, &hub_info->lowmem_remap_top);
946
947 hub_info->apic_pnode_shift = uvh_apicid.s.pnode_shift;
948
949 /* show system specific info */
950 pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n",
951 hub_info->n_val, hub_info->m_val,
952 hub_info->m_shift, hub_info->n_lshift);
953
954 pr_info("UV: pnode_mask:0x%x gpa_mask:0x%lx apic_pns:%d\n",
955 hub_info->pnode_mask, hub_info->gpa_mask,
956 hub_info->apic_pnode_shift);
957
958 pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n",
959 hub_info->gnode_upper, hub_info->gnode_extra);
960
961 pr_info("UV: global MMR base 0x%lx\n", hub_info->global_mmr_base);
962
963}
964
965void __init uv_system_init(void)
966{
967 struct uv_hub_info_s hub_info = {0};
968 int bytes, nid, cpu, pnode, blade, i, j;
969 int min_pnode = 999999, max_pnode = -1;
901 char *hub = is_uv4_hub() ? "UV400" : 970 char *hub = is_uv4_hub() ? "UV400" :
902 is_uv3_hub() ? "UV300" : 971 is_uv3_hub() ? "UV300" :
903 is_uv2_hub() ? "UV2000/3000" : 972 is_uv2_hub() ? "UV2000/3000" :
@@ -913,23 +982,7 @@ void __init uv_system_init(void)
913 if (is_uv1_hub()) 982 if (is_uv1_hub())
914 map_low_mmrs(); 983 map_low_mmrs();
915 984
916 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); 985 uv_init_hub_info(&hub_info);
917 m_val = m_n_config.s.m_skt;
918 n_val = m_n_config.s.n_skt;
919 pnode_mask = (1 << n_val) - 1;
920 n_lshift = get_n_lshift(m_val);
921 mmr_base =
922 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
923 ~UV_MMR_ENABLE;
924
925 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
926 gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
927 gnode_upper = ((unsigned long)gnode_extra << m_val);
928 pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x n_lshift 0x%x\n",
929 n_val, m_val, pnode_mask, gnode_upper, gnode_extra,
930 n_lshift);
931
932 pr_info("UV: global MMR base 0x%lx\n", mmr_base);
933 986
934 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) 987 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
935 uv_possible_blades += 988 uv_possible_blades +=
@@ -937,8 +990,9 @@ void __init uv_system_init(void)
937 990
938 /* uv_num_possible_blades() is really the hub count */ 991 /* uv_num_possible_blades() is really the hub count */
939 pr_info("UV: Found %d blades, %d hubs\n", 992 pr_info("UV: Found %d blades, %d hubs\n",
940 is_uv1_hub() ? uv_num_possible_blades() : 993 is_uv1_hub() ?
941 (uv_num_possible_blades() + 1) / 2, 994 uv_num_possible_blades() :
995 (uv_num_possible_blades() + 1) / 2,
942 uv_num_possible_blades()); 996 uv_num_possible_blades());
943 997
944 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); 998 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
@@ -948,7 +1002,6 @@ void __init uv_system_init(void)
948 for (blade = 0; blade < uv_num_possible_blades(); blade++) 1002 for (blade = 0; blade < uv_num_possible_blades(); blade++)
949 uv_blade_info[blade].memory_nid = -1; 1003 uv_blade_info[blade].memory_nid = -1;
950 1004
951 get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
952 1005
953 bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); 1006 bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
954 uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); 1007 uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
@@ -962,11 +1015,12 @@ void __init uv_system_init(void)
962 1015
963 blade = 0; 1016 blade = 0;
964 for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { 1017 for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
965 present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); 1018 unsigned long present =
1019 uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
966 for (j = 0; j < 64; j++) { 1020 for (j = 0; j < 64; j++) {
967 if (!test_bit(j, &present)) 1021 if (!test_bit(j, &present))
968 continue; 1022 continue;
969 pnode = (i * 64 + j) & pnode_mask; 1023 pnode = (i * 64 + j) & hub_info.pnode_mask;
970 uv_blade_info[blade].pnode = pnode; 1024 uv_blade_info[blade].pnode = pnode;
971 uv_blade_info[blade].nr_possible_cpus = 0; 1025 uv_blade_info[blade].nr_possible_cpus = 0;
972 uv_blade_info[blade].nr_online_cpus = 0; 1026 uv_blade_info[blade].nr_online_cpus = 0;
@@ -980,49 +1034,35 @@ void __init uv_system_init(void)
980 uv_bios_init(); 1034 uv_bios_init();
981 uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id, 1035 uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
982 &sn_region_size, &system_serial_number); 1036 &sn_region_size, &system_serial_number);
1037 hub_info.coherency_domain_number = sn_coherency_id;
983 uv_rtc_init(); 1038 uv_rtc_init();
984 1039
985 for_each_present_cpu(cpu) { 1040 for_each_present_cpu(cpu) {
986 int apicid = per_cpu(x86_cpu_to_apicid, cpu); 1041 int apicid = per_cpu(x86_cpu_to_apicid, cpu);
1042 int nodeid = cpu_to_node(cpu);
1043 int lcpu;
987 1044
988 nid = cpu_to_node(cpu); 1045 *uv_cpu_hub_info(cpu) = hub_info; /* common hub values */
989 /*
990 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
991 */
992 uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
993 uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
994 uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
995
996 uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
997 uv_cpu_hub_info(cpu)->n_lshift = n_lshift;
998
999 pnode = uv_apicid_to_pnode(apicid); 1046 pnode = uv_apicid_to_pnode(apicid);
1000 blade = boot_pnode_to_blade(pnode); 1047 blade = boot_pnode_to_blade(pnode);
1001 lcpu = uv_blade_info[blade].nr_possible_cpus; 1048 lcpu = uv_blade_info[blade].nr_possible_cpus;
1002 uv_blade_info[blade].nr_possible_cpus++; 1049 uv_blade_info[blade].nr_possible_cpus++;
1003 1050
1004 /* Any node on the blade, else will contain -1. */ 1051 /* Any node on the blade, else will contain -1. */
1005 uv_blade_info[blade].memory_nid = nid; 1052 uv_blade_info[blade].memory_nid = nodeid;
1006 1053
1007 uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
1008 uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
1009 uv_cpu_hub_info(cpu)->m_val = m_val;
1010 uv_cpu_hub_info(cpu)->n_val = n_val;
1011 uv_cpu_hub_info(cpu)->numa_blade_id = blade; 1054 uv_cpu_hub_info(cpu)->numa_blade_id = blade;
1012 uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
1013 uv_cpu_hub_info(cpu)->pnode = pnode; 1055 uv_cpu_hub_info(cpu)->pnode = pnode;
1014 uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
1015 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
1016 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
1017 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
1018 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
1019 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); 1056 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
1020 uv_node_to_blade[nid] = blade; 1057 uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
1058 uv_node_to_blade[nodeid] = blade;
1021 uv_cpu_to_blade[cpu] = blade; 1059 uv_cpu_to_blade[cpu] = blade;
1022 } 1060 }
1023 1061
1024 /* Add blade/pnode info for nodes without cpus */ 1062 /* Add blade/pnode info for nodes without cpus */
1025 for_each_online_node(nid) { 1063 for_each_online_node(nid) {
1064 unsigned long paddr;
1065
1026 if (uv_node_to_blade[nid] >= 0) 1066 if (uv_node_to_blade[nid] >= 0)
1027 continue; 1067 continue;
1028 paddr = node_start_pfn(nid) << PAGE_SHIFT; 1068 paddr = node_start_pfn(nid) << PAGE_SHIFT;