aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-02-20 19:26:57 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-20 19:26:57 -0500
commitc945d0227d86ddc3485290fa5da1a7d2c9b759de (patch)
tree51301955fe86555e66ccdc4cc4de43c644acb3b8
parent8b5abde16bdc939d0602ea37bef0184a396ca9ea (diff)
parentd48085f0716f195ee7432de2dd110e2093c40fd5 (diff)
Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform updates from Ingo Molnar: "Misc platform updates: SGI UV4 support additions, intel-mid Merrifield enhancements and purge of old code" * 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits) x86/platform/UV/NMI: Fix uneccessary kABI breakage x86/platform/UV: Clean up the NMI code to match current coding style x86/platform/UV: Ensure uv_system_init is called when necessary x86/platform/UV: Initialize PCH GPP_D_0 NMI Pin to be NMI source x86/platform/UV: Verify NMI action is valid, default is standard x86/platform/UV: Add basic CPU NMI health check x86/platform/UV: Add Support for UV4 Hubless NMIs x86/platform/UV: Add Support for UV4 Hubless systems x86/platform/UV: Clean up the UV APIC code x86/platform/intel-mid: Move watchdog registration to arch_initcall() x86/platform/intel-mid: Don't shadow error code of mp_map_gsi_to_irq() x86/platform/intel-mid: Allocate RTC interrupt for Merrifield x86/ioapic: Return suitable error code in mp_map_gsi_to_irq() x86/platform/UV: Fix 2 socket config problem x86/platform/UV: Fix panic with missing UVsystab support x86/platform/intel-mid: Enable RTC on Intel Merrifield x86/platform/intel: Remove PMIC GPIO block support x86/platform/intel-mid: Make intel_scu_device_register() static x86/platform/intel-mid: Enable GPIO keys on Merrifield x86/platform/intel-mid: Get rid of duplication of IPC handler ...
-rw-r--r--arch/x86/include/asm/intel-mid.h5
-rw-r--r--arch/x86/include/asm/uv/uv.h2
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h3
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c548
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_ipc.c68
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_ipc.h18
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c48
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c12
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c3
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c54
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c1
-rw-r--r--arch/x86/platform/intel-mid/sfi.c58
-rw-r--r--arch/x86/platform/uv/uv_nmi.c459
-rw-r--r--drivers/platform/x86/Kconfig7
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c326
-rw-r--r--include/linux/intel_pmic_gpio.h15
26 files changed, 796 insertions, 860 deletions
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 49da9f497b90..fe04491130ae 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -27,7 +27,6 @@ extern void intel_mid_pwr_power_off(void);
27extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev); 27extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev);
28 28
29extern int get_gpio_by_name(const char *name); 29extern int get_gpio_by_name(const char *name);
30extern void intel_scu_device_register(struct platform_device *pdev);
31extern int __init sfi_parse_mrtc(struct sfi_table_header *table); 30extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
32extern int __init sfi_parse_mtmr(struct sfi_table_header *table); 31extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
33extern int sfi_mrtc_num; 32extern int sfi_mrtc_num;
@@ -42,10 +41,8 @@ struct devs_id {
42 char name[SFI_NAME_LEN + 1]; 41 char name[SFI_NAME_LEN + 1];
43 u8 type; 42 u8 type;
44 u8 delay; 43 u8 delay;
44 u8 msic;
45 void *(*get_platform_data)(void *info); 45 void *(*get_platform_data)(void *info);
46 /* Custom handler for devices */
47 void (*device_handler)(struct sfi_device_table_entry *pentry,
48 struct devs_id *dev);
49}; 46};
50 47
51#define sfi_device(i) \ 48#define sfi_device(i) \
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 062921ef34e9..6686820feae9 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -10,6 +10,7 @@ struct mm_struct;
10 10
11extern enum uv_system_type get_uv_system_type(void); 11extern enum uv_system_type get_uv_system_type(void);
12extern int is_uv_system(void); 12extern int is_uv_system(void);
13extern int is_uv_hubless(void);
13extern void uv_cpu_init(void); 14extern void uv_cpu_init(void);
14extern void uv_nmi_init(void); 15extern void uv_nmi_init(void);
15extern void uv_system_init(void); 16extern void uv_system_init(void);
@@ -23,6 +24,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
23 24
24static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } 25static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
25static inline int is_uv_system(void) { return 0; } 26static inline int is_uv_system(void) { return 0; }
27static inline int is_uv_hubless(void) { return 0; }
26static inline void uv_cpu_init(void) { } 28static inline void uv_cpu_init(void) { }
27static inline void uv_system_init(void) { } 29static inline void uv_system_init(void) { }
28static inline const struct cpumask * 30static inline const struct cpumask *
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 097b80c989c4..72e8300b1e8a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -772,6 +772,7 @@ static inline int uv_num_possible_blades(void)
772 772
773/* Per Hub NMI support */ 773/* Per Hub NMI support */
774extern void uv_nmi_setup(void); 774extern void uv_nmi_setup(void);
775extern void uv_nmi_setup_hubless(void);
775 776
776/* BMC sets a bit this MMR non-zero before sending an NMI */ 777/* BMC sets a bit this MMR non-zero before sending an NMI */
777#define UVH_NMI_MMR UVH_SCRATCH5 778#define UVH_NMI_MMR UVH_SCRATCH5
@@ -799,6 +800,8 @@ struct uv_hub_nmi_s {
799 atomic_t read_mmr_count; /* count of MMR reads */ 800 atomic_t read_mmr_count; /* count of MMR reads */
800 atomic_t nmi_count; /* count of true UV NMIs */ 801 atomic_t nmi_count; /* count of true UV NMIs */
801 unsigned long nmi_value; /* last value read from NMI MMR */ 802 unsigned long nmi_value; /* last value read from NMI MMR */
803 bool hub_present; /* false means UV hubless system */
804 bool pch_owner; /* indicates this hub owns PCH */
802}; 805};
803 806
804struct uv_cpu_nmi_s { 807struct uv_cpu_nmi_s {
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 52f352b063fd..347bb9f65737 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1107,12 +1107,12 @@ int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info)
1107 1107
1108 ioapic = mp_find_ioapic(gsi); 1108 ioapic = mp_find_ioapic(gsi);
1109 if (ioapic < 0) 1109 if (ioapic < 0)
1110 return -1; 1110 return -ENODEV;
1111 1111
1112 pin = mp_find_ioapic_pin(ioapic, gsi); 1112 pin = mp_find_ioapic_pin(ioapic, gsi);
1113 idx = find_irq_entry(ioapic, pin, mp_INT); 1113 idx = find_irq_entry(ioapic, pin, mp_INT);
1114 if ((flags & IOAPIC_MAP_CHECK) && idx < 0) 1114 if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
1115 return -1; 1115 return -ENODEV;
1116 1116
1117 return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info); 1117 return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info);
1118} 1118}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 35690a168cf7..e9f8f8cdd570 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -41,40 +41,44 @@
41 41
42DEFINE_PER_CPU(int, x2apic_extra_bits); 42DEFINE_PER_CPU(int, x2apic_extra_bits);
43 43
44#define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args) 44static enum uv_system_type uv_system_type;
45 45static bool uv_hubless_system;
46static enum uv_system_type uv_system_type; 46static u64 gru_start_paddr, gru_end_paddr;
47static u64 gru_start_paddr, gru_end_paddr; 47static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
48static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr; 48static u64 gru_dist_lmask, gru_dist_umask;
49static u64 gru_dist_lmask, gru_dist_umask; 49static union uvh_apicid uvh_apicid;
50static union uvh_apicid uvh_apicid; 50
51 51/* Information derived from CPUID: */
52/* info derived from CPUID */
53static struct { 52static struct {
54 unsigned int apicid_shift; 53 unsigned int apicid_shift;
55 unsigned int apicid_mask; 54 unsigned int apicid_mask;
56 unsigned int socketid_shift; /* aka pnode_shift for UV1/2/3 */ 55 unsigned int socketid_shift; /* aka pnode_shift for UV1/2/3 */
57 unsigned int pnode_mask; 56 unsigned int pnode_mask;
58 unsigned int gpa_shift; 57 unsigned int gpa_shift;
58 unsigned int gnode_shift;
59} uv_cpuid; 59} uv_cpuid;
60 60
61int uv_min_hub_revision_id; 61int uv_min_hub_revision_id;
62EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); 62EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
63
63unsigned int uv_apicid_hibits; 64unsigned int uv_apicid_hibits;
64EXPORT_SYMBOL_GPL(uv_apicid_hibits); 65EXPORT_SYMBOL_GPL(uv_apicid_hibits);
65 66
66static struct apic apic_x2apic_uv_x; 67static struct apic apic_x2apic_uv_x;
67static struct uv_hub_info_s uv_hub_info_node0; 68static struct uv_hub_info_s uv_hub_info_node0;
68 69
69/* Set this to use hardware error handler instead of kernel panic */ 70/* Set this to use hardware error handler instead of kernel panic: */
70static int disable_uv_undefined_panic = 1; 71static int disable_uv_undefined_panic = 1;
72
71unsigned long uv_undefined(char *str) 73unsigned long uv_undefined(char *str)
72{ 74{
73 if (likely(!disable_uv_undefined_panic)) 75 if (likely(!disable_uv_undefined_panic))
74 panic("UV: error: undefined MMR: %s\n", str); 76 panic("UV: error: undefined MMR: %s\n", str);
75 else 77 else
76 pr_crit("UV: error: undefined MMR: %s\n", str); 78 pr_crit("UV: error: undefined MMR: %s\n", str);
77 return ~0ul; /* cause a machine fault */ 79
80 /* Cause a machine fault: */
81 return ~0ul;
78} 82}
79EXPORT_SYMBOL(uv_undefined); 83EXPORT_SYMBOL(uv_undefined);
80 84
@@ -85,18 +89,19 @@ static unsigned long __init uv_early_read_mmr(unsigned long addr)
85 mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr)); 89 mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
86 val = *mmr; 90 val = *mmr;
87 early_iounmap(mmr, sizeof(*mmr)); 91 early_iounmap(mmr, sizeof(*mmr));
92
88 return val; 93 return val;
89} 94}
90 95
91static inline bool is_GRU_range(u64 start, u64 end) 96static inline bool is_GRU_range(u64 start, u64 end)
92{ 97{
93 if (gru_dist_base) { 98 if (gru_dist_base) {
94 u64 su = start & gru_dist_umask; /* upper (incl pnode) bits */ 99 u64 su = start & gru_dist_umask; /* Upper (incl pnode) bits */
95 u64 sl = start & gru_dist_lmask; /* base offset bits */ 100 u64 sl = start & gru_dist_lmask; /* Base offset bits */
96 u64 eu = end & gru_dist_umask; 101 u64 eu = end & gru_dist_umask;
97 u64 el = end & gru_dist_lmask; 102 u64 el = end & gru_dist_lmask;
98 103
99 /* Must reside completely within a single GRU range */ 104 /* Must reside completely within a single GRU range: */
100 return (sl == gru_dist_base && el == gru_dist_base && 105 return (sl == gru_dist_base && el == gru_dist_base &&
101 su >= gru_first_node_paddr && 106 su >= gru_first_node_paddr &&
102 su <= gru_last_node_paddr && 107 su <= gru_last_node_paddr &&
@@ -133,13 +138,14 @@ static int __init early_get_pnodeid(void)
133 break; 138 break;
134 case UV4_HUB_PART_NUMBER: 139 case UV4_HUB_PART_NUMBER:
135 uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1; 140 uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
141 uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
136 break; 142 break;
137 } 143 }
138 144
139 uv_hub_info->hub_revision = uv_min_hub_revision_id; 145 uv_hub_info->hub_revision = uv_min_hub_revision_id;
140 uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1; 146 uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
141 pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask; 147 pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
142 uv_cpuid.gpa_shift = 46; /* default unless changed */ 148 uv_cpuid.gpa_shift = 46; /* Default unless changed */
143 149
144 pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n", 150 pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
145 node_id.s.revision, node_id.s.part_number, node_id.s.node_id, 151 node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
@@ -147,11 +153,12 @@ static int __init early_get_pnodeid(void)
147 return pnode; 153 return pnode;
148} 154}
149 155
150/* [copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */ 156/* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
151#define SMT_LEVEL 0 /* leaf 0xb SMT level */ 157
152#define INVALID_TYPE 0 /* leaf 0xb sub-leaf types */ 158#define SMT_LEVEL 0 /* Leaf 0xb SMT level */
153#define SMT_TYPE 1 159#define INVALID_TYPE 0 /* Leaf 0xb sub-leaf types */
154#define CORE_TYPE 2 160#define SMT_TYPE 1
161#define CORE_TYPE 2
155#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) 162#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
156#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) 163#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
157 164
@@ -165,11 +172,13 @@ static void set_x2apic_bits(void)
165 pr_info("UV: CPU does not have CPUID.11\n"); 172 pr_info("UV: CPU does not have CPUID.11\n");
166 return; 173 return;
167 } 174 }
175
168 cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); 176 cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
169 if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) { 177 if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) {
170 pr_info("UV: CPUID.11 not implemented\n"); 178 pr_info("UV: CPUID.11 not implemented\n");
171 return; 179 return;
172 } 180 }
181
173 sid_shift = BITS_SHIFT_NEXT_LEVEL(eax); 182 sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
174 sub_index = 1; 183 sub_index = 1;
175 do { 184 do {
@@ -180,8 +189,9 @@ static void set_x2apic_bits(void)
180 } 189 }
181 sub_index++; 190 sub_index++;
182 } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); 191 } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
183 uv_cpuid.apicid_shift = 0; 192
184 uv_cpuid.apicid_mask = (~(-1 << sid_shift)); 193 uv_cpuid.apicid_shift = 0;
194 uv_cpuid.apicid_mask = (~(-1 << sid_shift));
185 uv_cpuid.socketid_shift = sid_shift; 195 uv_cpuid.socketid_shift = sid_shift;
186} 196}
187 197
@@ -192,10 +202,8 @@ static void __init early_get_apic_socketid_shift(void)
192 202
193 set_x2apic_bits(); 203 set_x2apic_bits();
194 204
195 pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n", 205 pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n", uv_cpuid.apicid_shift, uv_cpuid.apicid_mask);
196 uv_cpuid.apicid_shift, uv_cpuid.apicid_mask); 206 pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n", uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
197 pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n",
198 uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
199} 207}
200 208
201/* 209/*
@@ -208,10 +216,8 @@ static void __init uv_set_apicid_hibit(void)
208 union uv1h_lb_target_physical_apic_id_mask_u apicid_mask; 216 union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
209 217
210 if (is_uv1_hub()) { 218 if (is_uv1_hub()) {
211 apicid_mask.v = 219 apicid_mask.v = uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
212 uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK); 220 uv_apicid_hibits = apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
213 uv_apicid_hibits =
214 apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
215 } 221 }
216} 222}
217 223
@@ -220,20 +226,26 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
220 int pnodeid; 226 int pnodeid;
221 int uv_apic; 227 int uv_apic;
222 228
223 if (strncmp(oem_id, "SGI", 3) != 0) 229 if (strncmp(oem_id, "SGI", 3) != 0) {
230 if (strncmp(oem_id, "NSGI", 4) == 0) {
231 uv_hubless_system = true;
232 pr_info("UV: OEM IDs %s/%s, HUBLESS\n",
233 oem_id, oem_table_id);
234 }
224 return 0; 235 return 0;
236 }
225 237
226 if (numa_off) { 238 if (numa_off) {
227 pr_err("UV: NUMA is off, disabling UV support\n"); 239 pr_err("UV: NUMA is off, disabling UV support\n");
228 return 0; 240 return 0;
229 } 241 }
230 242
231 /* Setup early hub type field in uv_hub_info for Node 0 */ 243 /* Set up early hub type field in uv_hub_info for Node 0 */
232 uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; 244 uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
233 245
234 /* 246 /*
235 * Determine UV arch type. 247 * Determine UV arch type.
236 * SGI: UV100/1000 248 * SGI: UV100/1000
237 * SGI2: UV2000/3000 249 * SGI2: UV2000/3000
238 * SGI3: UV300 (truncated to 4 chars because of different varieties) 250 * SGI3: UV300 (truncated to 4 chars because of different varieties)
239 * SGI4: UV400 (truncated to 4 chars because of different varieties) 251 * SGI4: UV400 (truncated to 4 chars because of different varieties)
@@ -249,31 +261,32 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
249 261
250 pnodeid = early_get_pnodeid(); 262 pnodeid = early_get_pnodeid();
251 early_get_apic_socketid_shift(); 263 early_get_apic_socketid_shift();
252 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; 264
265 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
253 x86_platform.nmi_init = uv_nmi_init; 266 x86_platform.nmi_init = uv_nmi_init;
254 267
255 if (!strcmp(oem_table_id, "UVX")) { /* most common */ 268 if (!strcmp(oem_table_id, "UVX")) {
269 /* This is the most common hardware variant: */
256 uv_system_type = UV_X2APIC; 270 uv_system_type = UV_X2APIC;
257 uv_apic = 0; 271 uv_apic = 0;
258 272
259 } else if (!strcmp(oem_table_id, "UVH")) { /* only UV1 systems */ 273 } else if (!strcmp(oem_table_id, "UVH")) {
274 /* Only UV1 systems: */
260 uv_system_type = UV_NON_UNIQUE_APIC; 275 uv_system_type = UV_NON_UNIQUE_APIC;
261 __this_cpu_write(x2apic_extra_bits, 276 __this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
262 pnodeid << uvh_apicid.s.pnode_shift);
263 uv_set_apicid_hibit(); 277 uv_set_apicid_hibit();
264 uv_apic = 1; 278 uv_apic = 1;
265 279
266 } else if (!strcmp(oem_table_id, "UVL")) { /* only used for */ 280 } else if (!strcmp(oem_table_id, "UVL")) {
267 uv_system_type = UV_LEGACY_APIC; /* very small systems */ 281 /* Only used for very small systems: */
282 uv_system_type = UV_LEGACY_APIC;
268 uv_apic = 0; 283 uv_apic = 0;
269 284
270 } else { 285 } else {
271 goto badbios; 286 goto badbios;
272 } 287 }
273 288
274 pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", 289 pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic);
275 oem_id, oem_table_id, uv_system_type,
276 uv_min_hub_revision_id, uv_apic);
277 290
278 return uv_apic; 291 return uv_apic;
279 292
@@ -294,6 +307,12 @@ int is_uv_system(void)
294} 307}
295EXPORT_SYMBOL_GPL(is_uv_system); 308EXPORT_SYMBOL_GPL(is_uv_system);
296 309
310int is_uv_hubless(void)
311{
312 return uv_hubless_system;
313}
314EXPORT_SYMBOL_GPL(is_uv_hubless);
315
297void **__uv_hub_info_list; 316void **__uv_hub_info_list;
298EXPORT_SYMBOL_GPL(__uv_hub_info_list); 317EXPORT_SYMBOL_GPL(__uv_hub_info_list);
299 318
@@ -306,16 +325,18 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
306unsigned long sn_rtc_cycles_per_second; 325unsigned long sn_rtc_cycles_per_second;
307EXPORT_SYMBOL(sn_rtc_cycles_per_second); 326EXPORT_SYMBOL(sn_rtc_cycles_per_second);
308 327
309/* the following values are used for the per node hub info struct */ 328/* The following values are used for the per node hub info struct */
310static __initdata unsigned short *_node_to_pnode; 329static __initdata unsigned short *_node_to_pnode;
311static __initdata unsigned short _min_socket, _max_socket; 330static __initdata unsigned short _min_socket, _max_socket;
312static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len; 331static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len;
313static __initdata struct uv_gam_range_entry *uv_gre_table; 332static __initdata struct uv_gam_range_entry *uv_gre_table;
314static __initdata struct uv_gam_parameters *uv_gp_table; 333static __initdata struct uv_gam_parameters *uv_gp_table;
315static __initdata unsigned short *_socket_to_node; 334static __initdata unsigned short *_socket_to_node;
316static __initdata unsigned short *_socket_to_pnode; 335static __initdata unsigned short *_socket_to_pnode;
317static __initdata unsigned short *_pnode_to_socket; 336static __initdata unsigned short *_pnode_to_socket;
318static __initdata struct uv_gam_range_s *_gr_table; 337
338static __initdata struct uv_gam_range_s *_gr_table;
339
319#define SOCK_EMPTY ((unsigned short)~0) 340#define SOCK_EMPTY ((unsigned short)~0)
320 341
321extern int uv_hub_info_version(void) 342extern int uv_hub_info_version(void)
@@ -324,7 +345,7 @@ extern int uv_hub_info_version(void)
324} 345}
325EXPORT_SYMBOL(uv_hub_info_version); 346EXPORT_SYMBOL(uv_hub_info_version);
326 347
327/* Build GAM range lookup table */ 348/* Build GAM range lookup table: */
328static __init void build_uv_gr_table(void) 349static __init void build_uv_gr_table(void)
329{ 350{
330 struct uv_gam_range_entry *gre = uv_gre_table; 351 struct uv_gam_range_entry *gre = uv_gre_table;
@@ -342,25 +363,24 @@ static __init void build_uv_gr_table(void)
342 363
343 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 364 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
344 if (gre->type == UV_GAM_RANGE_TYPE_HOLE) { 365 if (gre->type == UV_GAM_RANGE_TYPE_HOLE) {
345 if (!ram_limit) { /* mark hole between ram/non-ram */ 366 if (!ram_limit) {
367 /* Mark hole between RAM/non-RAM: */
346 ram_limit = last_limit; 368 ram_limit = last_limit;
347 last_limit = gre->limit; 369 last_limit = gre->limit;
348 lsid++; 370 lsid++;
349 continue; 371 continue;
350 } 372 }
351 last_limit = gre->limit; 373 last_limit = gre->limit;
352 pr_info("UV: extra hole in GAM RE table @%d\n", 374 pr_info("UV: extra hole in GAM RE table @%d\n", (int)(gre - uv_gre_table));
353 (int)(gre - uv_gre_table));
354 continue; 375 continue;
355 } 376 }
356 if (_max_socket < gre->sockid) { 377 if (_max_socket < gre->sockid) {
357 pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n", 378 pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n", gre->sockid, _max_socket, (int)(gre - uv_gre_table));
358 gre->sockid, _max_socket,
359 (int)(gre - uv_gre_table));
360 continue; 379 continue;
361 } 380 }
362 sid = gre->sockid - _min_socket; 381 sid = gre->sockid - _min_socket;
363 if (lsid < sid) { /* new range */ 382 if (lsid < sid) {
383 /* New range: */
364 grt = &_gr_table[indx]; 384 grt = &_gr_table[indx];
365 grt->base = lindx; 385 grt->base = lindx;
366 grt->nasid = gre->nasid; 386 grt->nasid = gre->nasid;
@@ -369,27 +389,32 @@ static __init void build_uv_gr_table(void)
369 lindx = indx++; 389 lindx = indx++;
370 continue; 390 continue;
371 } 391 }
372 if (lsid == sid && !ram_limit) { /* update range */ 392 /* Update range: */
373 if (grt->limit == last_limit) { /* .. if contiguous */ 393 if (lsid == sid && !ram_limit) {
394 /* .. if contiguous: */
395 if (grt->limit == last_limit) {
374 grt->limit = last_limit = gre->limit; 396 grt->limit = last_limit = gre->limit;
375 continue; 397 continue;
376 } 398 }
377 } 399 }
378 if (!ram_limit) { /* non-contiguous ram range */ 400 /* Non-contiguous RAM range: */
401 if (!ram_limit) {
379 grt++; 402 grt++;
380 grt->base = lindx; 403 grt->base = lindx;
381 grt->nasid = gre->nasid; 404 grt->nasid = gre->nasid;
382 grt->limit = last_limit = gre->limit; 405 grt->limit = last_limit = gre->limit;
383 continue; 406 continue;
384 } 407 }
385 grt++; /* non-contiguous/non-ram */ 408 /* Non-contiguous/non-RAM: */
386 grt->base = grt - _gr_table; /* base is this entry */ 409 grt++;
410 /* base is this entry */
411 grt->base = grt - _gr_table;
387 grt->nasid = gre->nasid; 412 grt->nasid = gre->nasid;
388 grt->limit = last_limit = gre->limit; 413 grt->limit = last_limit = gre->limit;
389 lsid++; 414 lsid++;
390 } 415 }
391 416
392 /* shorten table if possible */ 417 /* Shorten table if possible */
393 grt++; 418 grt++;
394 i = grt - _gr_table; 419 i = grt - _gr_table;
395 if (i < _gr_table_len) { 420 if (i < _gr_table_len) {
@@ -403,16 +428,15 @@ static __init void build_uv_gr_table(void)
403 } 428 }
404 } 429 }
405 430
406 /* display resultant gam range table */ 431 /* Display resultant GAM range table: */
407 for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) { 432 for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) {
433 unsigned long start, end;
408 int gb = grt->base; 434 int gb = grt->base;
409 unsigned long start = gb < 0 ? 0 :
410 (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
411 unsigned long end =
412 (unsigned long)grt->limit << UV_GAM_RANGE_SHFT;
413 435
414 pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n", 436 start = gb < 0 ? 0 : (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
415 i, grt->nasid, start, end, gb); 437 end = (unsigned long)grt->limit << UV_GAM_RANGE_SHFT;
438
439 pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n", i, grt->nasid, start, end, gb);
416 } 440 }
417} 441}
418 442
@@ -423,16 +447,19 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
423 447
424 pnode = uv_apicid_to_pnode(phys_apicid); 448 pnode = uv_apicid_to_pnode(phys_apicid);
425 phys_apicid |= uv_apicid_hibits; 449 phys_apicid |= uv_apicid_hibits;
450
426 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 451 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
427 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 452 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
428 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 453 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
429 APIC_DM_INIT; 454 APIC_DM_INIT;
455
430 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 456 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
431 457
432 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 458 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
433 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 459 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
434 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 460 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
435 APIC_DM_STARTUP; 461 APIC_DM_STARTUP;
462
436 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 463 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
437 464
438 return 0; 465 return 0;
@@ -566,7 +593,7 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
566 .apic_id_registered = uv_apic_id_registered, 593 .apic_id_registered = uv_apic_id_registered,
567 594
568 .irq_delivery_mode = dest_Fixed, 595 .irq_delivery_mode = dest_Fixed,
569 .irq_dest_mode = 0, /* physical */ 596 .irq_dest_mode = 0, /* Physical */
570 597
571 .target_cpus = online_target_cpus, 598 .target_cpus = online_target_cpus,
572 .disable_esr = 0, 599 .disable_esr = 0,
@@ -627,23 +654,22 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
627 switch (i) { 654 switch (i) {
628 case 0: 655 case 0:
629 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR; 656 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
630 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR; 657 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
631 break; 658 break;
632 case 1: 659 case 1:
633 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR; 660 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
634 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR; 661 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
635 break; 662 break;
636 case 2: 663 case 2:
637 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR; 664 m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
638 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR; 665 m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
639 break; 666 break;
640 } 667 }
641 alias.v = uv_read_local_mmr(m_overlay); 668 alias.v = uv_read_local_mmr(m_overlay);
642 if (alias.s.enable && alias.s.base == 0) { 669 if (alias.s.enable && alias.s.base == 0) {
643 *size = (1UL << alias.s.m_alias); 670 *size = (1UL << alias.s.m_alias);
644 redirect.v = uv_read_local_mmr(m_redirect); 671 redirect.v = uv_read_local_mmr(m_redirect);
645 *base = (unsigned long)redirect.s.dest_base 672 *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
646 << DEST_SHIFT;
647 return; 673 return;
648 } 674 }
649 } 675 }
@@ -652,8 +678,7 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
652 678
653enum map_type {map_wb, map_uc}; 679enum map_type {map_wb, map_uc};
654 680
655static __init void map_high(char *id, unsigned long base, int pshift, 681static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type)
656 int bshift, int max_pnode, enum map_type map_type)
657{ 682{
658 unsigned long bytes, paddr; 683 unsigned long bytes, paddr;
659 684
@@ -678,16 +703,19 @@ static __init void map_gru_distributed(unsigned long c)
678 int nid; 703 int nid;
679 704
680 gru.v = c; 705 gru.v = c;
681 /* only base bits 42:28 relevant in dist mode */ 706
707 /* Only base bits 42:28 relevant in dist mode */
682 gru_dist_base = gru.v & 0x000007fff0000000UL; 708 gru_dist_base = gru.v & 0x000007fff0000000UL;
683 if (!gru_dist_base) { 709 if (!gru_dist_base) {
684 pr_info("UV: Map GRU_DIST base address NULL\n"); 710 pr_info("UV: Map GRU_DIST base address NULL\n");
685 return; 711 return;
686 } 712 }
713
687 bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT; 714 bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
688 gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1); 715 gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1);
689 gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1); 716 gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1);
690 gru_dist_base &= gru_dist_lmask; /* Clear bits above M */ 717 gru_dist_base &= gru_dist_lmask; /* Clear bits above M */
718
691 for_each_online_node(nid) { 719 for_each_online_node(nid) {
692 paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) | 720 paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) |
693 gru_dist_base; 721 gru_dist_base;
@@ -695,11 +723,12 @@ static __init void map_gru_distributed(unsigned long c)
695 gru_first_node_paddr = min(paddr, gru_first_node_paddr); 723 gru_first_node_paddr = min(paddr, gru_first_node_paddr);
696 gru_last_node_paddr = max(paddr, gru_last_node_paddr); 724 gru_last_node_paddr = max(paddr, gru_last_node_paddr);
697 } 725 }
726
698 /* Save upper (63:M) bits of address only for is_GRU_range */ 727 /* Save upper (63:M) bits of address only for is_GRU_range */
699 gru_first_node_paddr &= gru_dist_umask; 728 gru_first_node_paddr &= gru_dist_umask;
700 gru_last_node_paddr &= gru_dist_umask; 729 gru_last_node_paddr &= gru_dist_umask;
701 pr_debug("UV: Map GRU_DIST base 0x%016llx 0x%016llx - 0x%016llx\n", 730
702 gru_dist_base, gru_first_node_paddr, gru_last_node_paddr); 731 pr_debug("UV: Map GRU_DIST base 0x%016llx 0x%016llx - 0x%016llx\n", gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
703} 732}
704 733
705static __init void map_gru_high(int max_pnode) 734static __init void map_gru_high(int max_pnode)
@@ -719,6 +748,7 @@ static __init void map_gru_high(int max_pnode)
719 map_gru_distributed(gru.v); 748 map_gru_distributed(gru.v);
720 return; 749 return;
721 } 750 }
751
722 base = (gru.v & mask) >> shift; 752 base = (gru.v & mask) >> shift;
723 map_high("GRU", base, shift, shift, max_pnode, map_wb); 753 map_high("GRU", base, shift, shift, max_pnode, map_wb);
724 gru_start_paddr = ((u64)base << shift); 754 gru_start_paddr = ((u64)base << shift);
@@ -772,8 +802,8 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
772 802
773 id = mmiohs[index].id; 803 id = mmiohs[index].id;
774 overlay.v = uv_read_local_mmr(mmiohs[index].overlay); 804 overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
775 pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", 805
776 id, overlay.v, overlay.s3.base, overlay.s3.m_io); 806 pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", id, overlay.v, overlay.s3.base, overlay.s3.m_io);
777 if (!overlay.s3.enable) { 807 if (!overlay.s3.enable) {
778 pr_info("UV: %s disabled\n", id); 808 pr_info("UV: %s disabled\n", id);
779 return; 809 return;
@@ -784,7 +814,8 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
784 m_io = overlay.s3.m_io; 814 m_io = overlay.s3.m_io;
785 mmr = mmiohs[index].redirect; 815 mmr = mmiohs[index].redirect;
786 n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH; 816 n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
787 min_pnode *= 2; /* convert to NASID */ 817 /* Convert to NASID: */
818 min_pnode *= 2;
788 max_pnode *= 2; 819 max_pnode *= 2;
789 max_io = lnasid = fi = li = -1; 820 max_io = lnasid = fi = li = -1;
790 821
@@ -793,16 +824,18 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
793 824
794 redirect.v = uv_read_local_mmr(mmr + i * 8); 825 redirect.v = uv_read_local_mmr(mmr + i * 8);
795 nasid = redirect.s3.nasid; 826 nasid = redirect.s3.nasid;
827 /* Invalid NASID: */
796 if (nasid < min_pnode || max_pnode < nasid) 828 if (nasid < min_pnode || max_pnode < nasid)
797 nasid = -1; /* invalid NASID */ 829 nasid = -1;
798 830
799 if (nasid == lnasid) { 831 if (nasid == lnasid) {
800 li = i; 832 li = i;
801 if (i != n-1) /* last entry check */ 833 /* Last entry check: */
834 if (i != n-1)
802 continue; 835 continue;
803 } 836 }
804 837
805 /* check if we have a cached (or last) redirect to print */ 838 /* Check if we have a cached (or last) redirect to print: */
806 if (lnasid != -1 || (i == n-1 && nasid != -1)) { 839 if (lnasid != -1 || (i == n-1 && nasid != -1)) {
807 unsigned long addr1, addr2; 840 unsigned long addr1, addr2;
808 int f, l; 841 int f, l;
@@ -814,12 +847,9 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
814 f = fi; 847 f = fi;
815 l = li; 848 l = li;
816 } 849 }
817 addr1 = (base << shift) + 850 addr1 = (base << shift) + f * (1ULL << m_io);
818 f * (1ULL << m_io); 851 addr2 = (base << shift) + (l + 1) * (1ULL << m_io);
819 addr2 = (base << shift) + 852 pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2);
820 (l + 1) * (1ULL << m_io);
821 pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
822 id, fi, li, lnasid, addr1, addr2);
823 if (max_io < l) 853 if (max_io < l)
824 max_io = l; 854 max_io = l;
825 } 855 }
@@ -827,8 +857,7 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
827 lnasid = nasid; 857 lnasid = nasid;
828 } 858 }
829 859
830 pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", 860 pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", id, base, shift, m_io, max_io);
831 id, base, shift, m_io, max_io);
832 861
833 if (max_io >= 0) 862 if (max_io >= 0)
834 map_high(id, base, shift, m_io, max_io, map_uc); 863 map_high(id, base, shift, m_io, max_io, map_uc);
@@ -841,36 +870,35 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
841 int shift, enable, m_io, n_io; 870 int shift, enable, m_io, n_io;
842 871
843 if (is_uv3_hub() || is_uv4_hub()) { 872 if (is_uv3_hub() || is_uv4_hub()) {
844 /* Map both MMIOH Regions */ 873 /* Map both MMIOH regions: */
845 map_mmioh_high_uv3(0, min_pnode, max_pnode); 874 map_mmioh_high_uv3(0, min_pnode, max_pnode);
846 map_mmioh_high_uv3(1, min_pnode, max_pnode); 875 map_mmioh_high_uv3(1, min_pnode, max_pnode);
847 return; 876 return;
848 } 877 }
849 878
850 if (is_uv1_hub()) { 879 if (is_uv1_hub()) {
851 mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR; 880 mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
852 shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 881 shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
853 mmioh.v = uv_read_local_mmr(mmr); 882 mmioh.v = uv_read_local_mmr(mmr);
854 enable = !!mmioh.s1.enable; 883 enable = !!mmioh.s1.enable;
855 base = mmioh.s1.base; 884 base = mmioh.s1.base;
856 m_io = mmioh.s1.m_io; 885 m_io = mmioh.s1.m_io;
857 n_io = mmioh.s1.n_io; 886 n_io = mmioh.s1.n_io;
858 } else if (is_uv2_hub()) { 887 } else if (is_uv2_hub()) {
859 mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR; 888 mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
860 shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 889 shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
861 mmioh.v = uv_read_local_mmr(mmr); 890 mmioh.v = uv_read_local_mmr(mmr);
862 enable = !!mmioh.s2.enable; 891 enable = !!mmioh.s2.enable;
863 base = mmioh.s2.base; 892 base = mmioh.s2.base;
864 m_io = mmioh.s2.m_io; 893 m_io = mmioh.s2.m_io;
865 n_io = mmioh.s2.n_io; 894 n_io = mmioh.s2.n_io;
866 } else 895 } else {
867 return; 896 return;
897 }
868 898
869 if (enable) { 899 if (enable) {
870 max_pnode &= (1 << n_io) - 1; 900 max_pnode &= (1 << n_io) - 1;
871 pr_info( 901 pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", base, shift, m_io, n_io, max_pnode);
872 "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
873 base, shift, m_io, n_io, max_pnode);
874 map_high("MMIOH", base, shift, m_io, max_pnode, map_uc); 902 map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
875 } else { 903 } else {
876 pr_info("UV: MMIOH disabled\n"); 904 pr_info("UV: MMIOH disabled\n");
@@ -888,16 +916,16 @@ static __init void uv_rtc_init(void)
888 long status; 916 long status;
889 u64 ticks_per_sec; 917 u64 ticks_per_sec;
890 918
891 status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, 919 status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec);
892 &ticks_per_sec); 920
893 if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) { 921 if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
894 printk(KERN_WARNING 922 pr_warn("UV: unable to determine platform RTC clock frequency, guessing.\n");
895 "unable to determine platform RTC clock frequency, " 923
896 "guessing.\n"); 924 /* BIOS gives wrong value for clock frequency, so guess: */
897 /* BIOS gives wrong value for clock freq. so guess */
898 sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; 925 sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
899 } else 926 } else {
900 sn_rtc_cycles_per_second = ticks_per_sec; 927 sn_rtc_cycles_per_second = ticks_per_sec;
928 }
901} 929}
902 930
903/* 931/*
@@ -908,19 +936,19 @@ static void uv_heartbeat(unsigned long ignored)
908 struct timer_list *timer = &uv_scir_info->timer; 936 struct timer_list *timer = &uv_scir_info->timer;
909 unsigned char bits = uv_scir_info->state; 937 unsigned char bits = uv_scir_info->state;
910 938
911 /* flip heartbeat bit */ 939 /* Flip heartbeat bit: */
912 bits ^= SCIR_CPU_HEARTBEAT; 940 bits ^= SCIR_CPU_HEARTBEAT;
913 941
914 /* is this cpu idle? */ 942 /* Is this CPU idle? */
915 if (idle_cpu(raw_smp_processor_id())) 943 if (idle_cpu(raw_smp_processor_id()))
916 bits &= ~SCIR_CPU_ACTIVITY; 944 bits &= ~SCIR_CPU_ACTIVITY;
917 else 945 else
918 bits |= SCIR_CPU_ACTIVITY; 946 bits |= SCIR_CPU_ACTIVITY;
919 947
920 /* update system controller interface reg */ 948 /* Update system controller interface reg: */
921 uv_set_scir_bits(bits); 949 uv_set_scir_bits(bits);
922 950
923 /* enable next timer period */ 951 /* Enable next timer period: */
924 mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); 952 mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
925} 953}
926 954
@@ -935,7 +963,7 @@ static int uv_heartbeat_enable(unsigned int cpu)
935 add_timer_on(timer, cpu); 963 add_timer_on(timer, cpu);
936 uv_cpu_scir_info(cpu)->enabled = 1; 964 uv_cpu_scir_info(cpu)->enabled = 1;
937 965
938 /* also ensure that boot cpu is enabled */ 966 /* Also ensure that boot CPU is enabled: */
939 cpu = 0; 967 cpu = 0;
940 } 968 }
941 return 0; 969 return 0;
@@ -968,9 +996,11 @@ static __init int uv_init_heartbeat(void)
968{ 996{
969 int cpu; 997 int cpu;
970 998
971 if (is_uv_system()) 999 if (is_uv_system()) {
972 for_each_online_cpu(cpu) 1000 for_each_online_cpu(cpu)
973 uv_heartbeat_enable(cpu); 1001 uv_heartbeat_enable(cpu);
1002 }
1003
974 return 0; 1004 return 0;
975} 1005}
976 1006
@@ -979,14 +1009,10 @@ late_initcall(uv_init_heartbeat);
979#endif /* !CONFIG_HOTPLUG_CPU */ 1009#endif /* !CONFIG_HOTPLUG_CPU */
980 1010
981/* Direct Legacy VGA I/O traffic to designated IOH */ 1011/* Direct Legacy VGA I/O traffic to designated IOH */
982int uv_set_vga_state(struct pci_dev *pdev, bool decode, 1012int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
983 unsigned int command_bits, u32 flags)
984{ 1013{
985 int domain, bus, rc; 1014 int domain, bus, rc;
986 1015
987 PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
988 pdev->devfn, decode, command_bits, flags);
989
990 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) 1016 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
991 return 0; 1017 return 0;
992 1018
@@ -997,13 +1023,12 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
997 bus = pdev->bus->number; 1023 bus = pdev->bus->number;
998 1024
999 rc = uv_bios_set_legacy_vga_target(decode, domain, bus); 1025 rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
1000 PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);
1001 1026
1002 return rc; 1027 return rc;
1003} 1028}
1004 1029
1005/* 1030/*
1006 * Called on each cpu to initialize the per_cpu UV data area. 1031 * Called on each CPU to initialize the per_cpu UV data area.
1007 * FIXME: hotplug not supported yet 1032 * FIXME: hotplug not supported yet
1008 */ 1033 */
1009void uv_cpu_init(void) 1034void uv_cpu_init(void)
@@ -1030,90 +1055,79 @@ static void get_mn(struct mn *mnp)
1030 union uvh_rh_gam_config_mmr_u m_n_config; 1055 union uvh_rh_gam_config_mmr_u m_n_config;
1031 union uv3h_gr0_gam_gr_config_u m_gr_config; 1056 union uv3h_gr0_gam_gr_config_u m_gr_config;
1032 1057
1033 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR); 1058 /* Make sure the whole structure is well initialized: */
1034 mnp->n_val = m_n_config.s.n_skt; 1059 memset(mnp, 0, sizeof(*mnp));
1060
1061 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
1062 mnp->n_val = m_n_config.s.n_skt;
1063
1035 if (is_uv4_hub()) { 1064 if (is_uv4_hub()) {
1036 mnp->m_val = 0; 1065 mnp->m_val = 0;
1037 mnp->n_lshift = 0; 1066 mnp->n_lshift = 0;
1038 } else if (is_uv3_hub()) { 1067 } else if (is_uv3_hub()) {
1039 mnp->m_val = m_n_config.s3.m_skt; 1068 mnp->m_val = m_n_config.s3.m_skt;
1040 m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG); 1069 m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
1041 mnp->n_lshift = m_gr_config.s3.m_skt; 1070 mnp->n_lshift = m_gr_config.s3.m_skt;
1042 } else if (is_uv2_hub()) { 1071 } else if (is_uv2_hub()) {
1043 mnp->m_val = m_n_config.s2.m_skt; 1072 mnp->m_val = m_n_config.s2.m_skt;
1044 mnp->n_lshift = mnp->m_val == 40 ? 40 : 39; 1073 mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
1045 } else if (is_uv1_hub()) { 1074 } else if (is_uv1_hub()) {
1046 mnp->m_val = m_n_config.s1.m_skt; 1075 mnp->m_val = m_n_config.s1.m_skt;
1047 mnp->n_lshift = mnp->m_val; 1076 mnp->n_lshift = mnp->m_val;
1048 } 1077 }
1049 mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0; 1078 mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
1050} 1079}
1051 1080
1052void __init uv_init_hub_info(struct uv_hub_info_s *hub_info) 1081void __init uv_init_hub_info(struct uv_hub_info_s *hi)
1053{ 1082{
1054 struct mn mn = {0}; /* avoid unitialized warnings */
1055 union uvh_node_id_u node_id; 1083 union uvh_node_id_u node_id;
1084 struct mn mn;
1056 1085
1057 get_mn(&mn); 1086 get_mn(&mn);
1058 hub_info->m_val = mn.m_val; 1087 hi->gpa_mask = mn.m_val ?
1059 hub_info->n_val = mn.n_val;
1060 hub_info->m_shift = mn.m_shift;
1061 hub_info->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
1062
1063 hub_info->hub_revision = uv_hub_info->hub_revision;
1064 hub_info->pnode_mask = uv_cpuid.pnode_mask;
1065 hub_info->min_pnode = _min_pnode;
1066 hub_info->min_socket = _min_socket;
1067 hub_info->pnode_to_socket = _pnode_to_socket;
1068 hub_info->socket_to_node = _socket_to_node;
1069 hub_info->socket_to_pnode = _socket_to_pnode;
1070 hub_info->gr_table_len = _gr_table_len;
1071 hub_info->gr_table = _gr_table;
1072 hub_info->gpa_mask = mn.m_val ?
1073 (1UL << (mn.m_val + mn.n_val)) - 1 : 1088 (1UL << (mn.m_val + mn.n_val)) - 1 :
1074 (1UL << uv_cpuid.gpa_shift) - 1; 1089 (1UL << uv_cpuid.gpa_shift) - 1;
1075 1090
1076 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 1091 hi->m_val = mn.m_val;
1077 hub_info->gnode_extra = 1092 hi->n_val = mn.n_val;
1078 (node_id.s.node_id & ~((1 << mn.n_val) - 1)) >> 1; 1093 hi->m_shift = mn.m_shift;
1079 1094 hi->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
1080 hub_info->gnode_upper = 1095 hi->hub_revision = uv_hub_info->hub_revision;
1081 ((unsigned long)hub_info->gnode_extra << mn.m_val); 1096 hi->pnode_mask = uv_cpuid.pnode_mask;
1097 hi->min_pnode = _min_pnode;
1098 hi->min_socket = _min_socket;
1099 hi->pnode_to_socket = _pnode_to_socket;
1100 hi->socket_to_node = _socket_to_node;
1101 hi->socket_to_pnode = _socket_to_pnode;
1102 hi->gr_table_len = _gr_table_len;
1103 hi->gr_table = _gr_table;
1104
1105 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
1106 uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
1107 hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
1108 hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val;
1082 1109
1083 if (uv_gp_table) { 1110 if (uv_gp_table) {
1084 hub_info->global_mmr_base = uv_gp_table->mmr_base; 1111 hi->global_mmr_base = uv_gp_table->mmr_base;
1085 hub_info->global_mmr_shift = uv_gp_table->mmr_shift; 1112 hi->global_mmr_shift = uv_gp_table->mmr_shift;
1086 hub_info->global_gru_base = uv_gp_table->gru_base; 1113 hi->global_gru_base = uv_gp_table->gru_base;
1087 hub_info->global_gru_shift = uv_gp_table->gru_shift; 1114 hi->global_gru_shift = uv_gp_table->gru_shift;
1088 hub_info->gpa_shift = uv_gp_table->gpa_shift; 1115 hi->gpa_shift = uv_gp_table->gpa_shift;
1089 hub_info->gpa_mask = (1UL << hub_info->gpa_shift) - 1; 1116 hi->gpa_mask = (1UL << hi->gpa_shift) - 1;
1090 } else { 1117 } else {
1091 hub_info->global_mmr_base = 1118 hi->global_mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE;
1092 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & 1119 hi->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
1093 ~UV_MMR_ENABLE;
1094 hub_info->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
1095 } 1120 }
1096 1121
1097 get_lowmem_redirect( 1122 get_lowmem_redirect(&hi->lowmem_remap_base, &hi->lowmem_remap_top);
1098 &hub_info->lowmem_remap_base, &hub_info->lowmem_remap_top);
1099
1100 hub_info->apic_pnode_shift = uv_cpuid.socketid_shift;
1101
1102 /* show system specific info */
1103 pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n",
1104 hub_info->n_val, hub_info->m_val,
1105 hub_info->m_shift, hub_info->n_lshift);
1106
1107 pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n",
1108 hub_info->gpa_mask, hub_info->gpa_shift,
1109 hub_info->pnode_mask, hub_info->apic_pnode_shift);
1110 1123
1111 pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", 1124 hi->apic_pnode_shift = uv_cpuid.socketid_shift;
1112 hub_info->global_mmr_base, hub_info->global_mmr_shift,
1113 hub_info->global_gru_base, hub_info->global_gru_shift);
1114 1125
1115 pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", 1126 /* Show system specific info: */
1116 hub_info->gnode_upper, hub_info->gnode_extra); 1127 pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift);
1128 pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift);
1129 pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift, hi->global_gru_base, hi->global_gru_shift);
1130 pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra);
1117} 1131}
1118 1132
1119static void __init decode_gam_params(unsigned long ptr) 1133static void __init decode_gam_params(unsigned long ptr)
@@ -1139,12 +1153,9 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
1139 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1153 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
1140 if (!index) { 1154 if (!index) {
1141 pr_info("UV: GAM Range Table...\n"); 1155 pr_info("UV: GAM Range Table...\n");
1142 pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", 1156 pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
1143 "Range", "", "Size", "Type", "NASID",
1144 "SID", "PN");
1145 } 1157 }
1146 pr_info( 1158 pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
1147 "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
1148 index++, 1159 index++,
1149 (unsigned long)lgre << UV_GAM_RANGE_SHFT, 1160 (unsigned long)lgre << UV_GAM_RANGE_SHFT,
1150 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, 1161 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
@@ -1162,29 +1173,32 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
1162 if (pnode_max < gre->pnode) 1173 if (pnode_max < gre->pnode)
1163 pnode_max = gre->pnode; 1174 pnode_max = gre->pnode;
1164 } 1175 }
1165 _min_socket = sock_min; 1176 _min_socket = sock_min;
1166 _max_socket = sock_max; 1177 _max_socket = sock_max;
1167 _min_pnode = pnode_min; 1178 _min_pnode = pnode_min;
1168 _max_pnode = pnode_max; 1179 _max_pnode = pnode_max;
1169 _gr_table_len = index; 1180 _gr_table_len = index;
1170 pr_info( 1181
1171 "UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", 1182 pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode);
1172 index, _min_socket, _max_socket, _min_pnode, _max_pnode);
1173} 1183}
1174 1184
1175static void __init decode_uv_systab(void) 1185static int __init decode_uv_systab(void)
1176{ 1186{
1177 struct uv_systab *st; 1187 struct uv_systab *st;
1178 int i; 1188 int i;
1179 1189
1190 if (uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE)
1191 return 0; /* No extended UVsystab required */
1192
1180 st = uv_systab; 1193 st = uv_systab;
1181 if ((!st || st->revision < UV_SYSTAB_VERSION_UV4) && !is_uv4_hub()) 1194 if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) {
1182 return; 1195 int rev = st ? st->revision : 0;
1183 if (st->revision != UV_SYSTAB_VERSION_UV4_LATEST) { 1196
1184 pr_crit( 1197 pr_err("UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST);
1185 "UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", 1198 pr_err("UV: Cannot support UV operations, switching to generic PC\n");
1186 st->revision, UV_SYSTAB_VERSION_UV4_LATEST); 1199 uv_system_type = UV_NONE;
1187 BUG(); 1200
1201 return -EINVAL;
1188 } 1202 }
1189 1203
1190 for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) { 1204 for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
@@ -1205,10 +1219,11 @@ static void __init decode_uv_systab(void)
1205 break; 1219 break;
1206 } 1220 }
1207 } 1221 }
1222 return 0;
1208} 1223}
1209 1224
1210/* 1225/*
1211 * Setup physical blade translations from UVH_NODE_PRESENT_TABLE 1226 * Set up physical blade translations from UVH_NODE_PRESENT_TABLE
1212 * .. NB: UVH_NODE_PRESENT_TABLE is going away, 1227 * .. NB: UVH_NODE_PRESENT_TABLE is going away,
1213 * .. being replaced by GAM Range Table 1228 * .. being replaced by GAM Range Table
1214 */ 1229 */
@@ -1244,14 +1259,13 @@ static void __init build_socket_tables(void)
1244 if (!gre) { 1259 if (!gre) {
1245 if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) { 1260 if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
1246 pr_info("UV: No UVsystab socket table, ignoring\n"); 1261 pr_info("UV: No UVsystab socket table, ignoring\n");
1247 return; /* not required */ 1262 return;
1248 } 1263 }
1249 pr_crit( 1264 pr_crit("UV: Error: UVsystab address translations not available!\n");
1250 "UV: Error: UVsystab address translations not available!\n");
1251 BUG(); 1265 BUG();
1252 } 1266 }
1253 1267
1254 /* build socket id -> node id, pnode */ 1268 /* Build socket id -> node id, pnode */
1255 num = maxsock - minsock + 1; 1269 num = maxsock - minsock + 1;
1256 bytes = num * sizeof(_socket_to_node[0]); 1270 bytes = num * sizeof(_socket_to_node[0]);
1257 _socket_to_node = kmalloc(bytes, GFP_KERNEL); 1271 _socket_to_node = kmalloc(bytes, GFP_KERNEL);
@@ -1268,27 +1282,27 @@ static void __init build_socket_tables(void)
1268 for (i = 0; i < nump; i++) 1282 for (i = 0; i < nump; i++)
1269 _pnode_to_socket[i] = SOCK_EMPTY; 1283 _pnode_to_socket[i] = SOCK_EMPTY;
1270 1284
1271 /* fill in pnode/node/addr conversion list values */ 1285 /* Fill in pnode/node/addr conversion list values: */
1272 pr_info("UV: GAM Building socket/pnode conversion tables\n"); 1286 pr_info("UV: GAM Building socket/pnode conversion tables\n");
1273 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1287 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
1274 if (gre->type == UV_GAM_RANGE_TYPE_HOLE) 1288 if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
1275 continue; 1289 continue;
1276 i = gre->sockid - minsock; 1290 i = gre->sockid - minsock;
1291 /* Duplicate: */
1277 if (_socket_to_pnode[i] != SOCK_EMPTY) 1292 if (_socket_to_pnode[i] != SOCK_EMPTY)
1278 continue; /* duplicate */ 1293 continue;
1279 _socket_to_pnode[i] = gre->pnode; 1294 _socket_to_pnode[i] = gre->pnode;
1280 1295
1281 i = gre->pnode - minpnode; 1296 i = gre->pnode - minpnode;
1282 _pnode_to_socket[i] = gre->sockid; 1297 _pnode_to_socket[i] = gre->sockid;
1283 1298
1284 pr_info( 1299 pr_info("UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
1285 "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
1286 gre->sockid, gre->type, gre->nasid, 1300 gre->sockid, gre->type, gre->nasid,
1287 _socket_to_pnode[gre->sockid - minsock], 1301 _socket_to_pnode[gre->sockid - minsock],
1288 _pnode_to_socket[gre->pnode - minpnode]); 1302 _pnode_to_socket[gre->pnode - minpnode]);
1289 } 1303 }
1290 1304
1291 /* Set socket -> node values */ 1305 /* Set socket -> node values: */
1292 lnid = -1; 1306 lnid = -1;
1293 for_each_present_cpu(cpu) { 1307 for_each_present_cpu(cpu) {
1294 int nid = cpu_to_node(cpu); 1308 int nid = cpu_to_node(cpu);
@@ -1304,7 +1318,7 @@ static void __init build_socket_tables(void)
1304 sockid, apicid, nid); 1318 sockid, apicid, nid);
1305 } 1319 }
1306 1320
1307 /* Setup physical blade to pnode translation from GAM Range Table */ 1321 /* Set up physical blade to pnode translation from GAM Range Table: */
1308 bytes = num_possible_nodes() * sizeof(_node_to_pnode[0]); 1322 bytes = num_possible_nodes() * sizeof(_node_to_pnode[0]);
1309 _node_to_pnode = kmalloc(bytes, GFP_KERNEL); 1323 _node_to_pnode = kmalloc(bytes, GFP_KERNEL);
1310 BUG_ON(!_node_to_pnode); 1324 BUG_ON(!_node_to_pnode);
@@ -1314,8 +1328,7 @@ static void __init build_socket_tables(void)
1314 1328
1315 for (sockid = minsock; sockid <= maxsock; sockid++) { 1329 for (sockid = minsock; sockid <= maxsock; sockid++) {
1316 if (lnid == _socket_to_node[sockid - minsock]) { 1330 if (lnid == _socket_to_node[sockid - minsock]) {
1317 _node_to_pnode[lnid] = 1331 _node_to_pnode[lnid] = _socket_to_pnode[sockid - minsock];
1318 _socket_to_pnode[sockid - minsock];
1319 break; 1332 break;
1320 } 1333 }
1321 } 1334 }
@@ -1332,8 +1345,7 @@ static void __init build_socket_tables(void)
1332 pr_info("UV: Checking socket->node/pnode for identity maps\n"); 1345 pr_info("UV: Checking socket->node/pnode for identity maps\n");
1333 if (minsock == 0) { 1346 if (minsock == 0) {
1334 for (i = 0; i < num; i++) 1347 for (i = 0; i < num; i++)
1335 if (_socket_to_node[i] == SOCK_EMPTY || 1348 if (_socket_to_node[i] == SOCK_EMPTY || i != _socket_to_node[i])
1336 i != _socket_to_node[i])
1337 break; 1349 break;
1338 if (i >= num) { 1350 if (i >= num) {
1339 kfree(_socket_to_node); 1351 kfree(_socket_to_node);
@@ -1354,7 +1366,7 @@ static void __init build_socket_tables(void)
1354 } 1366 }
1355} 1367}
1356 1368
1357void __init uv_system_init(void) 1369static void __init uv_system_init_hub(void)
1358{ 1370{
1359 struct uv_hub_info_s hub_info = {0}; 1371 struct uv_hub_info_s hub_info = {0};
1360 int bytes, cpu, nodeid; 1372 int bytes, cpu, nodeid;
@@ -1372,8 +1384,13 @@ void __init uv_system_init(void)
1372 1384
1373 map_low_mmrs(); 1385 map_low_mmrs();
1374 1386
1375 uv_bios_init(); /* get uv_systab for decoding */ 1387 /* Get uv_systab for decoding: */
1376 decode_uv_systab(); 1388 uv_bios_init();
1389
1390 /* If there's an UVsystab problem then abort UV init: */
1391 if (decode_uv_systab() < 0)
1392 return;
1393
1377 build_socket_tables(); 1394 build_socket_tables();
1378 build_uv_gr_table(); 1395 build_uv_gr_table();
1379 uv_init_hub_info(&hub_info); 1396 uv_init_hub_info(&hub_info);
@@ -1381,14 +1398,10 @@ void __init uv_system_init(void)
1381 if (!_node_to_pnode) 1398 if (!_node_to_pnode)
1382 boot_init_possible_blades(&hub_info); 1399 boot_init_possible_blades(&hub_info);
1383 1400
1384 /* uv_num_possible_blades() is really the hub count */ 1401 /* uv_num_possible_blades() is really the hub count: */
1385 pr_info("UV: Found %d hubs, %d nodes, %d cpus\n", 1402 pr_info("UV: Found %d hubs, %d nodes, %d CPUs\n", uv_num_possible_blades(), num_possible_nodes(), num_possible_cpus());
1386 uv_num_possible_blades(),
1387 num_possible_nodes(),
1388 num_possible_cpus());
1389 1403
1390 uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id, 1404 uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id, &sn_region_size, &system_serial_number);
1391 &sn_region_size, &system_serial_number);
1392 hub_info.coherency_domain_number = sn_coherency_id; 1405 hub_info.coherency_domain_number = sn_coherency_id;
1393 uv_rtc_init(); 1406 uv_rtc_init();
1394 1407
@@ -1401,33 +1414,31 @@ void __init uv_system_init(void)
1401 struct uv_hub_info_s *new_hub; 1414 struct uv_hub_info_s *new_hub;
1402 1415
1403 if (__uv_hub_info_list[nodeid]) { 1416 if (__uv_hub_info_list[nodeid]) {
1404 pr_err("UV: Node %d UV HUB already initialized!?\n", 1417 pr_err("UV: Node %d UV HUB already initialized!?\n", nodeid);
1405 nodeid);
1406 BUG(); 1418 BUG();
1407 } 1419 }
1408 1420
1409 /* Allocate new per hub info list */ 1421 /* Allocate new per hub info list */
1410 new_hub = (nodeid == 0) ? 1422 new_hub = (nodeid == 0) ? &uv_hub_info_node0 : kzalloc_node(bytes, GFP_KERNEL, nodeid);
1411 &uv_hub_info_node0 :
1412 kzalloc_node(bytes, GFP_KERNEL, nodeid);
1413 BUG_ON(!new_hub); 1423 BUG_ON(!new_hub);
1414 __uv_hub_info_list[nodeid] = new_hub; 1424 __uv_hub_info_list[nodeid] = new_hub;
1415 new_hub = uv_hub_info_list(nodeid); 1425 new_hub = uv_hub_info_list(nodeid);
1416 BUG_ON(!new_hub); 1426 BUG_ON(!new_hub);
1417 *new_hub = hub_info; 1427 *new_hub = hub_info;
1418 1428
1419 /* Use information from GAM table if available */ 1429 /* Use information from GAM table if available: */
1420 if (_node_to_pnode) 1430 if (_node_to_pnode)
1421 new_hub->pnode = _node_to_pnode[nodeid]; 1431 new_hub->pnode = _node_to_pnode[nodeid];
1422 else /* Fill in during cpu loop */ 1432 else /* Or fill in during CPU loop: */
1423 new_hub->pnode = 0xffff; 1433 new_hub->pnode = 0xffff;
1434
1424 new_hub->numa_blade_id = uv_node_to_blade_id(nodeid); 1435 new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
1425 new_hub->memory_nid = -1; 1436 new_hub->memory_nid = -1;
1426 new_hub->nr_possible_cpus = 0; 1437 new_hub->nr_possible_cpus = 0;
1427 new_hub->nr_online_cpus = 0; 1438 new_hub->nr_online_cpus = 0;
1428 } 1439 }
1429 1440
1430 /* Initialize per cpu info */ 1441 /* Initialize per CPU info: */
1431 for_each_possible_cpu(cpu) { 1442 for_each_possible_cpu(cpu) {
1432 int apicid = per_cpu(x86_cpu_to_apicid, cpu); 1443 int apicid = per_cpu(x86_cpu_to_apicid, cpu);
1433 int numa_node_id; 1444 int numa_node_id;
@@ -1438,22 +1449,24 @@ void __init uv_system_init(void)
1438 pnode = uv_apicid_to_pnode(apicid); 1449 pnode = uv_apicid_to_pnode(apicid);
1439 1450
1440 uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid); 1451 uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
1441 uv_cpu_info_per(cpu)->blade_cpu_id = 1452 uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
1442 uv_cpu_hub_info(cpu)->nr_possible_cpus++;
1443 if (uv_cpu_hub_info(cpu)->memory_nid == -1) 1453 if (uv_cpu_hub_info(cpu)->memory_nid == -1)
1444 uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu); 1454 uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
1445 if (nodeid != numa_node_id && /* init memoryless node */ 1455
1456 /* Init memoryless node: */
1457 if (nodeid != numa_node_id &&
1446 uv_hub_info_list(numa_node_id)->pnode == 0xffff) 1458 uv_hub_info_list(numa_node_id)->pnode == 0xffff)
1447 uv_hub_info_list(numa_node_id)->pnode = pnode; 1459 uv_hub_info_list(numa_node_id)->pnode = pnode;
1448 else if (uv_cpu_hub_info(cpu)->pnode == 0xffff) 1460 else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
1449 uv_cpu_hub_info(cpu)->pnode = pnode; 1461 uv_cpu_hub_info(cpu)->pnode = pnode;
1462
1450 uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid); 1463 uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
1451 } 1464 }
1452 1465
1453 for_each_node(nodeid) { 1466 for_each_node(nodeid) {
1454 unsigned short pnode = uv_hub_info_list(nodeid)->pnode; 1467 unsigned short pnode = uv_hub_info_list(nodeid)->pnode;
1455 1468
1456 /* Add pnode info for pre-GAM list nodes without cpus */ 1469 /* Add pnode info for pre-GAM list nodes without CPUs: */
1457 if (pnode == 0xffff) { 1470 if (pnode == 0xffff) {
1458 unsigned long paddr; 1471 unsigned long paddr;
1459 1472
@@ -1479,15 +1492,30 @@ void __init uv_system_init(void)
1479 uv_scir_register_cpu_notifier(); 1492 uv_scir_register_cpu_notifier();
1480 proc_mkdir("sgi_uv", NULL); 1493 proc_mkdir("sgi_uv", NULL);
1481 1494
1482 /* register Legacy VGA I/O redirection handler */ 1495 /* Register Legacy VGA I/O redirection handler: */
1483 pci_register_set_vga_state(uv_set_vga_state); 1496 pci_register_set_vga_state(uv_set_vga_state);
1484 1497
1485 /* 1498 /*
1486 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as 1499 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
1487 * EFI is not enabled in the kdump kernel. 1500 * EFI is not enabled in the kdump kernel:
1488 */ 1501 */
1489 if (is_kdump_kernel()) 1502 if (is_kdump_kernel())
1490 reboot_type = BOOT_ACPI; 1503 reboot_type = BOOT_ACPI;
1491} 1504}
1492 1505
1506/*
1507 * There is a small amount of UV specific code needed to initialize a
1508 * UV system that does not have a "UV HUB" (referred to as "hubless").
1509 */
1510void __init uv_system_init(void)
1511{
1512 if (likely(!is_uv_system() && !is_uv_hubless()))
1513 return;
1514
1515 if (is_uv_system())
1516 uv_system_init_hub();
1517 else
1518 uv_nmi_setup_hubless();
1519}
1520
1493apic_driver(apic_x2apic_uv_x); 1521apic_driver(apic_x2apic_uv_x);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 99b920d0e516..a0d38685f7df 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1347,8 +1347,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1347 pr_info("CPU0: "); 1347 pr_info("CPU0: ");
1348 print_cpu_info(&cpu_data(0)); 1348 print_cpu_info(&cpu_data(0));
1349 1349
1350 if (is_uv_system()) 1350 uv_system_init();
1351 uv_system_init();
1352 1351
1353 set_mtrr_aps_delayed_init(); 1352 set_mtrr_aps_delayed_init();
1354 1353
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 90e4f2a6625b..a7dbec4dce27 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -5,14 +5,12 @@ obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o
5# WiFi 5# WiFi
6obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o 6obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o
7# IPC Devices 7# IPC Devices
8obj-y += platform_ipc.o
9obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o 8obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
10obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o 9obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o
11obj-$(subst m,y,$(CONFIG_GPIO_MSIC)) += platform_msic_gpio.o 10obj-$(subst m,y,$(CONFIG_GPIO_MSIC)) += platform_msic_gpio.o
12obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_ocd.o 11obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_ocd.o
13obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o 12obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o
14obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o 13obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
15obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
16obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o 14obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
17# SPI Devices 15# SPI Devices
18obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o 16obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
@@ -28,4 +26,5 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
28obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o 26obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
29# MISC Devices 27# MISC Devices
30obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o 28obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
29obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
31obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o 30obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
index 52534ec29765..74283875c7e8 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -32,6 +32,9 @@ static struct gpio_keys_button gpio_button[] = {
32 {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20}, 32 {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
33 {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20}, 33 {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
34 {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20}, 34 {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
35 {KEY_MUTE, -1, 1, "mute_enable", EV_KEY, 0, 20},
36 {KEY_VOLUMEUP, -1, 1, "volume_up", EV_KEY, 0, 20},
37 {KEY_VOLUMEDOWN, -1, 1, "volume_down", EV_KEY, 0, 20},
35 {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20}, 38 {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
36 {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20}, 39 {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
37 {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20}, 40 {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.c b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
deleted file mode 100644
index a84b73d6c4a0..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * platform_ipc.c: IPC platform library file
3 *
4 * (C) Copyright 2013 Intel Corporation
5 * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/interrupt.h>
16#include <linux/sfi.h>
17#include <linux/gpio.h>
18#include <asm/intel-mid.h>
19#include "platform_ipc.h"
20
21void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
22 struct devs_id *dev)
23{
24 struct platform_device *pdev;
25 void *pdata = NULL;
26 static struct resource res __initdata = {
27 .name = "IRQ",
28 .flags = IORESOURCE_IRQ,
29 };
30
31 pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
32 pentry->name, pentry->irq);
33
34 /*
35 * We need to call platform init of IPC devices to fill misc_pdata
36 * structure. It will be used in msic_init for initialization.
37 */
38 if (dev != NULL)
39 pdata = dev->get_platform_data(pentry);
40
41 /*
42 * On Medfield the platform device creation is handled by the MSIC
43 * MFD driver so we don't need to do it here.
44 */
45 if (intel_mid_has_msic())
46 return;
47
48 pdev = platform_device_alloc(pentry->name, 0);
49 if (pdev == NULL) {
50 pr_err("out of memory for SFI platform device '%s'.\n",
51 pentry->name);
52 return;
53 }
54 res.start = pentry->irq;
55 platform_device_add_resources(pdev, &res, 1);
56
57 pdev->dev.platform_data = pdata;
58 intel_scu_device_register(pdev);
59}
60
61static const struct devs_id pmic_audio_dev_id __initconst = {
62 .name = "pmic_audio",
63 .type = SFI_DEV_TYPE_IPC,
64 .delay = 1,
65 .device_handler = &ipc_device_handler,
66};
67
68sfi_device(pmic_audio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
deleted file mode 100644
index 79bb09d4f718..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * platform_ipc.h: IPC platform library header file
3 *
4 * (C) Copyright 2013 Intel Corporation
5 * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12#ifndef _PLATFORM_IPC_H_
13#define _PLATFORM_IPC_H_
14
15void __init
16ipc_device_handler(struct sfi_device_table_entry *pentry, struct devs_id *dev);
17
18#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c
new file mode 100644
index 000000000000..3135416df037
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c
@@ -0,0 +1,48 @@
1/*
2 * Intel Merrifield legacy RTC initialization file
3 *
4 * (C) Copyright 2017 Intel Corporation
5 *
6 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; version 2
11 * of the License.
12 */
13
14#include <linux/init.h>
15
16#include <asm/hw_irq.h>
17#include <asm/intel-mid.h>
18#include <asm/io_apic.h>
19#include <asm/time.h>
20#include <asm/x86_init.h>
21
22static int __init mrfld_legacy_rtc_alloc_irq(void)
23{
24 struct irq_alloc_info info;
25 int ret;
26
27 if (!x86_platform.legacy.rtc)
28 return -ENODEV;
29
30 ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, 0);
31 ret = mp_map_gsi_to_irq(RTC_IRQ, IOAPIC_MAP_ALLOC, &info);
32 if (ret < 0) {
33 pr_info("Failed to allocate RTC interrupt. Disabling RTC\n");
34 x86_platform.legacy.rtc = 0;
35 return ret;
36 }
37
38 return 0;
39}
40
41static int __init mrfld_legacy_rtc_init(void)
42{
43 if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
44 return -ENODEV;
45
46 return mrfld_legacy_rtc_alloc_irq();
47}
48arch_initcall(mrfld_legacy_rtc_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 3f1f1c77d090..86edd1e941eb 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -28,9 +28,9 @@ static struct platform_device wdt_dev = {
28 28
29static int tangier_probe(struct platform_device *pdev) 29static int tangier_probe(struct platform_device *pdev)
30{ 30{
31 int gsi;
32 struct irq_alloc_info info; 31 struct irq_alloc_info info;
33 struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data; 32 struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
33 int gsi, irq;
34 34
35 if (!pdata) 35 if (!pdata)
36 return -EINVAL; 36 return -EINVAL;
@@ -38,10 +38,10 @@ static int tangier_probe(struct platform_device *pdev)
38 /* IOAPIC builds identity mapping between GSI and IRQ on MID */ 38 /* IOAPIC builds identity mapping between GSI and IRQ on MID */
39 gsi = pdata->irq; 39 gsi = pdata->irq;
40 ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0); 40 ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
41 if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) { 41 irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
42 dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", 42 if (irq < 0) {
43 gsi); 43 dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi);
44 return -EINVAL; 44 return irq;
45 } 45 }
46 46
47 return 0; 47 return 0;
@@ -82,4 +82,4 @@ static int __init register_mid_wdt(void)
82 82
83 return 0; 83 return 0;
84} 84}
85rootfs_initcall(register_mid_wdt); 85arch_initcall(register_mid_wdt);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
index cb3490ecb341..d4dc744dd5a5 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
@@ -20,7 +20,6 @@
20#include <asm/intel-mid.h> 20#include <asm/intel-mid.h>
21 21
22#include "platform_msic.h" 22#include "platform_msic.h"
23#include "platform_ipc.h"
24 23
25static void *msic_audio_platform_data(void *info) 24static void *msic_audio_platform_data(void *info)
26{ 25{
@@ -40,8 +39,8 @@ static const struct devs_id msic_audio_dev_id __initconst = {
40 .name = "msic_audio", 39 .name = "msic_audio",
41 .type = SFI_DEV_TYPE_IPC, 40 .type = SFI_DEV_TYPE_IPC,
42 .delay = 1, 41 .delay = 1,
42 .msic = 1,
43 .get_platform_data = &msic_audio_platform_data, 43 .get_platform_data = &msic_audio_platform_data,
44 .device_handler = &ipc_device_handler,
45}; 44};
46 45
47sfi_device(msic_audio_dev_id); 46sfi_device(msic_audio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
index 4f72193939a6..5c3e9919633f 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
@@ -19,7 +19,6 @@
19#include <asm/intel-mid.h> 19#include <asm/intel-mid.h>
20 20
21#include "platform_msic.h" 21#include "platform_msic.h"
22#include "platform_ipc.h"
23 22
24static void __init *msic_battery_platform_data(void *info) 23static void __init *msic_battery_platform_data(void *info)
25{ 24{
@@ -30,8 +29,8 @@ static const struct devs_id msic_battery_dev_id __initconst = {
30 .name = "msic_battery", 29 .name = "msic_battery",
31 .type = SFI_DEV_TYPE_IPC, 30 .type = SFI_DEV_TYPE_IPC,
32 .delay = 1, 31 .delay = 1,
32 .msic = 1,
33 .get_platform_data = &msic_battery_platform_data, 33 .get_platform_data = &msic_battery_platform_data,
34 .device_handler = &ipc_device_handler,
35}; 34};
36 35
37sfi_device(msic_battery_dev_id); 36sfi_device(msic_battery_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
index 70de5b531ba0..9fdb88d460d7 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
@@ -20,7 +20,6 @@
20#include <asm/intel-mid.h> 20#include <asm/intel-mid.h>
21 21
22#include "platform_msic.h" 22#include "platform_msic.h"
23#include "platform_ipc.h"
24 23
25static void __init *msic_gpio_platform_data(void *info) 24static void __init *msic_gpio_platform_data(void *info)
26{ 25{
@@ -41,8 +40,8 @@ static const struct devs_id msic_gpio_dev_id __initconst = {
41 .name = "msic_gpio", 40 .name = "msic_gpio",
42 .type = SFI_DEV_TYPE_IPC, 41 .type = SFI_DEV_TYPE_IPC,
43 .delay = 1, 42 .delay = 1,
43 .msic = 1,
44 .get_platform_data = &msic_gpio_platform_data, 44 .get_platform_data = &msic_gpio_platform_data,
45 .device_handler = &ipc_device_handler,
46}; 45};
47 46
48sfi_device(msic_gpio_dev_id); 47sfi_device(msic_gpio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
index 3d7c2011b6cf..7ae37cdbf256 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
@@ -20,7 +20,6 @@
20#include <asm/intel-mid.h> 20#include <asm/intel-mid.h>
21 21
22#include "platform_msic.h" 22#include "platform_msic.h"
23#include "platform_ipc.h"
24 23
25static void __init *msic_ocd_platform_data(void *info) 24static void __init *msic_ocd_platform_data(void *info)
26{ 25{
@@ -42,8 +41,8 @@ static const struct devs_id msic_ocd_dev_id __initconst = {
42 .name = "msic_ocd", 41 .name = "msic_ocd",
43 .type = SFI_DEV_TYPE_IPC, 42 .type = SFI_DEV_TYPE_IPC,
44 .delay = 1, 43 .delay = 1,
44 .msic = 1,
45 .get_platform_data = &msic_ocd_platform_data, 45 .get_platform_data = &msic_ocd_platform_data,
46 .device_handler = &ipc_device_handler,
47}; 46};
48 47
49sfi_device(msic_ocd_dev_id); 48sfi_device(msic_ocd_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
index 038f618fbc52..96809b98cf69 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
@@ -18,7 +18,6 @@
18#include <asm/intel-mid.h> 18#include <asm/intel-mid.h>
19 19
20#include "platform_msic.h" 20#include "platform_msic.h"
21#include "platform_ipc.h"
22 21
23static void __init *msic_power_btn_platform_data(void *info) 22static void __init *msic_power_btn_platform_data(void *info)
24{ 23{
@@ -29,8 +28,8 @@ static const struct devs_id msic_power_btn_dev_id __initconst = {
29 .name = "msic_power_btn", 28 .name = "msic_power_btn",
30 .type = SFI_DEV_TYPE_IPC, 29 .type = SFI_DEV_TYPE_IPC,
31 .delay = 1, 30 .delay = 1,
31 .msic = 1,
32 .get_platform_data = &msic_power_btn_platform_data, 32 .get_platform_data = &msic_power_btn_platform_data,
33 .device_handler = &ipc_device_handler,
34}; 33};
35 34
36sfi_device(msic_power_btn_dev_id); 35sfi_device(msic_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
index 114a5755b1e4..3e4167d246cd 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
@@ -19,7 +19,6 @@
19#include <asm/intel-mid.h> 19#include <asm/intel-mid.h>
20 20
21#include "platform_msic.h" 21#include "platform_msic.h"
22#include "platform_ipc.h"
23 22
24static void __init *msic_thermal_platform_data(void *info) 23static void __init *msic_thermal_platform_data(void *info)
25{ 24{
@@ -30,8 +29,8 @@ static const struct devs_id msic_thermal_dev_id __initconst = {
30 .name = "msic_thermal", 29 .name = "msic_thermal",
31 .type = SFI_DEV_TYPE_IPC, 30 .type = SFI_DEV_TYPE_IPC,
32 .delay = 1, 31 .delay = 1,
32 .msic = 1,
33 .get_platform_data = &msic_thermal_platform_data, 33 .get_platform_data = &msic_thermal_platform_data,
34 .device_handler = &ipc_device_handler,
35}; 34};
36 35
37sfi_device(msic_thermal_dev_id); 36sfi_device(msic_thermal_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
deleted file mode 100644
index e30cb62e3300..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * platform_pmic_gpio.c: PMIC GPIO platform data initialization file
3 *
4 * (C) Copyright 2013 Intel Corporation
5 * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/scatterlist.h>
16#include <linux/gpio.h>
17#include <linux/init.h>
18#include <linux/sfi.h>
19#include <linux/intel_pmic_gpio.h>
20#include <asm/intel-mid.h>
21
22#include "platform_ipc.h"
23
24static void __init *pmic_gpio_platform_data(void *info)
25{
26 static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
27 int gpio_base = get_gpio_by_name("pmic_gpio_base");
28
29 if (gpio_base < 0)
30 gpio_base = 64;
31 pmic_gpio_pdata.gpio_base = gpio_base;
32 pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
33 pmic_gpio_pdata.gpiointr = 0xffffeff8;
34
35 return &pmic_gpio_pdata;
36}
37
38static const struct devs_id pmic_gpio_spi_dev_id __initconst = {
39 .name = "pmic_gpio",
40 .type = SFI_DEV_TYPE_SPI,
41 .delay = 1,
42 .get_platform_data = &pmic_gpio_platform_data,
43};
44
45static const struct devs_id pmic_gpio_ipc_dev_id __initconst = {
46 .name = "pmic_gpio",
47 .type = SFI_DEV_TYPE_IPC,
48 .delay = 1,
49 .get_platform_data = &pmic_gpio_platform_data,
50 .device_handler = &ipc_device_handler
51};
52
53sfi_device(pmic_gpio_spi_dev_id);
54sfi_device(pmic_gpio_ipc_dev_id);
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
index e0607c77a1bd..ae7bdeb0e507 100644
--- a/arch/x86/platform/intel-mid/mrfld.c
+++ b/arch/x86/platform/intel-mid/mrfld.c
@@ -91,6 +91,7 @@ static unsigned long __init tangier_calibrate_tsc(void)
91static void __init tangier_arch_setup(void) 91static void __init tangier_arch_setup(void)
92{ 92{
93 x86_platform.calibrate_tsc = tangier_calibrate_tsc; 93 x86_platform.calibrate_tsc = tangier_calibrate_tsc;
94 x86_platform.legacy.rtc = 1;
94} 95}
95 96
96/* tangier arch ops */ 97/* tangier arch ops */
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 051d264fce2e..19b43e3a9f0f 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -15,7 +15,6 @@
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
17#include <linux/sfi.h> 17#include <linux/sfi.h>
18#include <linux/intel_pmic_gpio.h>
19#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
20#include <linux/i2c.h> 19#include <linux/i2c.h>
21#include <linux/skbuff.h> 20#include <linux/skbuff.h>
@@ -226,7 +225,7 @@ int get_gpio_by_name(const char *name)
226 return -EINVAL; 225 return -EINVAL;
227} 226}
228 227
229void __init intel_scu_device_register(struct platform_device *pdev) 228static void __init intel_scu_ipc_device_register(struct platform_device *pdev)
230{ 229{
231 if (ipc_next_dev == MAX_IPCDEVS) 230 if (ipc_next_dev == MAX_IPCDEVS)
232 pr_err("too many SCU IPC devices"); 231 pr_err("too many SCU IPC devices");
@@ -335,10 +334,22 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
335 334
336 pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", 335 pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
337 pentry->name, pentry->irq); 336 pentry->name, pentry->irq);
337
338 /*
339 * We need to call platform init of IPC devices to fill misc_pdata
340 * structure. It will be used in msic_init for initialization.
341 */
338 pdata = intel_mid_sfi_get_pdata(dev, pentry); 342 pdata = intel_mid_sfi_get_pdata(dev, pentry);
339 if (IS_ERR(pdata)) 343 if (IS_ERR(pdata))
340 return; 344 return;
341 345
346 /*
347 * On Medfield the platform device creation is handled by the MSIC
348 * MFD driver so we don't need to do it here.
349 */
350 if (dev->msic && intel_mid_has_msic())
351 return;
352
342 pdev = platform_device_alloc(pentry->name, 0); 353 pdev = platform_device_alloc(pentry->name, 0);
343 if (pdev == NULL) { 354 if (pdev == NULL) {
344 pr_err("out of memory for SFI platform device '%s'.\n", 355 pr_err("out of memory for SFI platform device '%s'.\n",
@@ -348,7 +359,10 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
348 install_irq_resource(pdev, pentry->irq); 359 install_irq_resource(pdev, pentry->irq);
349 360
350 pdev->dev.platform_data = pdata; 361 pdev->dev.platform_data = pdata;
351 platform_device_add(pdev); 362 if (dev->delay)
363 intel_scu_ipc_device_register(pdev);
364 else
365 platform_device_add(pdev);
352} 366}
353 367
354static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry, 368static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
@@ -503,27 +517,23 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
503 if (!dev) 517 if (!dev)
504 continue; 518 continue;
505 519
506 if (dev->device_handler) { 520 switch (pentry->type) {
507 dev->device_handler(pentry, dev); 521 case SFI_DEV_TYPE_IPC:
508 } else { 522 sfi_handle_ipc_dev(pentry, dev);
509 switch (pentry->type) { 523 break;
510 case SFI_DEV_TYPE_IPC: 524 case SFI_DEV_TYPE_SPI:
511 sfi_handle_ipc_dev(pentry, dev); 525 sfi_handle_spi_dev(pentry, dev);
512 break; 526 break;
513 case SFI_DEV_TYPE_SPI: 527 case SFI_DEV_TYPE_I2C:
514 sfi_handle_spi_dev(pentry, dev); 528 sfi_handle_i2c_dev(pentry, dev);
515 break; 529 break;
516 case SFI_DEV_TYPE_I2C: 530 case SFI_DEV_TYPE_SD:
517 sfi_handle_i2c_dev(pentry, dev); 531 sfi_handle_sd_dev(pentry, dev);
518 break; 532 break;
519 case SFI_DEV_TYPE_SD: 533 case SFI_DEV_TYPE_UART:
520 sfi_handle_sd_dev(pentry, dev); 534 case SFI_DEV_TYPE_HSI:
521 break; 535 default:
522 case SFI_DEV_TYPE_UART: 536 break;
523 case SFI_DEV_TYPE_HSI:
524 default:
525 break;
526 }
527 } 537 }
528 } 538 }
529 return 0; 539 return 0;
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 8410e7d0a5b5..9743d0ccfec6 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -45,8 +45,8 @@
45 * 45 *
46 * Handle system-wide NMI events generated by the global 'power nmi' command. 46 * Handle system-wide NMI events generated by the global 'power nmi' command.
47 * 47 *
48 * Basic operation is to field the NMI interrupt on each cpu and wait 48 * Basic operation is to field the NMI interrupt on each CPU and wait
49 * until all cpus have arrived into the nmi handler. If some cpus do not 49 * until all CPU's have arrived into the nmi handler. If some CPU's do not
50 * make it into the handler, try and force them in with the IPI(NMI) signal. 50 * make it into the handler, try and force them in with the IPI(NMI) signal.
51 * 51 *
52 * We also have to lessen UV Hub MMR accesses as much as possible as this 52 * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
56 * To do this we register our primary NMI notifier on the NMI_UNKNOWN 56 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
57 * chain. This reduces the number of false NMI calls when the perf 57 * chain. This reduces the number of false NMI calls when the perf
58 * tools are running which generate an enormous number of NMIs per 58 * tools are running which generate an enormous number of NMIs per
59 * second (~4M/s for 1024 cpu threads). Our secondary NMI handler is 59 * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
60 * very short as it only checks that if it has been "pinged" with the 60 * very short as it only checks that if it has been "pinged" with the
61 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR. 61 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
62 * 62 *
@@ -65,8 +65,20 @@
65static struct uv_hub_nmi_s **uv_hub_nmi_list; 65static struct uv_hub_nmi_s **uv_hub_nmi_list;
66 66
67DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); 67DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
68EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
69 68
69/* UV hubless values */
70#define NMI_CONTROL_PORT 0x70
71#define NMI_DUMMY_PORT 0x71
72#define PAD_OWN_GPP_D_0 0x2c
73#define GPI_NMI_STS_GPP_D_0 0x164
74#define GPI_NMI_ENA_GPP_D_0 0x174
75#define STS_GPP_D_0_MASK 0x1
76#define PAD_CFG_DW0_GPP_D_0 0x4c0
77#define GPIROUTNMI (1ul << 17)
78#define PCH_PCR_GPIO_1_BASE 0xfdae0000ul
79#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
80
81static u64 *pch_base;
70static unsigned long nmi_mmr; 82static unsigned long nmi_mmr;
71static unsigned long nmi_mmr_clear; 83static unsigned long nmi_mmr_clear;
72static unsigned long nmi_mmr_pending; 84static unsigned long nmi_mmr_pending;
@@ -100,7 +112,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
100 112
101static int param_set_local64(const char *val, const struct kernel_param *kp) 113static int param_set_local64(const char *val, const struct kernel_param *kp)
102{ 114{
103 /* clear on any write */ 115 /* Clear on any write */
104 local64_set((local64_t *)kp->arg, 0); 116 local64_set((local64_t *)kp->arg, 0);
105 return 0; 117 return 0;
106} 118}
@@ -144,16 +156,80 @@ module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
144static int uv_nmi_retry_count = 500; 156static int uv_nmi_retry_count = 500;
145module_param_named(retry_count, uv_nmi_retry_count, int, 0644); 157module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
146 158
147/* 159static bool uv_pch_intr_enable = true;
148 * Valid NMI Actions: 160static bool uv_pch_intr_now_enabled;
149 * "dump" - dump process stack for each cpu 161module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
150 * "ips" - dump IP info for each cpu 162
151 * "kdump" - do crash dump 163static bool uv_pch_init_enable = true;
152 * "kdb" - enter KDB (default) 164module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);
153 * "kgdb" - enter KGDB 165
154 */ 166static int uv_nmi_debug;
155static char uv_nmi_action[8] = "kdb"; 167module_param_named(debug, uv_nmi_debug, int, 0644);
156module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644); 168
169#define nmi_debug(fmt, ...) \
170 do { \
171 if (uv_nmi_debug) \
172 pr_info(fmt, ##__VA_ARGS__); \
173 } while (0)
174
175/* Valid NMI Actions */
176#define ACTION_LEN 16
177static struct nmi_action {
178 char *action;
179 char *desc;
180} valid_acts[] = {
181 { "kdump", "do kernel crash dump" },
182 { "dump", "dump process stack for each cpu" },
183 { "ips", "dump Inst Ptr info for each cpu" },
184 { "kdb", "enter KDB (needs kgdboc= assignment)" },
185 { "kgdb", "enter KGDB (needs gdb target remote)" },
186 { "health", "check if CPUs respond to NMI" },
187};
188typedef char action_t[ACTION_LEN];
189static action_t uv_nmi_action = { "dump" };
190
191static int param_get_action(char *buffer, const struct kernel_param *kp)
192{
193 return sprintf(buffer, "%s\n", uv_nmi_action);
194}
195
196static int param_set_action(const char *val, const struct kernel_param *kp)
197{
198 int i;
199 int n = ARRAY_SIZE(valid_acts);
200 char arg[ACTION_LEN], *p;
201
202 /* (remove possible '\n') */
203 strncpy(arg, val, ACTION_LEN - 1);
204 arg[ACTION_LEN - 1] = '\0';
205 p = strchr(arg, '\n');
206 if (p)
207 *p = '\0';
208
209 for (i = 0; i < n; i++)
210 if (!strcmp(arg, valid_acts[i].action))
211 break;
212
213 if (i < n) {
214 strcpy(uv_nmi_action, arg);
215 pr_info("UV: New NMI action:%s\n", uv_nmi_action);
216 return 0;
217 }
218
219 pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
220 for (i = 0; i < n; i++)
221 pr_err("UV: %-8s - %s\n",
222 valid_acts[i].action, valid_acts[i].desc);
223 return -EINVAL;
224}
225
226static const struct kernel_param_ops param_ops_action = {
227 .get = param_get_action,
228 .set = param_set_action,
229};
230#define param_check_action(name, p) __param_check(name, p, action_t)
231
232module_param_named(action, uv_nmi_action, action, 0644);
157 233
158static inline bool uv_nmi_action_is(const char *action) 234static inline bool uv_nmi_action_is(const char *action)
159{ 235{
@@ -192,8 +268,200 @@ static inline void uv_local_mmr_clear_nmi(void)
192} 268}
193 269
194/* 270/*
195 * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and 271 * UV hubless NMI handler functions
196 * return true. If first cpu in on the system, set global "in_nmi" flag. 272 */
273static inline void uv_reassert_nmi(void)
274{
275 /* (from arch/x86/include/asm/mach_traps.h) */
276 outb(0x8f, NMI_CONTROL_PORT);
277 inb(NMI_DUMMY_PORT); /* dummy read */
278 outb(0x0f, NMI_CONTROL_PORT);
279 inb(NMI_DUMMY_PORT); /* dummy read */
280}
281
282static void uv_init_hubless_pch_io(int offset, int mask, int data)
283{
284 int *addr = PCH_PCR_GPIO_ADDRESS(offset);
285 int readd = readl(addr);
286
287 if (mask) { /* OR in new data */
288 int writed = (readd & ~mask) | data;
289
290 nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
291 addr, readd, ~mask, data, writed);
292 writel(writed, addr);
293 } else if (readd & data) { /* clear status bit */
294 nmi_debug("UV:PCH: %p = %x\n", addr, data);
295 writel(data, addr);
296 }
297
298 (void)readl(addr); /* flush write data */
299}
300
301static void uv_nmi_setup_hubless_intr(void)
302{
303 uv_pch_intr_now_enabled = uv_pch_intr_enable;
304
305 uv_init_hubless_pch_io(
306 PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
307 uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
308
309 nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
310 uv_pch_intr_now_enabled ? "enabled" : "disabled");
311}
312
313static struct init_nmi {
314 unsigned int offset;
315 unsigned int mask;
316 unsigned int data;
317} init_nmi[] = {
318 { /* HOSTSW_OWN_GPP_D_0 */
319 .offset = 0x84,
320 .mask = 0x1,
321 .data = 0x0, /* ACPI Mode */
322 },
323
324/* Clear status: */
325 { /* GPI_INT_STS_GPP_D_0 */
326 .offset = 0x104,
327 .mask = 0x0,
328 .data = 0x1, /* Clear Status */
329 },
330 { /* GPI_GPE_STS_GPP_D_0 */
331 .offset = 0x124,
332 .mask = 0x0,
333 .data = 0x1, /* Clear Status */
334 },
335 { /* GPI_SMI_STS_GPP_D_0 */
336 .offset = 0x144,
337 .mask = 0x0,
338 .data = 0x1, /* Clear Status */
339 },
340 { /* GPI_NMI_STS_GPP_D_0 */
341 .offset = 0x164,
342 .mask = 0x0,
343 .data = 0x1, /* Clear Status */
344 },
345
346/* Disable interrupts: */
347 { /* GPI_INT_EN_GPP_D_0 */
348 .offset = 0x114,
349 .mask = 0x1,
350 .data = 0x0, /* Disable interrupt generation */
351 },
352 { /* GPI_GPE_EN_GPP_D_0 */
353 .offset = 0x134,
354 .mask = 0x1,
355 .data = 0x0, /* Disable interrupt generation */
356 },
357 { /* GPI_SMI_EN_GPP_D_0 */
358 .offset = 0x154,
359 .mask = 0x1,
360 .data = 0x0, /* Disable interrupt generation */
361 },
362 { /* GPI_NMI_EN_GPP_D_0 */
363 .offset = 0x174,
364 .mask = 0x1,
365 .data = 0x0, /* Disable interrupt generation */
366 },
367
368/* Setup GPP_D_0 Pad Config: */
369 { /* PAD_CFG_DW0_GPP_D_0 */
370 .offset = 0x4c0,
371 .mask = 0xffffffff,
372 .data = 0x82020100,
373/*
374 * 31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default)
375 *
376 * 29 RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
377 * from RX buffer (default)
378 *
379 * 28 RX Raw Override to '1' (RXRAW1): = 0 # No Override
380 *
381 * 26:25 RX Level/Edge Configuration (RXEVCFG):
382 * = 0h # Level
383 * = 1h # Edge
384 *
385 * 23 RX Invert (RXINV): = 0 # No Inversion (signal active high)
386 *
387 * 20 GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
388 * = 0 # Routing does not cause peripheral IRQ...
389 * # (we want an NMI not an IRQ)
390 *
391 * 19 GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
392 * 18 GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
393 * 17 GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
394 *
395 * 11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad.
396 * 9 GPIO RX Disable (GPIORXDIS):
397 * = 0 # Enable the input buffer (active low enable)
398 *
399 * 8 GPIO TX Disable (GPIOTXDIS):
400 * = 1 # Disable the output buffer; i.e. Hi-Z
401 *
402 * 1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state..
403 * 0 GPIO TX State (GPIOTXSTATE):
404 * = 0 # (Leave at default)
405 */
406 },
407
408/* Pad Config DW1 */
409 { /* PAD_CFG_DW1_GPP_D_0 */
410 .offset = 0x4c4,
411 .mask = 0x3c00,
412 .data = 0, /* Termination = none (default) */
413 },
414};
415
416static void uv_init_hubless_pch_d0(void)
417{
418 int i, read;
419
420 read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
421 if (read != 0) {
422 pr_info("UV: Hubless NMI already configured\n");
423 return;
424 }
425
426 nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
427 for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
428 uv_init_hubless_pch_io(init_nmi[i].offset,
429 init_nmi[i].mask,
430 init_nmi[i].data);
431 }
432}
433
434static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
435{
436 int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
437 int status = *pstat;
438
439 hub_nmi->nmi_value = status;
440 atomic_inc(&hub_nmi->read_mmr_count);
441
442 if (!(status & STS_GPP_D_0_MASK)) /* Not a UV external NMI */
443 return 0;
444
445 *pstat = STS_GPP_D_0_MASK; /* Is a UV NMI: clear GPP_D_0 status */
446 (void)*pstat; /* Flush write */
447
448 return 1;
449}
450
451static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
452{
453 if (hub_nmi->hub_present)
454 return uv_nmi_test_mmr(hub_nmi);
455
456 if (hub_nmi->pch_owner) /* Only PCH owner can check status */
457 return uv_nmi_test_hubless(hub_nmi);
458
459 return -1;
460}
461
462/*
463 * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
464 * return true. If first CPU in on the system, set global "in_nmi" flag.
197 */ 465 */
198static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi) 466static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
199{ 467{
@@ -214,6 +482,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
214{ 482{
215 int cpu = smp_processor_id(); 483 int cpu = smp_processor_id();
216 int nmi = 0; 484 int nmi = 0;
485 int nmi_detected = 0;
217 486
218 local64_inc(&uv_nmi_count); 487 local64_inc(&uv_nmi_count);
219 this_cpu_inc(uv_cpu_nmi.queries); 488 this_cpu_inc(uv_cpu_nmi.queries);
@@ -224,35 +493,48 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
224 break; 493 break;
225 494
226 if (raw_spin_trylock(&hub_nmi->nmi_lock)) { 495 if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
496 nmi_detected = uv_test_nmi(hub_nmi);
227 497
228 /* check hub MMR NMI flag */ 498 /* Check flag for UV external NMI */
229 if (uv_nmi_test_mmr(hub_nmi)) { 499 if (nmi_detected > 0) {
230 uv_set_in_nmi(cpu, hub_nmi); 500 uv_set_in_nmi(cpu, hub_nmi);
231 nmi = 1; 501 nmi = 1;
232 break; 502 break;
233 } 503 }
234 504
235 /* MMR NMI flag is clear */ 505 /* A non-PCH node in a hubless system waits for NMI */
506 else if (nmi_detected < 0)
507 goto slave_wait;
508
509 /* MMR/PCH NMI flag is clear */
236 raw_spin_unlock(&hub_nmi->nmi_lock); 510 raw_spin_unlock(&hub_nmi->nmi_lock);
237 511
238 } else { 512 } else {
239 /* wait a moment for the hub nmi locker to set flag */ 513
240 cpu_relax(); 514 /* Wait a moment for the HUB NMI locker to set flag */
515slave_wait: cpu_relax();
241 udelay(uv_nmi_slave_delay); 516 udelay(uv_nmi_slave_delay);
242 517
243 /* re-check hub in_nmi flag */ 518 /* Re-check hub in_nmi flag */
244 nmi = atomic_read(&hub_nmi->in_nmi); 519 nmi = atomic_read(&hub_nmi->in_nmi);
245 if (nmi) 520 if (nmi)
246 break; 521 break;
247 } 522 }
248 523
249 /* check if this BMC missed setting the MMR NMI flag */ 524 /*
525 * Check if this BMC missed setting the MMR NMI flag (or)
526 * UV hubless system where only PCH owner can check flag
527 */
250 if (!nmi) { 528 if (!nmi) {
251 nmi = atomic_read(&uv_in_nmi); 529 nmi = atomic_read(&uv_in_nmi);
252 if (nmi) 530 if (nmi)
253 uv_set_in_nmi(cpu, hub_nmi); 531 uv_set_in_nmi(cpu, hub_nmi);
254 } 532 }
255 533
534 /* If we're holding the hub lock, release it now */
535 if (nmi_detected < 0)
536 raw_spin_unlock(&hub_nmi->nmi_lock);
537
256 } while (0); 538 } while (0);
257 539
258 if (!nmi) 540 if (!nmi)
@@ -269,12 +551,15 @@ static inline void uv_clear_nmi(int cpu)
269 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { 551 if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
270 atomic_set(&hub_nmi->cpu_owner, -1); 552 atomic_set(&hub_nmi->cpu_owner, -1);
271 atomic_set(&hub_nmi->in_nmi, 0); 553 atomic_set(&hub_nmi->in_nmi, 0);
272 uv_local_mmr_clear_nmi(); 554 if (hub_nmi->hub_present)
555 uv_local_mmr_clear_nmi();
556 else
557 uv_reassert_nmi();
273 raw_spin_unlock(&hub_nmi->nmi_lock); 558 raw_spin_unlock(&hub_nmi->nmi_lock);
274 } 559 }
275} 560}
276 561
277/* Ping non-responding cpus attemping to force them into the NMI handler */ 562/* Ping non-responding CPU's attemping to force them into the NMI handler */
278static void uv_nmi_nr_cpus_ping(void) 563static void uv_nmi_nr_cpus_ping(void)
279{ 564{
280 int cpu; 565 int cpu;
@@ -285,7 +570,7 @@ static void uv_nmi_nr_cpus_ping(void)
285 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); 570 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
286} 571}
287 572
288/* Clean up flags for cpus that ignored both NMI and ping */ 573/* Clean up flags for CPU's that ignored both NMI and ping */
289static void uv_nmi_cleanup_mask(void) 574static void uv_nmi_cleanup_mask(void)
290{ 575{
291 int cpu; 576 int cpu;
@@ -297,11 +582,12 @@ static void uv_nmi_cleanup_mask(void)
297 } 582 }
298} 583}
299 584
300/* Loop waiting as cpus enter nmi handler */ 585/* Loop waiting as CPU's enter NMI handler */
301static int uv_nmi_wait_cpus(int first) 586static int uv_nmi_wait_cpus(int first)
302{ 587{
303 int i, j, k, n = num_online_cpus(); 588 int i, j, k, n = num_online_cpus();
304 int last_k = 0, waiting = 0; 589 int last_k = 0, waiting = 0;
590 int cpu = smp_processor_id();
305 591
306 if (first) { 592 if (first) {
307 cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask); 593 cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
@@ -310,6 +596,12 @@ static int uv_nmi_wait_cpus(int first)
310 k = n - cpumask_weight(uv_nmi_cpu_mask); 596 k = n - cpumask_weight(uv_nmi_cpu_mask);
311 } 597 }
312 598
599 /* PCH NMI causes only one CPU to respond */
600 if (first && uv_pch_intr_now_enabled) {
601 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
602 return n - k - 1;
603 }
604
313 udelay(uv_nmi_initial_delay); 605 udelay(uv_nmi_initial_delay);
314 for (i = 0; i < uv_nmi_retry_count; i++) { 606 for (i = 0; i < uv_nmi_retry_count; i++) {
315 int loop_delay = uv_nmi_loop_delay; 607 int loop_delay = uv_nmi_loop_delay;
@@ -325,13 +617,13 @@ static int uv_nmi_wait_cpus(int first)
325 k = n; 617 k = n;
326 break; 618 break;
327 } 619 }
328 if (last_k != k) { /* abort if no new cpus coming in */ 620 if (last_k != k) { /* abort if no new CPU's coming in */
329 last_k = k; 621 last_k = k;
330 waiting = 0; 622 waiting = 0;
331 } else if (++waiting > uv_nmi_wait_count) 623 } else if (++waiting > uv_nmi_wait_count)
332 break; 624 break;
333 625
334 /* extend delay if waiting only for cpu 0 */ 626 /* Extend delay if waiting only for CPU 0: */
335 if (waiting && (n - k) == 1 && 627 if (waiting && (n - k) == 1 &&
336 cpumask_test_cpu(0, uv_nmi_cpu_mask)) 628 cpumask_test_cpu(0, uv_nmi_cpu_mask))
337 loop_delay *= 100; 629 loop_delay *= 100;
@@ -342,29 +634,29 @@ static int uv_nmi_wait_cpus(int first)
342 return n - k; 634 return n - k;
343} 635}
344 636
345/* Wait until all slave cpus have entered UV NMI handler */ 637/* Wait until all slave CPU's have entered UV NMI handler */
346static void uv_nmi_wait(int master) 638static void uv_nmi_wait(int master)
347{ 639{
348 /* indicate this cpu is in */ 640 /* Indicate this CPU is in: */
349 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN); 641 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
350 642
351 /* if not the first cpu in (the master), then we are a slave cpu */ 643 /* If not the first CPU in (the master), then we are a slave CPU */
352 if (!master) 644 if (!master)
353 return; 645 return;
354 646
355 do { 647 do {
356 /* wait for all other cpus to gather here */ 648 /* Wait for all other CPU's to gather here */
357 if (!uv_nmi_wait_cpus(1)) 649 if (!uv_nmi_wait_cpus(1))
358 break; 650 break;
359 651
360 /* if not all made it in, send IPI NMI to them */ 652 /* If not all made it in, send IPI NMI to them */
361 pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n", 653 pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
362 cpumask_weight(uv_nmi_cpu_mask), 654 cpumask_weight(uv_nmi_cpu_mask),
363 cpumask_pr_args(uv_nmi_cpu_mask)); 655 cpumask_pr_args(uv_nmi_cpu_mask));
364 656
365 uv_nmi_nr_cpus_ping(); 657 uv_nmi_nr_cpus_ping();
366 658
367 /* if all cpus are in, then done */ 659 /* If all CPU's are in, then done */
368 if (!uv_nmi_wait_cpus(0)) 660 if (!uv_nmi_wait_cpus(0))
369 break; 661 break;
370 662
@@ -416,7 +708,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
416 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); 708 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
417} 709}
418 710
419/* Trigger a slave cpu to dump it's state */ 711/* Trigger a slave CPU to dump it's state */
420static void uv_nmi_trigger_dump(int cpu) 712static void uv_nmi_trigger_dump(int cpu)
421{ 713{
422 int retry = uv_nmi_trigger_delay; 714 int retry = uv_nmi_trigger_delay;
@@ -437,7 +729,7 @@ static void uv_nmi_trigger_dump(int cpu)
437 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; 729 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
438} 730}
439 731
440/* Wait until all cpus ready to exit */ 732/* Wait until all CPU's ready to exit */
441static void uv_nmi_sync_exit(int master) 733static void uv_nmi_sync_exit(int master)
442{ 734{
443 atomic_dec(&uv_nmi_cpus_in_nmi); 735 atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -451,7 +743,23 @@ static void uv_nmi_sync_exit(int master)
451 } 743 }
452} 744}
453 745
454/* Walk through cpu list and dump state of each */ 746/* Current "health" check is to check which CPU's are responsive */
/* Current "health" check is to check which CPU's are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		/* Master: every CPU already in the handler is "responsive" */
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		/* Release the slave CPUs spinning in the else branch below */
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		/* Slave: spin until the master finishes the count */
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}
761
762/* Walk through CPU list and dump state of each */
455static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) 763static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
456{ 764{
457 if (master) { 765 if (master) {
@@ -538,7 +846,7 @@ static inline int uv_nmi_kdb_reason(void)
538#else /* !CONFIG_KGDB_KDB */ 846#else /* !CONFIG_KGDB_KDB */
539static inline int uv_nmi_kdb_reason(void) 847static inline int uv_nmi_kdb_reason(void)
540{ 848{
541 /* Insure user is expecting to attach gdb remote */ 849 /* Ensure user is expecting to attach gdb remote */
542 if (uv_nmi_action_is("kgdb")) 850 if (uv_nmi_action_is("kgdb"))
543 return 0; 851 return 0;
544 852
@@ -563,7 +871,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
563 if (reason < 0) 871 if (reason < 0)
564 return; 872 return;
565 873
566 /* call KGDB NMI handler as MASTER */ 874 /* Call KGDB NMI handler as MASTER */
567 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason, 875 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
568 &uv_nmi_slave_continue); 876 &uv_nmi_slave_continue);
569 if (ret) { 877 if (ret) {
@@ -571,7 +879,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
571 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT); 879 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
572 } 880 }
573 } else { 881 } else {
574 /* wait for KGDB signal that it's ready for slaves to enter */ 882 /* Wait for KGDB signal that it's ready for slaves to enter */
575 int sig; 883 int sig;
576 884
577 do { 885 do {
@@ -579,7 +887,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
579 sig = atomic_read(&uv_nmi_slave_continue); 887 sig = atomic_read(&uv_nmi_slave_continue);
580 } while (!sig); 888 } while (!sig);
581 889
582 /* call KGDB as slave */ 890 /* Call KGDB as slave */
583 if (sig == SLAVE_CONTINUE) 891 if (sig == SLAVE_CONTINUE)
584 kgdb_nmicallback(cpu, regs); 892 kgdb_nmicallback(cpu, regs);
585 } 893 }
@@ -623,18 +931,23 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
623 strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action)); 931 strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
624 } 932 }
625 933
626 /* Pause as all cpus enter the NMI handler */ 934 /* Pause as all CPU's enter the NMI handler */
627 uv_nmi_wait(master); 935 uv_nmi_wait(master);
628 936
629 /* Dump state of each cpu */ 937 /* Process actions other than "kdump": */
630 if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) 938 if (uv_nmi_action_is("health")) {
939 uv_nmi_action_health(cpu, regs, master);
940 } else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
631 uv_nmi_dump_state(cpu, regs, master); 941 uv_nmi_dump_state(cpu, regs, master);
632 942 } else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
633 /* Call KGDB/KDB if enabled */
634 else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
635 uv_call_kgdb_kdb(cpu, regs, master); 943 uv_call_kgdb_kdb(cpu, regs, master);
944 } else {
945 if (master)
946 pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
947 uv_nmi_sync_exit(master);
948 }
636 949
637 /* Clear per_cpu "in nmi" flag */ 950 /* Clear per_cpu "in_nmi" flag */
638 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT); 951 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
639 952
640 /* Clear MMR NMI flag on each hub */ 953 /* Clear MMR NMI flag on each hub */
@@ -648,6 +961,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
648 atomic_set(&uv_nmi_cpu, -1); 961 atomic_set(&uv_nmi_cpu, -1);
649 atomic_set(&uv_in_nmi, 0); 962 atomic_set(&uv_in_nmi, 0);
650 atomic_set(&uv_nmi_kexec_failed, 0); 963 atomic_set(&uv_nmi_kexec_failed, 0);
964 atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
651 } 965 }
652 966
653 uv_nmi_touch_watchdogs(); 967 uv_nmi_touch_watchdogs();
@@ -657,7 +971,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
657} 971}
658 972
659/* 973/*
660 * NMI handler for pulling in CPUs when perf events are grabbing our NMI 974 * NMI handler for pulling in CPU's when perf events are grabbing our NMI
661 */ 975 */
662static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs) 976static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
663{ 977{
@@ -690,35 +1004,62 @@ void uv_nmi_init(void)
690 unsigned int value; 1004 unsigned int value;
691 1005
692 /* 1006 /*
693 * Unmask NMI on all cpus 1007 * Unmask NMI on all CPU's
694 */ 1008 */
695 value = apic_read(APIC_LVT1) | APIC_DM_NMI; 1009 value = apic_read(APIC_LVT1) | APIC_DM_NMI;
696 value &= ~APIC_LVT_MASKED; 1010 value &= ~APIC_LVT_MASKED;
697 apic_write(APIC_LVT1, value); 1011 apic_write(APIC_LVT1, value);
698} 1012}
699 1013
700void uv_nmi_setup(void) 1014/* Setup HUB NMI info */
1015void __init uv_nmi_setup_common(bool hubbed)
701{ 1016{
702 int size = sizeof(void *) * (1 << NODES_SHIFT); 1017 int size = sizeof(void *) * (1 << NODES_SHIFT);
703 int cpu, nid; 1018 int cpu;
704 1019
705 /* Setup hub nmi info */
706 uv_nmi_setup_mmrs();
707 uv_hub_nmi_list = kzalloc(size, GFP_KERNEL); 1020 uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
708 pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size); 1021 nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
709 BUG_ON(!uv_hub_nmi_list); 1022 BUG_ON(!uv_hub_nmi_list);
710 size = sizeof(struct uv_hub_nmi_s); 1023 size = sizeof(struct uv_hub_nmi_s);
711 for_each_present_cpu(cpu) { 1024 for_each_present_cpu(cpu) {
712 nid = cpu_to_node(cpu); 1025 int nid = cpu_to_node(cpu);
713 if (uv_hub_nmi_list[nid] == NULL) { 1026 if (uv_hub_nmi_list[nid] == NULL) {
714 uv_hub_nmi_list[nid] = kzalloc_node(size, 1027 uv_hub_nmi_list[nid] = kzalloc_node(size,
715 GFP_KERNEL, nid); 1028 GFP_KERNEL, nid);
716 BUG_ON(!uv_hub_nmi_list[nid]); 1029 BUG_ON(!uv_hub_nmi_list[nid]);
717 raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock)); 1030 raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
718 atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1); 1031 atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
1032 uv_hub_nmi_list[nid]->hub_present = hubbed;
1033 uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
719 } 1034 }
720 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid]; 1035 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
721 } 1036 }
722 BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL)); 1037 BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
1038}
1039
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);	/* hubbed = true */
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}
1048
/* Setup for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{
	uv_nmi_setup_common(false);	/* hubbed = false */
	/* Map the PCH GPIO community so the NMI status pin can be accessed */
	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
		pch_base, PCH_PCR_GPIO_1_BASE);
	if (uv_pch_init_enable)
		uv_init_hubless_pch_d0();
	/* Enable the GPP_D_0 pin as an NMI source */
	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
	uv_nmi_setup_hubless_intr();
	/* Ensure NMI enabled in Processor Interface Reg: */
	uv_reassert_nmi();
	uv_register_nmi_notifier();
	pr_info("UV: Hubless NMI enabled\n");
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 59aa8e302bc3..49a594855f98 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -816,13 +816,6 @@ config INTEL_SCU_IPC_UTIL
816 low level access for debug work and updating the firmware. Say 816 low level access for debug work and updating the firmware. Say
817 N unless you will be doing this on an Intel MID platform. 817 N unless you will be doing this on an Intel MID platform.
818 818
819config GPIO_INTEL_PMIC
820 bool "Intel PMIC GPIO support"
821 depends on INTEL_SCU_IPC && GPIOLIB
822 ---help---
823 Say Y here to support GPIO via the SCU IPC interface
824 on Intel MID platforms.
825
826config INTEL_MID_POWER_BUTTON 819config INTEL_MID_POWER_BUTTON
827 tristate "power button driver for Intel MID platforms" 820 tristate "power button driver for Intel MID platforms"
828 depends on INTEL_SCU_IPC && INPUT 821 depends on INTEL_SCU_IPC && INPUT
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index d4111f0f8a78..b2f52a7690af 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
50obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o 50obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
51obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o 51obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
52obj-$(CONFIG_INTEL_IPS) += intel_ips.o 52obj-$(CONFIG_INTEL_IPS) += intel_ips.o
53obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
54obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o 53obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
55obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o 54obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
56obj-$(CONFIG_IBM_RTL) += ibm_rtl.o 55obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
deleted file mode 100644
index 91ae58510d92..000000000000
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ /dev/null
@@ -1,326 +0,0 @@
1/* Moorestown PMIC GPIO (access through IPC) driver
2 * Copyright (c) 2008 - 2009, Intel Corporation.
3 *
4 * Author: Alek Du <alek.du@intel.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/* Supports:
21 * Moorestown platform PMIC chip
22 */
23
24#define pr_fmt(fmt) "%s: " fmt, __func__
25
26#include <linux/kernel.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/stddef.h>
30#include <linux/slab.h>
31#include <linux/ioport.h>
32#include <linux/init.h>
33#include <linux/io.h>
34#include <linux/gpio/driver.h>
35#include <asm/intel_scu_ipc.h>
36#include <linux/device.h>
37#include <linux/intel_pmic_gpio.h>
38#include <linux/platform_device.h>
39
40#define DRIVER_NAME "pmic_gpio"
41
/*
 * PMIC register offsets accessed through the SCU IPC driver:
 * 8 GPIO + 8 GPOSW (6 controllable) + 8 GPO pins.
 */
enum pmic_gpio_register {
	GPIO0 = 0xE0,		/* first of the 8 bidirectional GPIO regs */
	GPIO7 = 0xE7,		/* last GPIO register */
	GPIOINT = 0xE8,		/* GPIO interrupt status (read-clear) */
	GPOSWCTL0 = 0xEC,	/* first GPOSW control register */
	GPOSWCTL5 = 0xF1,	/* last GPOSW control register */
	GPO = 0xF4,		/* GPO output bits register */
};
53
54/* bits definition for GPIO & GPOSW */
55#define GPIO_DRV 0x01
56#define GPIO_DIR 0x02
57#define GPIO_DIN 0x04
58#define GPIO_DOU 0x08
59#define GPIO_INTCTL 0x30
60#define GPIO_DBC 0xc0
61
62#define GPOSW_DRV 0x01
63#define GPOSW_DOU 0x08
64#define GPOSW_RDRV 0x30
65
66#define GPIO_UPDATE_TYPE 0x80000000
67
68#define NUM_GPIO 24
69
/* Driver state for one PMIC GPIO expander instance */
struct pmic_gpio {
	struct mutex buslock;		/* serializes deferred SCU bus updates */
	struct gpio_chip chip;		/* gpiolib chip registration */
	void *gpiointr;			/* SRAM mapping of GPIOINT status area */
	int irq;			/* upstream (demux) interrupt line */
	unsigned irq_base;		/* first IRQ number of the 8 GPIO pins */
	unsigned int update_type;	/* pending pin | GPIO_UPDATE_TYPE flag */
	u32 trigger_type;		/* pending IRQ trigger type to program */
};
79
80static void pmic_program_irqtype(int gpio, int type)
81{
82 if (type & IRQ_TYPE_EDGE_RISING)
83 intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
84 else
85 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
86
87 if (type & IRQ_TYPE_EDGE_FALLING)
88 intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
89 else
90 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
91};
92
93static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
94{
95 if (offset >= 8) {
96 pr_err("only pin 0-7 support input\n");
97 return -1;/* we only have 8 GPIO can use as input */
98 }
99 return intel_scu_ipc_update_register(GPIO0 + offset,
100 GPIO_DIR, GPIO_DIR);
101}
102
103static int pmic_gpio_direction_output(struct gpio_chip *chip,
104 unsigned offset, int value)
105{
106 int rc = 0;
107
108 if (offset < 8)/* it is GPIO */
109 rc = intel_scu_ipc_update_register(GPIO0 + offset,
110 GPIO_DRV | (value ? GPIO_DOU : 0),
111 GPIO_DRV | GPIO_DOU | GPIO_DIR);
112 else if (offset < 16)/* it is GPOSW */
113 rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
114 GPOSW_DRV | (value ? GPOSW_DOU : 0),
115 GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
116 else if (offset > 15 && offset < 24)/* it is GPO */
117 rc = intel_scu_ipc_update_register(GPO,
118 value ? 1 << (offset - 16) : 0,
119 1 << (offset - 16));
120 else {
121 pr_err("invalid PMIC GPIO pin %d!\n", offset);
122 WARN_ON(1);
123 }
124
125 return rc;
126}
127
128static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
129{
130 u8 r;
131 int ret;
132
133 /* we only have 8 GPIO pins we can use as input */
134 if (offset >= 8)
135 return -EOPNOTSUPP;
136 ret = intel_scu_ipc_ioread8(GPIO0 + offset, &r);
137 if (ret < 0)
138 return ret;
139 return r & GPIO_DIN;
140}
141
142static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
143{
144 if (offset < 8)/* it is GPIO */
145 intel_scu_ipc_update_register(GPIO0 + offset,
146 GPIO_DRV | (value ? GPIO_DOU : 0),
147 GPIO_DRV | GPIO_DOU);
148 else if (offset < 16)/* it is GPOSW */
149 intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
150 GPOSW_DRV | (value ? GPOSW_DOU : 0),
151 GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
152 else if (offset > 15 && offset < 24) /* it is GPO */
153 intel_scu_ipc_update_register(GPO,
154 value ? 1 << (offset - 16) : 0,
155 1 << (offset - 16));
156}
157
158/*
159 * This is called from genirq with pg->buslock locked and
160 * irq_desc->lock held. We can not access the scu bus here, so we
161 * store the change and update in the bus_sync_unlock() function below
162 */
163static int pmic_irq_type(struct irq_data *data, unsigned type)
164{
165 struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
166 u32 gpio = data->irq - pg->irq_base;
167
168 if (gpio >= pg->chip.ngpio)
169 return -EINVAL;
170
171 pg->trigger_type = type;
172 pg->update_type = gpio | GPIO_UPDATE_TYPE;
173 return 0;
174}
175
/* Map a GPIO offset to its Linux IRQ number (linear from irq_base). */
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct pmic_gpio *pg = gpiochip_get_data(chip);

	return pg->irq_base + offset;
}
182
/* irq_chip bus-lock hook: serialize deferred SCU register updates. */
static void pmic_bus_lock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	mutex_lock(&pg->buslock);
}
189
190static void pmic_bus_sync_unlock(struct irq_data *data)
191{
192 struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
193
194 if (pg->update_type) {
195 unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
196
197 pmic_program_irqtype(gpio, pg->trigger_type);
198 pg->update_type = 0;
199 }
200 mutex_unlock(&pg->buslock);
201}
202
/* The gpiointr register is read-clear, so mask/unmask are no-ops. */
static void pmic_irq_unmask(struct irq_data *data) { }

static void pmic_irq_mask(struct irq_data *data) { }
207
/* irq_chip for the 8 demuxed GPIO interrupts */
static struct irq_chip pmic_irqchip = {
	.name			= "PMIC-GPIO",
	.irq_mask		= pmic_irq_mask,
	.irq_unmask		= pmic_irq_unmask,
	.irq_set_type		= pmic_irq_type,
	.irq_bus_lock		= pmic_bus_lock,
	.irq_bus_sync_unlock	= pmic_bus_sync_unlock,
};
216
217static irqreturn_t pmic_irq_handler(int irq, void *data)
218{
219 struct pmic_gpio *pg = data;
220 u8 intsts = *((u8 *)pg->gpiointr + 4);
221 int gpio;
222 irqreturn_t ret = IRQ_NONE;
223
224 for (gpio = 0; gpio < 8; gpio++) {
225 if (intsts & (1 << gpio)) {
226 pr_debug("pmic pin %d triggered\n", gpio);
227 generic_handle_irq(pg->irq_base + gpio);
228 ret = IRQ_HANDLED;
229 }
230 }
231 return ret;
232}
233
234static int platform_pmic_gpio_probe(struct platform_device *pdev)
235{
236 struct device *dev = &pdev->dev;
237 int irq = platform_get_irq(pdev, 0);
238 struct intel_pmic_gpio_platform_data *pdata = dev->platform_data;
239
240 struct pmic_gpio *pg;
241 int retval;
242 int i;
243
244 if (irq < 0) {
245 dev_dbg(dev, "no IRQ line\n");
246 return -EINVAL;
247 }
248
249 if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
250 dev_dbg(dev, "incorrect or missing platform data\n");
251 return -EINVAL;
252 }
253
254 pg = kzalloc(sizeof(*pg), GFP_KERNEL);
255 if (!pg)
256 return -ENOMEM;
257
258 dev_set_drvdata(dev, pg);
259
260 pg->irq = irq;
261 /* setting up SRAM mapping for GPIOINT register */
262 pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
263 if (!pg->gpiointr) {
264 pr_err("Can not map GPIOINT\n");
265 retval = -EINVAL;
266 goto err2;
267 }
268 pg->irq_base = pdata->irq_base;
269 pg->chip.label = "intel_pmic";
270 pg->chip.direction_input = pmic_gpio_direction_input;
271 pg->chip.direction_output = pmic_gpio_direction_output;
272 pg->chip.get = pmic_gpio_get;
273 pg->chip.set = pmic_gpio_set;
274 pg->chip.to_irq = pmic_gpio_to_irq;
275 pg->chip.base = pdata->gpio_base;
276 pg->chip.ngpio = NUM_GPIO;
277 pg->chip.can_sleep = 1;
278 pg->chip.parent = dev;
279
280 mutex_init(&pg->buslock);
281
282 pg->chip.parent = dev;
283 retval = gpiochip_add_data(&pg->chip, pg);
284 if (retval) {
285 pr_err("Can not add pmic gpio chip\n");
286 goto err;
287 }
288
289 retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
290 if (retval) {
291 pr_warn("Interrupt request failed\n");
292 goto fail_request_irq;
293 }
294
295 for (i = 0; i < 8; i++) {
296 irq_set_chip_and_handler_name(i + pg->irq_base,
297 &pmic_irqchip,
298 handle_simple_irq,
299 "demux");
300 irq_set_chip_data(i + pg->irq_base, pg);
301 }
302 return 0;
303
304fail_request_irq:
305 gpiochip_remove(&pg->chip);
306err:
307 iounmap(pg->gpiointr);
308err2:
309 kfree(pg);
310 return retval;
311}
312
/*
 * Register a platform driver at the same time; this supports the
 * SFI 0.81 firmware, which enumerates the device by name.
 */
static struct platform_driver platform_pmic_gpio_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
	.probe = platform_pmic_gpio_probe,
};
321
/* Module entry: register the platform driver (subsys_initcall below). */
static int __init platform_pmic_gpio_init(void)
{
	return platform_driver_register(&platform_pmic_gpio_driver);
}
diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h
deleted file mode 100644
index 920109a29191..000000000000
--- a/include/linux/intel_pmic_gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef LINUX_INTEL_PMIC_H
2#define LINUX_INTEL_PMIC_H
3
/* Board-supplied configuration for the PMIC GPIO expander driver */
struct intel_pmic_gpio_platform_data {
	/* the first IRQ of the chip */
	unsigned irq_base;
	/* number assigned to the first GPIO */
	unsigned gpio_base;
	/* sram address for gpiointr register, the langwell chip will map
	 * the PMIC spi GPIO expander's GPIOINTR register in sram.
	 */
	unsigned gpiointr;
};
14
15#endif