Diffstat (limited to 'arch/x86/kernel')
43 files changed, 1876 insertions, 1029 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index cd40aba6aa95..9a5ed58f09dc 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -94,6 +94,53 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; | |||
94 | 94 | ||
95 | 95 | ||
96 | /* | 96 | /* |
97 | * ISA irqs by default are the first 16 gsis but can be | ||
98 | * any gsi as specified by an interrupt source override. | ||
99 | */ | ||
100 | static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = { | ||
101 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 | ||
102 | }; | ||
103 | |||
104 | static unsigned int gsi_to_irq(unsigned int gsi) | ||
105 | { | ||
106 | unsigned int irq = gsi + NR_IRQS_LEGACY; | ||
107 | unsigned int i; | ||
108 | |||
109 | for (i = 0; i < NR_IRQS_LEGACY; i++) { | ||
110 | if (isa_irq_to_gsi[i] == gsi) { | ||
111 | return i; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | /* Provide an identity mapping of gsi == irq | ||
116 | * except on truly weird platforms that have | ||
117 | * non isa irqs in the first 16 gsis. | ||
118 | */ | ||
119 | if (gsi >= NR_IRQS_LEGACY) | ||
120 | irq = gsi; | ||
121 | else | ||
122 | irq = gsi_end + 1 + gsi; | ||
123 | |||
124 | return irq; | ||
125 | } | ||
126 | |||
127 | static u32 irq_to_gsi(int irq) | ||
128 | { | ||
129 | unsigned int gsi; | ||
130 | |||
131 | if (irq < NR_IRQS_LEGACY) | ||
132 | gsi = isa_irq_to_gsi[irq]; | ||
133 | else if (irq <= gsi_end) | ||
134 | gsi = irq; | ||
135 | else if (irq <= (gsi_end + NR_IRQS_LEGACY)) | ||
136 | gsi = irq - gsi_end; | ||
137 | else | ||
138 | gsi = 0xffffffff; | ||
139 | |||
140 | return gsi; | ||
141 | } | ||
142 | |||
143 | /* | ||
97 | * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, | 144 | * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, |
98 | * to map the target physical address. The problem is that set_fixmap() | 145 | * to map the target physical address. The problem is that set_fixmap() |
99 | * provides a single page, and it is possible that the page is not | 146 | * provides a single page, and it is possible that the page is not |
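The mapping rules these two new helpers encode are easiest to see in isolation. Below is a minimal stand-alone sketch of the same policy (user-space, with gsi_end, the highest GSI any IOAPIC claims, passed as a parameter rather than read from the global this series introduces):

#include <stdio.h>

#define NR_IRQS_LEGACY 16

/* Identity by default; interrupt source overrides rewrite entries. */
static unsigned int isa_irq_to_gsi[NR_IRQS_LEGACY] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static unsigned int gsi_to_irq(unsigned int gsi, unsigned int gsi_end)
{
	unsigned int i;

	/* An ISA IRQ always wins its (possibly overridden) GSI. */
	for (i = 0; i < NR_IRQS_LEGACY; i++)
		if (isa_irq_to_gsi[i] == gsi)
			return i;

	/* Otherwise identity, except that non-ISA sources below 16 are
	 * displaced above every GSI an IOAPIC can produce. */
	return gsi >= NR_IRQS_LEGACY ? gsi : gsi_end + 1 + gsi;
}

int main(void)
{
	isa_irq_to_gsi[9] = 20;			/* SCI override: IRQ 9 -> GSI 20 */
	printf("%u\n", gsi_to_irq(20, 55));	/* 9: override hit */
	printf("%u\n", gsi_to_irq(33, 55));	/* 33: identity */
	printf("%u\n", gsi_to_irq(9, 55));	/* 65: displaced, 55 + 1 + 9 */
	return 0;
}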
@@ -313,7 +360,7 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) | |||
313 | /* | 360 | /* |
314 | * Parse Interrupt Source Override for the ACPI SCI | 361 | * Parse Interrupt Source Override for the ACPI SCI |
315 | */ | 362 | */ |
316 | static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) | 363 | static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi) |
317 | { | 364 | { |
318 | if (trigger == 0) /* compatible SCI trigger is level */ | 365 | if (trigger == 0) /* compatible SCI trigger is level */ |
319 | trigger = 3; | 366 | trigger = 3; |
@@ -333,7 +380,7 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) | |||
333 | * If GSI is < 16, this will update its flags, | 380 | * If GSI is < 16, this will update its flags, |
334 | * else it will create a new mp_irqs[] entry. | 381 | * else it will create a new mp_irqs[] entry. |
335 | */ | 382 | */ |
336 | mp_override_legacy_irq(gsi, polarity, trigger, gsi); | 383 | mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); |
337 | 384 | ||
338 | /* | 385 | /* |
339 | * stash over-ride to indicate we've been here | 386 | * stash over-ride to indicate we've been here |
@@ -357,9 +404,10 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, | |||
357 | acpi_table_print_madt_entry(header); | 404 | acpi_table_print_madt_entry(header); |
358 | 405 | ||
359 | if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { | 406 | if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { |
360 | acpi_sci_ioapic_setup(intsrc->global_irq, | 407 | acpi_sci_ioapic_setup(intsrc->source_irq, |
361 | intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, | 408 | intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, |
362 | (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2); | 409 | (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, |
410 | intsrc->global_irq); | ||
363 | return 0; | 411 | return 0; |
364 | } | 412 | } |
365 | 413 | ||
@@ -448,7 +496,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) | |||
448 | 496 | ||
449 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) | 497 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) |
450 | { | 498 | { |
451 | *irq = gsi; | 499 | *irq = gsi_to_irq(gsi); |
452 | 500 | ||
453 | #ifdef CONFIG_X86_IO_APIC | 501 | #ifdef CONFIG_X86_IO_APIC |
454 | if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) | 502 | if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) |
@@ -458,6 +506,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) | |||
458 | return 0; | 506 | return 0; |
459 | } | 507 | } |
460 | 508 | ||
509 | int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) | ||
510 | { | ||
511 | if (isa_irq >= 16) | ||
512 | return -1; | ||
513 | *gsi = irq_to_gsi(isa_irq); | ||
514 | return 0; | ||
515 | } | ||
516 | |||
461 | /* | 517 | /* |
462 | * success: return IRQ number (>=0) | 518 | * success: return IRQ number (>=0) |
463 | * failure: return < 0 | 519 | * failure: return < 0 |
@@ -482,7 +538,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
482 | plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); | 538 | plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); |
483 | } | 539 | } |
484 | #endif | 540 | #endif |
485 | irq = plat_gsi; | 541 | irq = gsi_to_irq(plat_gsi); |
486 | 542 | ||
487 | return irq; | 543 | return irq; |
488 | } | 544 | } |
@@ -867,29 +923,6 @@ static int __init acpi_parse_madt_lapic_entries(void) | |||
867 | extern int es7000_plat; | 923 | extern int es7000_plat; |
868 | #endif | 924 | #endif |
869 | 925 | ||
870 | int __init acpi_probe_gsi(void) | ||
871 | { | ||
872 | int idx; | ||
873 | int gsi; | ||
874 | int max_gsi = 0; | ||
875 | |||
876 | if (acpi_disabled) | ||
877 | return 0; | ||
878 | |||
879 | if (!acpi_ioapic) | ||
880 | return 0; | ||
881 | |||
882 | max_gsi = 0; | ||
883 | for (idx = 0; idx < nr_ioapics; idx++) { | ||
884 | gsi = mp_gsi_routing[idx].gsi_end; | ||
885 | |||
886 | if (gsi > max_gsi) | ||
887 | max_gsi = gsi; | ||
888 | } | ||
889 | |||
890 | return max_gsi + 1; | ||
891 | } | ||
892 | |||
893 | static void assign_to_mp_irq(struct mpc_intsrc *m, | 926 | static void assign_to_mp_irq(struct mpc_intsrc *m, |
894 | struct mpc_intsrc *mp_irq) | 927 | struct mpc_intsrc *mp_irq) |
895 | { | 928 | { |
@@ -947,13 +980,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) | |||
947 | mp_irq.dstirq = pin; /* INTIN# */ | 980 | mp_irq.dstirq = pin; /* INTIN# */ |
948 | 981 | ||
949 | save_mp_irq(&mp_irq); | 982 | save_mp_irq(&mp_irq); |
983 | |||
984 | isa_irq_to_gsi[bus_irq] = gsi; | ||
950 | } | 985 | } |
951 | 986 | ||
952 | void __init mp_config_acpi_legacy_irqs(void) | 987 | void __init mp_config_acpi_legacy_irqs(void) |
953 | { | 988 | { |
954 | int i; | 989 | int i; |
955 | int ioapic; | ||
956 | unsigned int dstapic; | ||
957 | struct mpc_intsrc mp_irq; | 990 | struct mpc_intsrc mp_irq; |
958 | 991 | ||
959 | #if defined (CONFIG_MCA) || defined (CONFIG_EISA) | 992 | #if defined (CONFIG_MCA) || defined (CONFIG_EISA) |
@@ -974,19 +1007,27 @@ void __init mp_config_acpi_legacy_irqs(void) | |||
974 | #endif | 1007 | #endif |
975 | 1008 | ||
976 | /* | 1009 | /* |
977 | * Locate the IOAPIC that manages the ISA IRQs (0-15). | ||
978 | */ | ||
979 | ioapic = mp_find_ioapic(0); | ||
980 | if (ioapic < 0) | ||
981 | return; | ||
982 | dstapic = mp_ioapics[ioapic].apicid; | ||
983 | |||
984 | /* | ||
985 | * Use the default configuration for the IRQs 0-15. Unless | 1010 | * Use the default configuration for the IRQs 0-15. Unless |
986 | * overridden by (MADT) interrupt source override entries. | 1011 | * overridden by (MADT) interrupt source override entries. |
987 | */ | 1012 | */ |
988 | for (i = 0; i < 16; i++) { | 1013 | for (i = 0; i < 16; i++) { |
1014 | int ioapic, pin; | ||
1015 | unsigned int dstapic; | ||
989 | int idx; | 1016 | int idx; |
1017 | u32 gsi; | ||
1018 | |||
1019 | /* Locate the gsi that irq i maps to. */ | ||
1020 | if (acpi_isa_irq_to_gsi(i, &gsi)) | ||
1021 | continue; | ||
1022 | |||
1023 | /* | ||
1024 | * Locate the IOAPIC that manages the ISA IRQ. | ||
1025 | */ | ||
1026 | ioapic = mp_find_ioapic(gsi); | ||
1027 | if (ioapic < 0) | ||
1028 | continue; | ||
1029 | pin = mp_find_ioapic_pin(ioapic, gsi); | ||
1030 | dstapic = mp_ioapics[ioapic].apicid; | ||
990 | 1031 | ||
991 | for (idx = 0; idx < mp_irq_entries; idx++) { | 1032 | for (idx = 0; idx < mp_irq_entries; idx++) { |
992 | struct mpc_intsrc *irq = mp_irqs + idx; | 1033 | struct mpc_intsrc *irq = mp_irqs + idx; |
@@ -996,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void) | |||
996 | break; | 1037 | break; |
997 | 1038 | ||
998 | /* Do we already have a mapping for this IOAPIC pin */ | 1039 | /* Do we already have a mapping for this IOAPIC pin */ |
999 | if (irq->dstapic == dstapic && irq->dstirq == i) | 1040 | if (irq->dstapic == dstapic && irq->dstirq == pin) |
1000 | break; | 1041 | break; |
1001 | } | 1042 | } |
1002 | 1043 | ||
@@ -1011,7 +1052,7 @@ void __init mp_config_acpi_legacy_irqs(void) | |||
1011 | mp_irq.dstapic = dstapic; | 1052 | mp_irq.dstapic = dstapic; |
1012 | mp_irq.irqtype = mp_INT; | 1053 | mp_irq.irqtype = mp_INT; |
1013 | mp_irq.srcbusirq = i; /* Identity mapped */ | 1054 | mp_irq.srcbusirq = i; /* Identity mapped */ |
1014 | mp_irq.dstirq = i; | 1055 | mp_irq.dstirq = pin; |
1015 | 1056 | ||
1016 | save_mp_irq(&mp_irq); | 1057 | save_mp_irq(&mp_irq); |
1017 | } | 1058 | } |
@@ -1076,11 +1117,6 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
1076 | 1117 | ||
1077 | ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); | 1118 | ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); |
1078 | 1119 | ||
1079 | #ifdef CONFIG_X86_32 | ||
1080 | if (ioapic_renumber_irq) | ||
1081 | gsi = ioapic_renumber_irq(ioapic, gsi); | ||
1082 | #endif | ||
1083 | |||
1084 | if (ioapic_pin > MP_MAX_IOAPIC_PIN) { | 1120 | if (ioapic_pin > MP_MAX_IOAPIC_PIN) { |
1085 | printk(KERN_ERR "Invalid reference to IOAPIC pin " | 1121 | printk(KERN_ERR "Invalid reference to IOAPIC pin " |
1086 | "%d-%d\n", mp_ioapics[ioapic].apicid, | 1122 | "%d-%d\n", mp_ioapics[ioapic].apicid, |
@@ -1094,7 +1130,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
1094 | set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, | 1130 | set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, |
1095 | trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, | 1131 | trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, |
1096 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); | 1132 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); |
1097 | io_apic_set_pci_routing(dev, gsi, &irq_attr); | 1133 | io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr); |
1098 | 1134 | ||
1099 | return gsi; | 1135 | return gsi; |
1100 | } | 1136 | } |
@@ -1154,7 +1190,8 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
1154 | * pretend we got one so we can set the SCI flags. | 1190 | * pretend we got one so we can set the SCI flags. |
1155 | */ | 1191 | */ |
1156 | if (!acpi_sci_override_gsi) | 1192 | if (!acpi_sci_override_gsi) |
1157 | acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0); | 1193 | acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, |
1194 | acpi_gbl_FADT.sci_interrupt); | ||
1158 | 1195 | ||
1159 | /* Fill in identity legacy mappings where no override */ | 1196 | /* Fill in identity legacy mappings where no override */ |
1160 | mp_config_acpi_legacy_irqs(); | 1197 | mp_config_acpi_legacy_irqs(); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1a160d5d44d0..70237732a6c7 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) | |||
194 | } | 194 | } |
195 | 195 | ||
196 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 196 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
197 | extern u8 *__smp_locks[], *__smp_locks_end[]; | 197 | extern s32 __smp_locks[], __smp_locks_end[]; |
198 | static void *text_poke_early(void *addr, const void *opcode, size_t len); | 198 | static void *text_poke_early(void *addr, const void *opcode, size_t len); |
199 | 199 | ||
200 | /* Replace instructions with better alternatives for this CPU type. | 200 | /* Replace instructions with better alternatives for this CPU type. |
@@ -235,37 +235,41 @@ void __init_or_module apply_alternatives(struct alt_instr *start, | |||
235 | 235 | ||
236 | #ifdef CONFIG_SMP | 236 | #ifdef CONFIG_SMP |
237 | 237 | ||
238 | static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) | 238 | static void alternatives_smp_lock(const s32 *start, const s32 *end, |
239 | u8 *text, u8 *text_end) | ||
239 | { | 240 | { |
240 | u8 **ptr; | 241 | const s32 *poff; |
241 | 242 | ||
242 | mutex_lock(&text_mutex); | 243 | mutex_lock(&text_mutex); |
243 | for (ptr = start; ptr < end; ptr++) { | 244 | for (poff = start; poff < end; poff++) { |
244 | if (*ptr < text) | 245 | u8 *ptr = (u8 *)poff + *poff; |
245 | continue; | 246 | |
246 | if (*ptr > text_end) | 247 | if (!*poff || ptr < text || ptr >= text_end) |
247 | continue; | 248 | continue; |
248 | /* turn DS segment override prefix into lock prefix */ | 249 | /* turn DS segment override prefix into lock prefix */ |
249 | text_poke(*ptr, ((unsigned char []){0xf0}), 1); | 250 | if (*ptr == 0x3e) |
251 | text_poke(ptr, ((unsigned char []){0xf0}), 1); | ||
250 | }; | 252 | }; |
251 | mutex_unlock(&text_mutex); | 253 | mutex_unlock(&text_mutex); |
252 | } | 254 | } |
253 | 255 | ||
254 | static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) | 256 | static void alternatives_smp_unlock(const s32 *start, const s32 *end, |
257 | u8 *text, u8 *text_end) | ||
255 | { | 258 | { |
256 | u8 **ptr; | 259 | const s32 *poff; |
257 | 260 | ||
258 | if (noreplace_smp) | 261 | if (noreplace_smp) |
259 | return; | 262 | return; |
260 | 263 | ||
261 | mutex_lock(&text_mutex); | 264 | mutex_lock(&text_mutex); |
262 | for (ptr = start; ptr < end; ptr++) { | 265 | for (poff = start; poff < end; poff++) { |
263 | if (*ptr < text) | 266 | u8 *ptr = (u8 *)poff + *poff; |
264 | continue; | 267 | |
265 | if (*ptr > text_end) | 268 | if (!*poff || ptr < text || ptr >= text_end) |
266 | continue; | 269 | continue; |
267 | /* turn lock prefix into DS segment override prefix */ | 270 | /* turn lock prefix into DS segment override prefix */ |
268 | text_poke(*ptr, ((unsigned char []){0x3E}), 1); | 271 | if (*ptr == 0xf0) |
272 | text_poke(ptr, ((unsigned char []){0x3E}), 1); | ||
269 | }; | 273 | }; |
270 | mutex_unlock(&text_mutex); | 274 | mutex_unlock(&text_mutex); |
271 | } | 275 | } |
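The table switches from absolute u8 * entries to self-relative s32 offsets: each slot stores the distance from its own address to the lock prefix, which halves the table on 64-bit and needs no relocations. In the kernel the entries are emitted at build time, roughly as ".long 671f - ." in the LOCK_PREFIX asm (an assumption, that macro is not part of this hunk). A stand-alone sketch of the store/recover round trip:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Toy arena: slot 0 plays the role of a .smp_locks entry, the bytes
 * from offset 16 on play the role of .text. */
static uint8_t arena[64];

int main(void)
{
	uint8_t *text = arena + 16;
	int32_t off, stored;

	text[3] = 0xf0;					/* lock prefix */
	off = (int32_t)(&text[3] - &arena[0]);		/* target - slot */
	memcpy(&arena[0], &off, sizeof(off));		/* store s32 entry */

	/* Recovery, as in alternatives_smp_{lock,unlock}():
	 * ptr = (u8 *)poff + *poff */
	memcpy(&stored, &arena[0], sizeof(stored));
	printf("recovered byte: 0x%02x\n", arena[stored]);	/* 0xf0 */
	return 0;
}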
@@ -276,8 +280,8 @@ struct smp_alt_module { | |||
276 | char *name; | 280 | char *name; |
277 | 281 | ||
278 | /* ptrs to lock prefixes */ | 282 | /* ptrs to lock prefixes */ |
279 | u8 **locks; | 283 | const s32 *locks; |
280 | u8 **locks_end; | 284 | const s32 *locks_end; |
281 | 285 | ||
282 | /* .text segment, needed to avoid patching init code ;) */ | 286 | /* .text segment, needed to avoid patching init code ;) */ |
283 | u8 *text; | 287 | u8 *text; |
@@ -398,16 +402,19 @@ void alternatives_smp_switch(int smp) | |||
398 | int alternatives_text_reserved(void *start, void *end) | 402 | int alternatives_text_reserved(void *start, void *end) |
399 | { | 403 | { |
400 | struct smp_alt_module *mod; | 404 | struct smp_alt_module *mod; |
401 | u8 **ptr; | 405 | const s32 *poff; |
402 | u8 *text_start = start; | 406 | u8 *text_start = start; |
403 | u8 *text_end = end; | 407 | u8 *text_end = end; |
404 | 408 | ||
405 | list_for_each_entry(mod, &smp_alt_modules, next) { | 409 | list_for_each_entry(mod, &smp_alt_modules, next) { |
406 | if (mod->text > text_end || mod->text_end < text_start) | 410 | if (mod->text > text_end || mod->text_end < text_start) |
407 | continue; | 411 | continue; |
408 | for (ptr = mod->locks; ptr < mod->locks_end; ptr++) | 412 | for (poff = mod->locks; poff < mod->locks_end; poff++) { |
409 | if (text_start <= *ptr && text_end >= *ptr) | 413 | const u8 *ptr = (const u8 *)poff + *poff; |
414 | |||
415 | if (text_start <= ptr && text_end > ptr) | ||
410 | return 1; | 416 | return 1; |
417 | } | ||
411 | } | 418 | } |
412 | 419 | ||
413 | return 0; | 420 | return 0; |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f854d89b7edf..fa5a1474cd18 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -731,18 +731,22 @@ static bool increase_address_space(struct protection_domain *domain, | |||
731 | 731 | ||
732 | static u64 *alloc_pte(struct protection_domain *domain, | 732 | static u64 *alloc_pte(struct protection_domain *domain, |
733 | unsigned long address, | 733 | unsigned long address, |
734 | int end_lvl, | 734 | unsigned long page_size, |
735 | u64 **pte_page, | 735 | u64 **pte_page, |
736 | gfp_t gfp) | 736 | gfp_t gfp) |
737 | { | 737 | { |
738 | int level, end_lvl; | ||
738 | u64 *pte, *page; | 739 | u64 *pte, *page; |
739 | int level; | 740 | |
741 | BUG_ON(!is_power_of_2(page_size)); | ||
740 | 742 | ||
741 | while (address > PM_LEVEL_SIZE(domain->mode)) | 743 | while (address > PM_LEVEL_SIZE(domain->mode)) |
742 | increase_address_space(domain, gfp); | 744 | increase_address_space(domain, gfp); |
743 | 745 | ||
744 | level = domain->mode - 1; | 746 | level = domain->mode - 1; |
745 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | 747 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
748 | address = PAGE_SIZE_ALIGN(address, page_size); | ||
749 | end_lvl = PAGE_SIZE_LEVEL(page_size); | ||
746 | 750 | ||
747 | while (level > end_lvl) { | 751 | while (level > end_lvl) { |
748 | if (!IOMMU_PTE_PRESENT(*pte)) { | 752 | if (!IOMMU_PTE_PRESENT(*pte)) { |
@@ -752,6 +756,10 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
752 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | 756 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); |
753 | } | 757 | } |
754 | 758 | ||
759 | /* No level skipping support yet */ | ||
760 | if (PM_PTE_LEVEL(*pte) != level) | ||
761 | return NULL; | ||
762 | |||
755 | level -= 1; | 763 | level -= 1; |
756 | 764 | ||
757 | pte = IOMMU_PTE_PAGE(*pte); | 765 | pte = IOMMU_PTE_PAGE(*pte); |
@@ -769,28 +777,47 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
769 | * This function checks if there is a PTE for a given dma address. If | 777 | * This function checks if there is a PTE for a given dma address. If |
770 | * there is one, it returns the pointer to it. | 778 | * there is one, it returns the pointer to it. |
771 | */ | 779 | */ |
772 | static u64 *fetch_pte(struct protection_domain *domain, | 780 | static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) |
773 | unsigned long address, int map_size) | ||
774 | { | 781 | { |
775 | int level; | 782 | int level; |
776 | u64 *pte; | 783 | u64 *pte; |
777 | 784 | ||
778 | level = domain->mode - 1; | 785 | if (address > PM_LEVEL_SIZE(domain->mode)) |
779 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | 786 | return NULL; |
787 | |||
788 | level = domain->mode - 1; | ||
789 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
780 | 790 | ||
781 | while (level > map_size) { | 791 | while (level > 0) { |
792 | |||
793 | /* Not Present */ | ||
782 | if (!IOMMU_PTE_PRESENT(*pte)) | 794 | if (!IOMMU_PTE_PRESENT(*pte)) |
783 | return NULL; | 795 | return NULL; |
784 | 796 | ||
797 | /* Large PTE */ | ||
798 | if (PM_PTE_LEVEL(*pte) == 0x07) { | ||
799 | unsigned long pte_mask, __pte; | ||
800 | |||
801 | /* | ||
802 | * If we have a series of large PTEs, make | ||
803 | * sure to return a pointer to the first one. | ||
804 | */ | ||
805 | pte_mask = PTE_PAGE_SIZE(*pte); | ||
806 | pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); | ||
807 | __pte = ((unsigned long)pte) & pte_mask; | ||
808 | |||
809 | return (u64 *)__pte; | ||
810 | } | ||
811 | |||
812 | /* No level skipping support yet */ | ||
813 | if (PM_PTE_LEVEL(*pte) != level) | ||
814 | return NULL; | ||
815 | |||
785 | level -= 1; | 816 | level -= 1; |
786 | 817 | ||
818 | /* Walk to the next level */ | ||
787 | pte = IOMMU_PTE_PAGE(*pte); | 819 | pte = IOMMU_PTE_PAGE(*pte); |
788 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | 820 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
789 | |||
790 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { | ||
791 | pte = NULL; | ||
792 | break; | ||
793 | } | ||
794 | } | 821 | } |
795 | 822 | ||
796 | return pte; | 823 | return pte; |
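The magic level value 7 marks a large PTE whose size is encoded in the low address bits of the entry itself. A worked note on the helpers used here, paraphrased from amd_iommu_types.h (treat the exact definitions as assumptions and verify against the tree):

/*
 *   PTE_PAGE_SIZE(pte)        = 1ULL << (1 + ffz((pte) | 0xfffULL))
 *   PAGE_SIZE_PTE_COUNT(size) = 1ULL << ((__ffs(size) - 12) % 9)
 *
 * A 2 MiB large PTE has bits 12..19 of its address field set and bit 20
 * clear, so ffz() lands on bit 20 and PTE_PAGE_SIZE yields 1 << 21 = 2 MiB;
 * PAGE_SIZE_PTE_COUNT(2 MiB) = 1 << ((21 - 12) % 9) = 1, so pte_mask above
 * aligns the pointer to a single 8-byte entry.  For a 32 KiB mapping the
 * count is 8, and the mask rounds the pointer down to the first of the
 * 8 replicated PTEs.
 */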
@@ -807,44 +834,84 @@ static int iommu_map_page(struct protection_domain *dom, | |||
807 | unsigned long bus_addr, | 834 | unsigned long bus_addr, |
808 | unsigned long phys_addr, | 835 | unsigned long phys_addr, |
809 | int prot, | 836 | int prot, |
810 | int map_size) | 837 | unsigned long page_size) |
811 | { | 838 | { |
812 | u64 __pte, *pte; | 839 | u64 __pte, *pte; |
813 | 840 | int i, count; | |
814 | bus_addr = PAGE_ALIGN(bus_addr); | ||
815 | phys_addr = PAGE_ALIGN(phys_addr); | ||
816 | |||
817 | BUG_ON(!PM_ALIGNED(map_size, bus_addr)); | ||
818 | BUG_ON(!PM_ALIGNED(map_size, phys_addr)); | ||
819 | 841 | ||
820 | if (!(prot & IOMMU_PROT_MASK)) | 842 | if (!(prot & IOMMU_PROT_MASK)) |
821 | return -EINVAL; | 843 | return -EINVAL; |
822 | 844 | ||
823 | pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); | 845 | bus_addr = PAGE_ALIGN(bus_addr); |
846 | phys_addr = PAGE_ALIGN(phys_addr); | ||
847 | count = PAGE_SIZE_PTE_COUNT(page_size); | ||
848 | pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); | ||
849 | |||
850 | for (i = 0; i < count; ++i) | ||
851 | if (IOMMU_PTE_PRESENT(pte[i])) | ||
852 | return -EBUSY; | ||
824 | 853 | ||
825 | if (IOMMU_PTE_PRESENT(*pte)) | 854 | if (page_size > PAGE_SIZE) { |
826 | return -EBUSY; | 855 | __pte = PAGE_SIZE_PTE(phys_addr, page_size); |
856 | __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; | ||
857 | } else | ||
858 | __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC; | ||
827 | 859 | ||
828 | __pte = phys_addr | IOMMU_PTE_P; | ||
829 | if (prot & IOMMU_PROT_IR) | 860 | if (prot & IOMMU_PROT_IR) |
830 | __pte |= IOMMU_PTE_IR; | 861 | __pte |= IOMMU_PTE_IR; |
831 | if (prot & IOMMU_PROT_IW) | 862 | if (prot & IOMMU_PROT_IW) |
832 | __pte |= IOMMU_PTE_IW; | 863 | __pte |= IOMMU_PTE_IW; |
833 | 864 | ||
834 | *pte = __pte; | 865 | for (i = 0; i < count; ++i) |
866 | pte[i] = __pte; | ||
835 | 867 | ||
836 | update_domain(dom); | 868 | update_domain(dom); |
837 | 869 | ||
838 | return 0; | 870 | return 0; |
839 | } | 871 | } |
840 | 872 | ||
841 | static void iommu_unmap_page(struct protection_domain *dom, | 873 | static unsigned long iommu_unmap_page(struct protection_domain *dom, |
842 | unsigned long bus_addr, int map_size) | 874 | unsigned long bus_addr, |
875 | unsigned long page_size) | ||
843 | { | 876 | { |
844 | u64 *pte = fetch_pte(dom, bus_addr, map_size); | 877 | unsigned long long unmap_size, unmapped; |
878 | u64 *pte; | ||
879 | |||
880 | BUG_ON(!is_power_of_2(page_size)); | ||
881 | |||
882 | unmapped = 0; | ||
845 | 883 | ||
846 | if (pte) | 884 | while (unmapped < page_size) { |
847 | *pte = 0; | 885 | |
886 | pte = fetch_pte(dom, bus_addr); | ||
887 | |||
888 | if (!pte) { | ||
889 | /* | ||
890 | * No PTE for this address | ||
891 | * move forward in 4kb steps | ||
892 | */ | ||
893 | unmap_size = PAGE_SIZE; | ||
894 | } else if (PM_PTE_LEVEL(*pte) == 0) { | ||
895 | /* 4kb PTE found for this address */ | ||
896 | unmap_size = PAGE_SIZE; | ||
897 | *pte = 0ULL; | ||
898 | } else { | ||
899 | int count, i; | ||
900 | |||
901 | /* Large PTE found which maps this address */ | ||
902 | unmap_size = PTE_PAGE_SIZE(*pte); | ||
903 | count = PAGE_SIZE_PTE_COUNT(unmap_size); | ||
904 | for (i = 0; i < count; i++) | ||
905 | pte[i] = 0ULL; | ||
906 | } | ||
907 | |||
908 | bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; | ||
909 | unmapped += unmap_size; | ||
910 | } | ||
911 | |||
912 | BUG_ON(!is_power_of_2(unmapped)); | ||
913 | |||
914 | return unmapped; | ||
848 | } | 915 | } |
849 | 916 | ||
850 | /* | 917 | /* |
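Page sizes that fall between the native levels are expressed by replicating entries, which is what the count loops in iommu_map_page() and iommu_unmap_page() implement. A small sketch of how many PTEs each size occupies (using __builtin_ctzl in place of the kernel's __ffs, which agrees for power-of-two sizes):

#include <stdio.h>

static unsigned long pte_count(unsigned long pgsize)
{
	return 1UL << ((__builtin_ctzl(pgsize) - 12) % 9);
}

int main(void)
{
	printf("4K  -> %lu PTE(s)\n", pte_count(0x1000));	/* 1 */
	printf("32K -> %lu PTE(s)\n", pte_count(0x8000));	/* 8 replicated */
	printf("2M  -> %lu PTE(s)\n", pte_count(0x200000));	/* 1, level 1 */
	printf("1G  -> %lu PTE(s)\n", pte_count(0x40000000));	/* 1, level 2 */
	return 0;
}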
@@ -878,7 +945,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
878 | for (addr = e->address_start; addr < e->address_end; | 945 | for (addr = e->address_start; addr < e->address_end; |
879 | addr += PAGE_SIZE) { | 946 | addr += PAGE_SIZE) { |
880 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, | 947 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, |
881 | PM_MAP_4k); | 948 | PAGE_SIZE); |
882 | if (ret) | 949 | if (ret) |
883 | return ret; | 950 | return ret; |
884 | /* | 951 | /* |
@@ -1006,7 +1073,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
1006 | u64 *pte, *pte_page; | 1073 | u64 *pte, *pte_page; |
1007 | 1074 | ||
1008 | for (i = 0; i < num_ptes; ++i) { | 1075 | for (i = 0; i < num_ptes; ++i) { |
1009 | pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, | 1076 | pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, |
1010 | &pte_page, gfp); | 1077 | &pte_page, gfp); |
1011 | if (!pte) | 1078 | if (!pte) |
1012 | goto out_free; | 1079 | goto out_free; |
@@ -1042,7 +1109,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
1042 | for (i = dma_dom->aperture[index]->offset; | 1109 | for (i = dma_dom->aperture[index]->offset; |
1043 | i < dma_dom->aperture_size; | 1110 | i < dma_dom->aperture_size; |
1044 | i += PAGE_SIZE) { | 1111 | i += PAGE_SIZE) { |
1045 | u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); | 1112 | u64 *pte = fetch_pte(&dma_dom->domain, i); |
1046 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 1113 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
1047 | continue; | 1114 | continue; |
1048 | 1115 | ||
@@ -1712,7 +1779,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1712 | 1779 | ||
1713 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; | 1780 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; |
1714 | if (!pte) { | 1781 | if (!pte) { |
1715 | pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, | 1782 | pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, |
1716 | GFP_ATOMIC); | 1783 | GFP_ATOMIC); |
1717 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; | 1784 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; |
1718 | } else | 1785 | } else |
@@ -2439,12 +2506,11 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, | |||
2439 | return ret; | 2506 | return ret; |
2440 | } | 2507 | } |
2441 | 2508 | ||
2442 | static int amd_iommu_map_range(struct iommu_domain *dom, | 2509 | static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, |
2443 | unsigned long iova, phys_addr_t paddr, | 2510 | phys_addr_t paddr, int gfp_order, int iommu_prot) |
2444 | size_t size, int iommu_prot) | ||
2445 | { | 2511 | { |
2512 | unsigned long page_size = 0x1000UL << gfp_order; | ||
2446 | struct protection_domain *domain = dom->priv; | 2513 | struct protection_domain *domain = dom->priv; |
2447 | unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE); | ||
2448 | int prot = 0; | 2514 | int prot = 0; |
2449 | int ret; | 2515 | int ret; |
2450 | 2516 | ||
@@ -2453,61 +2519,50 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2453 | if (iommu_prot & IOMMU_WRITE) | 2519 | if (iommu_prot & IOMMU_WRITE) |
2454 | prot |= IOMMU_PROT_IW; | 2520 | prot |= IOMMU_PROT_IW; |
2455 | 2521 | ||
2456 | iova &= PAGE_MASK; | ||
2457 | paddr &= PAGE_MASK; | ||
2458 | |||
2459 | mutex_lock(&domain->api_lock); | 2522 | mutex_lock(&domain->api_lock); |
2460 | 2523 | ret = iommu_map_page(domain, iova, paddr, prot, page_size); | |
2461 | for (i = 0; i < npages; ++i) { | ||
2462 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); | ||
2463 | if (ret) | ||
2464 | return ret; | ||
2465 | |||
2466 | iova += PAGE_SIZE; | ||
2467 | paddr += PAGE_SIZE; | ||
2468 | } | ||
2469 | |||
2470 | mutex_unlock(&domain->api_lock); | 2524 | mutex_unlock(&domain->api_lock); |
2471 | 2525 | ||
2472 | return 0; | 2526 | return ret; |
2473 | } | 2527 | } |
2474 | 2528 | ||
2475 | static void amd_iommu_unmap_range(struct iommu_domain *dom, | 2529 | static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, |
2476 | unsigned long iova, size_t size) | 2530 | int gfp_order) |
2477 | { | 2531 | { |
2478 | |||
2479 | struct protection_domain *domain = dom->priv; | 2532 | struct protection_domain *domain = dom->priv; |
2480 | unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE); | 2533 | unsigned long page_size, unmap_size; |
2481 | 2534 | ||
2482 | iova &= PAGE_MASK; | 2535 | page_size = 0x1000UL << gfp_order; |
2483 | 2536 | ||
2484 | mutex_lock(&domain->api_lock); | 2537 | mutex_lock(&domain->api_lock); |
2485 | 2538 | unmap_size = iommu_unmap_page(domain, iova, page_size); | |
2486 | for (i = 0; i < npages; ++i) { | 2539 | mutex_unlock(&domain->api_lock); |
2487 | iommu_unmap_page(domain, iova, PM_MAP_4k); | ||
2488 | iova += PAGE_SIZE; | ||
2489 | } | ||
2490 | 2540 | ||
2491 | iommu_flush_tlb_pde(domain); | 2541 | iommu_flush_tlb_pde(domain); |
2492 | 2542 | ||
2493 | mutex_unlock(&domain->api_lock); | 2543 | return get_order(unmap_size); |
2494 | } | 2544 | } |
2495 | 2545 | ||
2496 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 2546 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
2497 | unsigned long iova) | 2547 | unsigned long iova) |
2498 | { | 2548 | { |
2499 | struct protection_domain *domain = dom->priv; | 2549 | struct protection_domain *domain = dom->priv; |
2500 | unsigned long offset = iova & ~PAGE_MASK; | 2550 | unsigned long offset_mask; |
2501 | phys_addr_t paddr; | 2551 | phys_addr_t paddr; |
2502 | u64 *pte; | 2552 | u64 *pte, __pte; |
2503 | 2553 | ||
2504 | pte = fetch_pte(domain, iova, PM_MAP_4k); | 2554 | pte = fetch_pte(domain, iova); |
2505 | 2555 | ||
2506 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 2556 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
2507 | return 0; | 2557 | return 0; |
2508 | 2558 | ||
2509 | paddr = *pte & IOMMU_PAGE_MASK; | 2559 | if (PM_PTE_LEVEL(*pte) == 0) |
2510 | paddr |= offset; | 2560 | offset_mask = PAGE_SIZE - 1; |
2561 | else | ||
2562 | offset_mask = PTE_PAGE_SIZE(*pte) - 1; | ||
2563 | |||
2564 | __pte = *pte & PM_ADDR_MASK; | ||
2565 | paddr = (__pte & ~offset_mask) | (iova & offset_mask); | ||
2511 | 2566 | ||
2512 | return paddr; | 2567 | return paddr; |
2513 | } | 2568 | } |
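Putting the pieces of the reworked iommu_ops entry points together: the new contract passes an order instead of a byte count, so page_size is just 4 KiB << gfp_order, and iova_to_phys recovers the physical address of a large mapping by splitting at the page-size boundary. A stand-alone sketch (addresses are made up for illustration):

#include <stdio.h>

int main(void)
{
	int gfp_order = 9;
	unsigned long page_size = 0x1000UL << gfp_order;	/* 0x200000 = 2 MiB */

	/* iova_to_phys recovery for a large PTE covering this mapping: */
	unsigned long iova = 0x1234567;
	unsigned long pte_frame = 0x80000000UL;		/* address from the PTE */
	unsigned long offset_mask = page_size - 1;	/* PTE_PAGE_SIZE(*pte) - 1 */
	unsigned long paddr = (pte_frame & ~offset_mask) | (iova & offset_mask);

	printf("page_size=0x%lx paddr=0x%lx\n", page_size, paddr);
	/* page_size=0x200000 paddr=0x80034567 */
	return 0;
}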
@@ -2523,8 +2578,8 @@ static struct iommu_ops amd_iommu_ops = { | |||
2523 | .domain_destroy = amd_iommu_domain_destroy, | 2578 | .domain_destroy = amd_iommu_domain_destroy, |
2524 | .attach_dev = amd_iommu_attach_device, | 2579 | .attach_dev = amd_iommu_attach_device, |
2525 | .detach_dev = amd_iommu_detach_device, | 2580 | .detach_dev = amd_iommu_detach_device, |
2526 | .map = amd_iommu_map_range, | 2581 | .map = amd_iommu_map, |
2527 | .unmap = amd_iommu_unmap_range, | 2582 | .unmap = amd_iommu_unmap, |
2528 | .iova_to_phys = amd_iommu_iova_to_phys, | 2583 | .iova_to_phys = amd_iommu_iova_to_phys, |
2529 | .domain_has_cap = amd_iommu_domain_has_cap, | 2584 | .domain_has_cap = amd_iommu_domain_has_cap, |
2530 | }; | 2585 | }; |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 6360abf993d4..3bacb4d0844c 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -120,6 +120,7 @@ struct ivmd_header { | |||
120 | bool amd_iommu_dump; | 120 | bool amd_iommu_dump; |
121 | 121 | ||
122 | static int __initdata amd_iommu_detected; | 122 | static int __initdata amd_iommu_detected; |
123 | static bool __initdata amd_iommu_disabled; | ||
123 | 124 | ||
124 | u16 amd_iommu_last_bdf; /* largest PCI device id we have | 125 | u16 amd_iommu_last_bdf; /* largest PCI device id we have |
125 | to handle */ | 126 | to handle */ |
@@ -1372,6 +1373,9 @@ void __init amd_iommu_detect(void) | |||
1372 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) | 1373 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
1373 | return; | 1374 | return; |
1374 | 1375 | ||
1376 | if (amd_iommu_disabled) | ||
1377 | return; | ||
1378 | |||
1375 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { | 1379 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { |
1376 | iommu_detected = 1; | 1380 | iommu_detected = 1; |
1377 | amd_iommu_detected = 1; | 1381 | amd_iommu_detected = 1; |
@@ -1401,6 +1405,8 @@ static int __init parse_amd_iommu_options(char *str) | |||
1401 | for (; *str; ++str) { | 1405 | for (; *str; ++str) { |
1402 | if (strncmp(str, "fullflush", 9) == 0) | 1406 | if (strncmp(str, "fullflush", 9) == 0) |
1403 | amd_iommu_unmap_flush = true; | 1407 | amd_iommu_unmap_flush = true; |
1408 | if (strncmp(str, "off", 3) == 0) | ||
1409 | amd_iommu_disabled = true; | ||
1404 | } | 1410 | } |
1405 | 1411 | ||
1406 | return 1; | 1412 | return 1; |
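Because the option parser rescans from every character position, "off" can be combined with "fullflush" in a single amd_iommu= string, with any separator. A user-space mirror of the loop:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *str = "fullflush,off";	/* e.g. amd_iommu=fullflush,off */
	int unmap_flush = 0, disabled = 0;

	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			unmap_flush = 1;
		if (strncmp(str, "off", 3) == 0)
			disabled = 1;
	}
	printf("fullflush=%d off=%d\n", unmap_flush, disabled);	/* 1 1 */
	return 0;
}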
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 03ba1b895f5e..425e53a87feb 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -131,24 +131,6 @@ int es7000_plat; | |||
131 | 131 | ||
132 | static unsigned int base; | 132 | static unsigned int base; |
133 | 133 | ||
134 | static int | ||
135 | es7000_rename_gsi(int ioapic, int gsi) | ||
136 | { | ||
137 | if (es7000_plat == ES7000_ZORRO) | ||
138 | return gsi; | ||
139 | |||
140 | if (!base) { | ||
141 | int i; | ||
142 | for (i = 0; i < nr_ioapics; i++) | ||
143 | base += nr_ioapic_registers[i]; | ||
144 | } | ||
145 | |||
146 | if (!ioapic && (gsi < 16)) | ||
147 | gsi += base; | ||
148 | |||
149 | return gsi; | ||
150 | } | ||
151 | |||
152 | static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) | 134 | static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) |
153 | { | 135 | { |
154 | unsigned long vect = 0, psaival = 0; | 136 | unsigned long vect = 0, psaival = 0; |
@@ -190,7 +172,6 @@ static void setup_unisys(void) | |||
190 | es7000_plat = ES7000_ZORRO; | 172 | es7000_plat = ES7000_ZORRO; |
191 | else | 173 | else |
192 | es7000_plat = ES7000_CLASSIC; | 174 | es7000_plat = ES7000_CLASSIC; |
193 | ioapic_renumber_irq = es7000_rename_gsi; | ||
194 | } | 175 | } |
195 | 176 | ||
196 | /* | 177 | /* |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index eb2789c3f721..33f3563a2a52 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -89,6 +89,9 @@ int nr_ioapics; | |||
89 | /* IO APIC gsi routing info */ | 89 | /* IO APIC gsi routing info */ |
90 | struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; | 90 | struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; |
91 | 91 | ||
92 | /* The last gsi number used */ | ||
93 | u32 gsi_end; | ||
94 | |||
92 | /* MP IRQ source entries */ | 95 | /* MP IRQ source entries */ |
93 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; | 96 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; |
94 | 97 | ||
@@ -1013,10 +1016,9 @@ static inline int irq_trigger(int idx) | |||
1013 | return MPBIOS_trigger(idx); | 1016 | return MPBIOS_trigger(idx); |
1014 | } | 1017 | } |
1015 | 1018 | ||
1016 | int (*ioapic_renumber_irq)(int ioapic, int irq); | ||
1017 | static int pin_2_irq(int idx, int apic, int pin) | 1019 | static int pin_2_irq(int idx, int apic, int pin) |
1018 | { | 1020 | { |
1019 | int irq, i; | 1021 | int irq; |
1020 | int bus = mp_irqs[idx].srcbus; | 1022 | int bus = mp_irqs[idx].srcbus; |
1021 | 1023 | ||
1022 | /* | 1024 | /* |
@@ -1028,18 +1030,12 @@ static int pin_2_irq(int idx, int apic, int pin) | |||
1028 | if (test_bit(bus, mp_bus_not_pci)) { | 1030 | if (test_bit(bus, mp_bus_not_pci)) { |
1029 | irq = mp_irqs[idx].srcbusirq; | 1031 | irq = mp_irqs[idx].srcbusirq; |
1030 | } else { | 1032 | } else { |
1031 | /* | 1033 | u32 gsi = mp_gsi_routing[apic].gsi_base + pin; |
1032 | * PCI IRQs are mapped in order | 1034 | |
1033 | */ | 1035 | if (gsi >= NR_IRQS_LEGACY) |
1034 | i = irq = 0; | 1036 | irq = gsi; |
1035 | while (i < apic) | 1037 | else |
1036 | irq += nr_ioapic_registers[i++]; | 1038 | irq = gsi_end + 1 + gsi; |
1037 | irq += pin; | ||
1038 | /* | ||
1039 | * For MPS mode, so far only needed by ES7000 platform | ||
1040 | */ | ||
1041 | if (ioapic_renumber_irq) | ||
1042 | irq = ioapic_renumber_irq(apic, irq); | ||
1043 | } | 1039 | } |
1044 | 1040 | ||
1045 | #ifdef CONFIG_X86_32 | 1041 | #ifdef CONFIG_X86_32 |
@@ -1950,20 +1946,8 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | |||
1950 | 1946 | ||
1951 | void __init enable_IO_APIC(void) | 1947 | void __init enable_IO_APIC(void) |
1952 | { | 1948 | { |
1953 | union IO_APIC_reg_01 reg_01; | ||
1954 | int i8259_apic, i8259_pin; | 1949 | int i8259_apic, i8259_pin; |
1955 | int apic; | 1950 | int apic; |
1956 | unsigned long flags; | ||
1957 | |||
1958 | /* | ||
1959 | * The number of IO-APIC IRQ registers (== #pins): | ||
1960 | */ | ||
1961 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1962 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
1963 | reg_01.raw = io_apic_read(apic, 1); | ||
1964 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1965 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | ||
1966 | } | ||
1967 | 1951 | ||
1968 | if (!legacy_pic->nr_legacy_irqs) | 1952 | if (!legacy_pic->nr_legacy_irqs) |
1969 | return; | 1953 | return; |
@@ -3858,27 +3842,20 @@ int __init io_apic_get_redir_entries (int ioapic) | |||
3858 | reg_01.raw = io_apic_read(ioapic, 1); | 3842 | reg_01.raw = io_apic_read(ioapic, 1); |
3859 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 3843 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
3860 | 3844 | ||
3861 | return reg_01.bits.entries; | 3845 | /* The register returns the maximum redirection index
3846 | * supported, which is one less than the total number of redir | ||
3847 | * entries. | ||
3848 | */ | ||
3849 | return reg_01.bits.entries + 1; | ||
3862 | } | 3850 | } |
3863 | 3851 | ||
3864 | void __init probe_nr_irqs_gsi(void) | 3852 | void __init probe_nr_irqs_gsi(void) |
3865 | { | 3853 | { |
3866 | int nr = 0; | 3854 | int nr; |
3867 | 3855 | ||
3868 | nr = acpi_probe_gsi(); | 3856 | nr = gsi_end + 1 + NR_IRQS_LEGACY; |
3869 | if (nr > nr_irqs_gsi) { | 3857 | if (nr > nr_irqs_gsi) |
3870 | nr_irqs_gsi = nr; | 3858 | nr_irqs_gsi = nr; |
3871 | } else { | ||
3872 | /* for acpi=off or acpi is not compiled in */ | ||
3873 | int idx; | ||
3874 | |||
3875 | nr = 0; | ||
3876 | for (idx = 0; idx < nr_ioapics; idx++) | ||
3877 | nr += io_apic_get_redir_entries(idx) + 1; | ||
3878 | |||
3879 | if (nr > nr_irqs_gsi) | ||
3880 | nr_irqs_gsi = nr; | ||
3881 | } | ||
3882 | 3859 | ||
3883 | printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); | 3860 | printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); |
3884 | } | 3861 | } |
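The sizing now follows directly from gsi_end: in the worst case all 16 ISA IRQs are displaced above the last GSI, so that many extra IRQ numbers must be reserved. A toy calculation:

#include <stdio.h>

#define NR_IRQS_LEGACY 16

int main(void)
{
	/* e.g. two 24-pin IOAPICs and one 8-pin: GSIs 0..55 */
	unsigned int gsi_end = 55;

	printf("nr_irqs_gsi >= %u\n", gsi_end + 1 + NR_IRQS_LEGACY);	/* 72 */
	return 0;
}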
@@ -4085,22 +4062,27 @@ int __init io_apic_get_version(int ioapic) | |||
4085 | return reg_01.bits.version; | 4062 | return reg_01.bits.version; |
4086 | } | 4063 | } |
4087 | 4064 | ||
4088 | int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | 4065 | int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) |
4089 | { | 4066 | { |
4090 | int i; | 4067 | int ioapic, pin, idx; |
4091 | 4068 | ||
4092 | if (skip_ioapic_setup) | 4069 | if (skip_ioapic_setup) |
4093 | return -1; | 4070 | return -1; |
4094 | 4071 | ||
4095 | for (i = 0; i < mp_irq_entries; i++) | 4072 | ioapic = mp_find_ioapic(gsi); |
4096 | if (mp_irqs[i].irqtype == mp_INT && | 4073 | if (ioapic < 0) |
4097 | mp_irqs[i].srcbusirq == bus_irq) | ||
4098 | break; | ||
4099 | if (i >= mp_irq_entries) | ||
4100 | return -1; | 4074 | return -1; |
4101 | 4075 | ||
4102 | *trigger = irq_trigger(i); | 4076 | pin = mp_find_ioapic_pin(ioapic, gsi); |
4103 | *polarity = irq_polarity(i); | 4077 | if (pin < 0) |
4078 | return -1; | ||
4079 | |||
4080 | idx = find_irq_entry(ioapic, pin, mp_INT); | ||
4081 | if (idx < 0) | ||
4082 | return -1; | ||
4083 | |||
4084 | *trigger = irq_trigger(idx); | ||
4085 | *polarity = irq_polarity(idx); | ||
4104 | return 0; | 4086 | return 0; |
4105 | } | 4087 | } |
4106 | 4088 | ||
@@ -4241,7 +4223,7 @@ void __init ioapic_insert_resources(void) | |||
4241 | } | 4223 | } |
4242 | } | 4224 | } |
4243 | 4225 | ||
4244 | int mp_find_ioapic(int gsi) | 4226 | int mp_find_ioapic(u32 gsi) |
4245 | { | 4227 | { |
4246 | int i = 0; | 4228 | int i = 0; |
4247 | 4229 | ||
@@ -4256,7 +4238,7 @@ int mp_find_ioapic(int gsi) | |||
4256 | return -1; | 4238 | return -1; |
4257 | } | 4239 | } |
4258 | 4240 | ||
4259 | int mp_find_ioapic_pin(int ioapic, int gsi) | 4241 | int mp_find_ioapic_pin(int ioapic, u32 gsi) |
4260 | { | 4242 | { |
4261 | if (WARN_ON(ioapic == -1)) | 4243 | if (WARN_ON(ioapic == -1)) |
4262 | return -1; | 4244 | return -1; |
@@ -4284,6 +4266,7 @@ static int bad_ioapic(unsigned long address) | |||
4284 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | 4266 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) |
4285 | { | 4267 | { |
4286 | int idx = 0; | 4268 | int idx = 0; |
4269 | int entries; | ||
4287 | 4270 | ||
4288 | if (bad_ioapic(address)) | 4271 | if (bad_ioapic(address)) |
4289 | return; | 4272 | return; |
@@ -4302,9 +4285,17 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
4302 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | 4285 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups |
4303 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | 4286 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). |
4304 | */ | 4287 | */ |
4288 | entries = io_apic_get_redir_entries(idx); | ||
4305 | mp_gsi_routing[idx].gsi_base = gsi_base; | 4289 | mp_gsi_routing[idx].gsi_base = gsi_base; |
4306 | mp_gsi_routing[idx].gsi_end = gsi_base + | 4290 | mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1; |
4307 | io_apic_get_redir_entries(idx); | 4291 | |
4292 | /* | ||
4293 | * The number of IO-APIC IRQ registers (== #pins): | ||
4294 | */ | ||
4295 | nr_ioapic_registers[idx] = entries; | ||
4296 | |||
4297 | if (mp_gsi_routing[idx].gsi_end > gsi_end) | ||
4298 | gsi_end = mp_gsi_routing[idx].gsi_end; | ||
4308 | 4299 | ||
4309 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | 4300 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " |
4310 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | 4301 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, |
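Note that gsi_end is now stored as an inclusive bound, hence the "- 1" (io_apic_get_redir_entries() returns a count after the change above, not a maximum index). A toy calculation for a second 24-pin IOAPIC:

#include <stdio.h>

int main(void)
{
	unsigned int gsi_base = 24;	/* second IOAPIC */
	unsigned int entries = 24;	/* io_apic_get_redir_entries(): a count now */
	unsigned int gsi_end = gsi_base + entries - 1;

	printf("IOAPIC GSI range %u-%u\n", gsi_base, gsi_end);	/* 24-47 */
	return 0;
}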
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c085d52dbaf2..e46f98f36e31 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -735,9 +735,6 @@ void __init uv_system_init(void) | |||
735 | uv_node_to_blade[nid] = blade; | 735 | uv_node_to_blade[nid] = blade; |
736 | uv_cpu_to_blade[cpu] = blade; | 736 | uv_cpu_to_blade[cpu] = blade; |
737 | max_pnode = max(pnode, max_pnode); | 737 | max_pnode = max(pnode, max_pnode); |
738 | |||
739 | printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n", | ||
740 | cpu, apicid, pnode, nid, lcpu, blade); | ||
741 | } | 738 | } |
742 | 739 | ||
743 | /* Add blade/pnode info for nodes without cpus */ | 740 | /* Add blade/pnode info for nodes without cpus */ |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 031aa887b0eb..c4f9182ca3ac 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1224,7 +1224,7 @@ static void reinit_timer(void) | |||
1224 | #ifdef INIT_TIMER_AFTER_SUSPEND | 1224 | #ifdef INIT_TIMER_AFTER_SUSPEND |
1225 | unsigned long flags; | 1225 | unsigned long flags; |
1226 | 1226 | ||
1227 | spin_lock_irqsave(&i8253_lock, flags); | 1227 | raw_spin_lock_irqsave(&i8253_lock, flags); |
1228 | /* set the clock to HZ */ | 1228 | /* set the clock to HZ */ |
1229 | outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ | 1229 | outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ |
1230 | udelay(10); | 1230 | udelay(10); |
@@ -1232,7 +1232,7 @@ static void reinit_timer(void) | |||
1232 | udelay(10); | 1232 | udelay(10); |
1233 | outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ | 1233 | outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ |
1234 | udelay(10); | 1234 | udelay(10); |
1235 | spin_unlock_irqrestore(&i8253_lock, flags); | 1235 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
1236 | #endif | 1236 | #endif |
1237 | } | 1237 | } |
1238 | 1238 | ||
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index c202b62f3671..3a785da34b6f 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp) | |||
14 | 14 | ||
15 | obj-y := intel_cacheinfo.o addon_cpuid_features.o | 15 | obj-y := intel_cacheinfo.o addon_cpuid_features.o |
16 | obj-y += proc.o capflags.o powerflags.o common.o | 16 | obj-y += proc.o capflags.o powerflags.o common.o |
17 | obj-y += vmware.o hypervisor.o sched.o | 17 | obj-y += vmware.o hypervisor.o sched.o mshyperv.o |
18 | 18 | ||
19 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o | 19 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o |
20 | obj-$(CONFIG_X86_64) += bugs_64.o | 20 | obj-$(CONFIG_X86_64) += bugs_64.o |
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 97ad79cdf688..10fa5684a662 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -30,12 +30,14 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
30 | const struct cpuid_bit *cb; | 30 | const struct cpuid_bit *cb; |
31 | 31 | ||
32 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 32 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { |
33 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, | 33 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, |
34 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, | 34 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, |
35 | { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a }, | 35 | { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006 }, |
36 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a }, | 36 | { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007 }, |
37 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a }, | 37 | { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a }, |
38 | { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a }, | 38 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a }, |
39 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a }, | ||
40 | { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a }, | ||
39 | { 0, 0, 0, 0 } | 41 | { 0, 0, 0, 0 } |
40 | }; | 42 | }; |
41 | 43 | ||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 01a265212395..c39576cb3018 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -86,7 +86,7 @@ static void __init check_fpu(void) | |||
86 | 86 | ||
87 | static void __init check_hlt(void) | 87 | static void __init check_hlt(void) |
88 | { | 88 | { |
89 | if (paravirt_enabled()) | 89 | if (boot_cpu_data.x86 >= 5 || paravirt_enabled()) |
90 | return; | 90 | return; |
91 | 91 | ||
92 | printk(KERN_INFO "Checking 'hlt' instruction... "); | 92 | printk(KERN_INFO "Checking 'hlt' instruction... "); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4868e4a951ee..c1c00d0b1692 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1243,10 +1243,7 @@ void __cpuinit cpu_init(void) | |||
1243 | /* | 1243 | /* |
1244 | * Force FPU initialization: | 1244 | * Force FPU initialization: |
1245 | */ | 1245 | */ |
1246 | if (cpu_has_xsave) | 1246 | current_thread_info()->status = 0; |
1247 | current_thread_info()->status = TS_XSAVE; | ||
1248 | else | ||
1249 | current_thread_info()->status = 0; | ||
1250 | clear_used_math(); | 1247 | clear_used_math(); |
1251 | mxcsr_feature_mask_init(); | 1248 | mxcsr_feature_mask_init(); |
1252 | 1249 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
index 1840c0a5170b..bd54bf67e6fb 100644
--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -2,8 +2,8 @@ | |||
2 | # K8 systems. ACPI is preferred to all other hardware-specific drivers. | 2 | # K8 systems. ACPI is preferred to all other hardware-specific drivers. |
3 | # speedstep-* is preferred over p4-clockmod. | 3 | # speedstep-* is preferred over p4-clockmod. |
4 | 4 | ||
5 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o | 5 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o |
6 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o | 6 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o |
7 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o | 7 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o |
8 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | 8 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o |
9 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | 9 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o |
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 459168083b77..1d3cddaa40ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/msr.h> | 46 | #include <asm/msr.h> |
47 | #include <asm/processor.h> | 47 | #include <asm/processor.h> |
48 | #include <asm/cpufeature.h> | 48 | #include <asm/cpufeature.h> |
49 | #include "mperf.h" | ||
49 | 50 | ||
50 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | 51 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ |
51 | "acpi-cpufreq", msg) | 52 | "acpi-cpufreq", msg) |
@@ -71,8 +72,6 @@ struct acpi_cpufreq_data { | |||
71 | 72 | ||
72 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); | 73 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); |
73 | 74 | ||
74 | static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); | ||
75 | |||
76 | /* acpi_perf_data is a pointer to percpu data. */ | 75 | /* acpi_perf_data is a pointer to percpu data. */ |
77 | static struct acpi_processor_performance *acpi_perf_data; | 76 | static struct acpi_processor_performance *acpi_perf_data; |
78 | 77 | ||
@@ -240,45 +239,6 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
240 | return cmd.val; | 239 | return cmd.val; |
241 | } | 240 | } |
242 | 241 | ||
243 | /* Called via smp_call_function_single(), on the target CPU */ | ||
244 | static void read_measured_perf_ctrs(void *_cur) | ||
245 | { | ||
246 | struct aperfmperf *am = _cur; | ||
247 | |||
248 | get_aperfmperf(am); | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * Return the measured active (C0) frequency on this CPU since last call | ||
253 | * to this function. | ||
254 | * Input: cpu number | ||
255 | * Return: Average CPU frequency in terms of max frequency (zero on error) | ||
256 | * | ||
257 | * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance | ||
258 | * over a period of time, while CPU is in C0 state. | ||
259 | * IA32_MPERF counts at the rate of max advertised frequency | ||
260 | * IA32_APERF counts at the rate of actual CPU frequency | ||
261 | * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and | ||
262 | * no meaning should be associated with absolute values of these MSRs. | ||
263 | */ | ||
264 | static unsigned int get_measured_perf(struct cpufreq_policy *policy, | ||
265 | unsigned int cpu) | ||
266 | { | ||
267 | struct aperfmperf perf; | ||
268 | unsigned long ratio; | ||
269 | unsigned int retval; | ||
270 | |||
271 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) | ||
272 | return 0; | ||
273 | |||
274 | ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); | ||
275 | per_cpu(acfreq_old_perf, cpu) = perf; | ||
276 | |||
277 | retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; | ||
278 | |||
279 | return retval; | ||
280 | } | ||
281 | |||
282 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | 242 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
283 | { | 243 | { |
284 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); | 244 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); |
@@ -702,7 +662,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
702 | 662 | ||
703 | /* Check for APERF/MPERF support in hardware */ | 663 | /* Check for APERF/MPERF support in hardware */ |
704 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) | 664 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) |
705 | acpi_cpufreq_driver.getavg = get_measured_perf; | 665 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; |
706 | 666 | ||
707 | dprintk("CPU%u - ACPI performance management activated.\n", cpu); | 667 | dprintk("CPU%u - ACPI performance management activated.\n", cpu); |
708 | for (i = 0; i < perf->state_count; i++) | 668 | for (i = 0; i < perf->state_count; i++) |
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/arch/x86/kernel/cpu/cpufreq/mperf.c
new file mode 100644
index 000000000000..911e193018ae
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.c
@@ -0,0 +1,51 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/smp.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/cpufreq.h> | ||
6 | #include <linux/slab.h> | ||
7 | |||
8 | #include "mperf.h" | ||
9 | |||
10 | static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); | ||
11 | |||
12 | /* Called via smp_call_function_single(), on the target CPU */ | ||
13 | static void read_measured_perf_ctrs(void *_cur) | ||
14 | { | ||
15 | struct aperfmperf *am = _cur; | ||
16 | |||
17 | get_aperfmperf(am); | ||
18 | } | ||
19 | |||
20 | /* | ||
21 | * Return the measured active (C0) frequency on this CPU since last call | ||
22 | * to this function. | ||
23 | * Input: cpu number | ||
24 | * Return: Average CPU frequency in terms of max frequency (zero on error) | ||
25 | * | ||
26 | * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance | ||
27 | * over a period of time, while CPU is in C0 state. | ||
28 | * IA32_MPERF counts at the rate of max advertised frequency | ||
29 | * IA32_APERF counts at the rate of actual CPU frequency | ||
30 | * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and | ||
31 | * no meaning should be associated with absolute values of these MSRs. | ||
32 | */ | ||
33 | unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, | ||
34 | unsigned int cpu) | ||
35 | { | ||
36 | struct aperfmperf perf; | ||
37 | unsigned long ratio; | ||
38 | unsigned int retval; | ||
39 | |||
40 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) | ||
41 | return 0; | ||
42 | |||
43 | ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); | ||
44 | per_cpu(acfreq_old_perf, cpu) = perf; | ||
45 | |||
46 | retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; | ||
47 | |||
48 | return retval; | ||
49 | } | ||
50 | EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf); | ||
51 | MODULE_LICENSE("GPL"); | ||
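calc_aperfmperf_ratio() reduces to a fixed-point division of the two MSR deltas since the previous snapshot. The sketch below assumes APERFMPERF_SHIFT is 10 (a 10-bit fraction), as in asm/processor.h; treat the constant as an assumption:

#include <stdio.h>

#define APERFMPERF_SHIFT 10	/* fixed-point fraction bits (assumed) */

int main(void)
{
	/* Deltas since the last call: the CPU ran at ~80% of its
	 * maximum frequency while in C0. */
	unsigned long long aperf_delta = 800000, mperf_delta = 1000000;
	unsigned long long ratio = (aperf_delta << APERFMPERF_SHIFT) / mperf_delta;
	unsigned long long max_freq = 2400000;	/* kHz, policy->cpuinfo.max_freq */

	printf("avg C0 freq = %llu kHz\n",
	       (max_freq * ratio) >> APERFMPERF_SHIFT);	/* ~1919531 kHz */
	return 0;
}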
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/arch/x86/kernel/cpu/cpufreq/mperf.h
new file mode 100644
index 000000000000..5dbf2950dc22
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.h
@@ -0,0 +1,9 @@ | |||
1 | /* | ||
2 | * (c) 2010 Advanced Micro Devices, Inc. | ||
3 | * Your use of this code is subject to the terms and conditions of the | ||
4 | * GNU general public license version 2. See "COPYING" or | ||
5 | * http://www.gnu.org/licenses/gpl.html | ||
6 | */ | ||
7 | |||
8 | unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, | ||
9 | unsigned int cpu); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b6215b9798e2..6f3dc8fbbfdc 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,6 +1,5 @@ | |||
1 | |||
2 | /* | 1 | /* |
3 | * (c) 2003-2006 Advanced Micro Devices, Inc. | 2 | * (c) 2003-2010 Advanced Micro Devices, Inc. |
4 | * Your use of this code is subject to the terms and conditions of the | 3 | * Your use of this code is subject to the terms and conditions of the |
5 | * GNU general public license version 2. See "COPYING" or | 4 | * GNU general public license version 2. See "COPYING" or |
6 | * http://www.gnu.org/licenses/gpl.html | 5 | * http://www.gnu.org/licenses/gpl.html |
@@ -46,6 +45,7 @@ | |||
46 | #define PFX "powernow-k8: " | 45 | #define PFX "powernow-k8: " |
47 | #define VERSION "version 2.20.00" | 46 | #define VERSION "version 2.20.00" |
48 | #include "powernow-k8.h" | 47 | #include "powernow-k8.h" |
48 | #include "mperf.h" | ||
49 | 49 | ||
50 | /* serialize freq changes */ | 50 | /* serialize freq changes */ |
51 | static DEFINE_MUTEX(fidvid_mutex); | 51 | static DEFINE_MUTEX(fidvid_mutex); |
@@ -54,6 +54,12 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); | |||
54 | 54 | ||
55 | static int cpu_family = CPU_OPTERON; | 55 | static int cpu_family = CPU_OPTERON; |
56 | 56 | ||
57 | /* core performance boost */ | ||
58 | static bool cpb_capable, cpb_enabled; | ||
59 | static struct msr __percpu *msrs; | ||
60 | |||
61 | static struct cpufreq_driver cpufreq_amd64_driver; | ||
62 | |||
57 | #ifndef CONFIG_SMP | 63 | #ifndef CONFIG_SMP |
58 | static inline const struct cpumask *cpu_core_mask(int cpu) | 64 | static inline const struct cpumask *cpu_core_mask(int cpu) |
59 | { | 65 | { |
@@ -1249,6 +1255,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1249 | struct powernow_k8_data *data; | 1255 | struct powernow_k8_data *data; |
1250 | struct init_on_cpu init_on_cpu; | 1256 | struct init_on_cpu init_on_cpu; |
1251 | int rc; | 1257 | int rc; |
1258 | struct cpuinfo_x86 *c = &cpu_data(pol->cpu); | ||
1252 | 1259 | ||
1253 | if (!cpu_online(pol->cpu)) | 1260 | if (!cpu_online(pol->cpu)) |
1254 | return -ENODEV; | 1261 | return -ENODEV; |
@@ -1323,6 +1330,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1323 | return -EINVAL; | 1330 | return -EINVAL; |
1324 | } | 1331 | } |
1325 | 1332 | ||
1333 | /* Check for APERF/MPERF support in hardware */ | ||
1334 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) | ||
1335 | cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf; | ||
1336 | |||
1326 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | 1337 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); |
1327 | 1338 | ||
1328 | if (cpu_family == CPU_HW_PSTATE) | 1339 | if (cpu_family == CPU_HW_PSTATE) |
@@ -1394,8 +1405,77 @@ out: | |||
1394 | return khz; | 1405 | return khz; |
1395 | } | 1406 | } |
1396 | 1407 | ||
1408 | static void _cpb_toggle_msrs(bool t) | ||
1409 | { | ||
1410 | int cpu; | ||
1411 | |||
1412 | get_online_cpus(); | ||
1413 | |||
1414 | rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); | ||
1415 | |||
1416 | for_each_cpu(cpu, cpu_online_mask) { | ||
1417 | struct msr *reg = per_cpu_ptr(msrs, cpu); | ||
1418 | if (t) | ||
1419 | reg->l &= ~BIT(25); | ||
1420 | else | ||
1421 | reg->l |= BIT(25); | ||
1422 | } | ||
1423 | wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); | ||
1424 | |||
1425 | put_online_cpus(); | ||
1426 | } | ||
1427 | |||
1428 | /* | ||
1429 | * Switch on/off core performance boosting. | ||
1430 | * | ||
1431 | * 0=disable | ||
1432 | * 1=enable. | ||
1433 | */ | ||
1434 | static void cpb_toggle(bool t) | ||
1435 | { | ||
1436 | if (!cpb_capable) | ||
1437 | return; | ||
1438 | |||
1439 | if (t && !cpb_enabled) { | ||
1440 | cpb_enabled = true; | ||
1441 | _cpb_toggle_msrs(t); | ||
1442 | printk(KERN_INFO PFX "Core Boosting enabled.\n"); | ||
1443 | } else if (!t && cpb_enabled) { | ||
1444 | cpb_enabled = false; | ||
1445 | _cpb_toggle_msrs(t); | ||
1446 | printk(KERN_INFO PFX "Core Boosting disabled.\n"); | ||
1447 | } | ||
1448 | } | ||
1449 | |||
1450 | static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, | ||
1451 | size_t count) | ||
1452 | { | ||
1453 | int ret = -EINVAL; | ||
1454 | unsigned long val = 0; | ||
1455 | |||
1456 | ret = strict_strtoul(buf, 10, &val); | ||
1457 | if (!ret && (val == 0 || val == 1) && cpb_capable) | ||
1458 | cpb_toggle(val); | ||
1459 | else | ||
1460 | return -EINVAL; | ||
1461 | |||
1462 | return count; | ||
1463 | } | ||
1464 | |||
1465 | static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) | ||
1466 | { | ||
1467 | return sprintf(buf, "%u\n", cpb_enabled); | ||
1468 | } | ||
1469 | |||
1470 | #define define_one_rw(_name) \ | ||
1471 | static struct freq_attr _name = \ | ||
1472 | __ATTR(_name, 0644, show_##_name, store_##_name) | ||
1473 | |||
1474 | define_one_rw(cpb); | ||
1475 | |||
1397 | static struct freq_attr *powernow_k8_attr[] = { | 1476 | static struct freq_attr *powernow_k8_attr[] = { |
1398 | &cpufreq_freq_attr_scaling_available_freqs, | 1477 | &cpufreq_freq_attr_scaling_available_freqs, |
1478 | &cpb, | ||
1399 | NULL, | 1479 | NULL, |
1400 | }; | 1480 | }; |
1401 | 1481 | ||
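With cpb wired into powernow_k8_attr, the knob appears under each policy's cpufreq directory and accepts "0" or "1" as parsed by store_cpb(). A userspace sketch, assuming the conventional sysfs path (which may vary with the running kernel):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/cpb", "w");

        if (!f) {
            perror("cpb");
            return 1;
        }
        fputs("0\n", f);  /* "0" disables boosting, "1" re-enables it */
        fclose(f);
        return 0;
    }

Note that cpb_toggle() acts on the whole online mask, so a write through any one policy flips the global state.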
@@ -1411,10 +1491,51 @@ static struct cpufreq_driver cpufreq_amd64_driver = { | |||
1411 | .attr = powernow_k8_attr, | 1491 | .attr = powernow_k8_attr, |
1412 | }; | 1492 | }; |
1413 | 1493 | ||
1494 | /* | ||
1495 | * Clear the boost-disable flag on the CPU_DOWN path so that this cpu | ||
1496 | * cannot block the remaining ones from boosting. On the CPU_UP path we | ||
1497 | * simply keep the boost-disable flag in sync with the current global | ||
1498 | * state. | ||
1499 | */ | ||
1500 | static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action, | ||
1501 | void *hcpu) | ||
1502 | { | ||
1503 | unsigned cpu = (long)hcpu; | ||
1504 | u32 lo, hi; | ||
1505 | |||
1506 | switch (action) { | ||
1507 | case CPU_UP_PREPARE: | ||
1508 | case CPU_UP_PREPARE_FROZEN: | ||
1509 | |||
1510 | if (!cpb_enabled) { | ||
1511 | rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); | ||
1512 | lo |= BIT(25); | ||
1513 | wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); | ||
1514 | } | ||
1515 | break; | ||
1516 | |||
1517 | case CPU_DOWN_PREPARE: | ||
1518 | case CPU_DOWN_PREPARE_FROZEN: | ||
1519 | rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); | ||
1520 | lo &= ~BIT(25); | ||
1521 | wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); | ||
1522 | break; | ||
1523 | |||
1524 | default: | ||
1525 | break; | ||
1526 | } | ||
1527 | |||
1528 | return NOTIFY_OK; | ||
1529 | } | ||
1530 | |||
1531 | static struct notifier_block __cpuinitdata cpb_nb = { | ||
1532 | .notifier_call = cpb_notify, | ||
1533 | }; | ||
1534 | |||
1414 | /* driver entry point for init */ | 1535 | /* driver entry point for init */ |
1415 | static int __cpuinit powernowk8_init(void) | 1536 | static int __cpuinit powernowk8_init(void) |
1416 | { | 1537 | { |
1417 | unsigned int i, supported_cpus = 0; | 1538 | unsigned int i, supported_cpus = 0, cpu; |
1418 | 1539 | ||
1419 | for_each_online_cpu(i) { | 1540 | for_each_online_cpu(i) { |
1420 | int rc; | 1541 | int rc; |
@@ -1423,15 +1544,36 @@ static int __cpuinit powernowk8_init(void) | |||
1423 | supported_cpus++; | 1544 | supported_cpus++; |
1424 | } | 1545 | } |
1425 | 1546 | ||
1426 | if (supported_cpus == num_online_cpus()) { | 1547 | if (supported_cpus != num_online_cpus()) |
1427 | printk(KERN_INFO PFX "Found %d %s " | 1548 | return -ENODEV; |
1428 | "processors (%d cpu cores) (" VERSION ")\n", | 1549 | |
1429 | num_online_nodes(), | 1550 | printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", |
1430 | boot_cpu_data.x86_model_id, supported_cpus); | 1551 | num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); |
1431 | return cpufreq_register_driver(&cpufreq_amd64_driver); | 1552 | |
1553 | if (boot_cpu_has(X86_FEATURE_CPB)) { | ||
1554 | |||
1555 | cpb_capable = true; | ||
1556 | |||
1557 | register_cpu_notifier(&cpb_nb); | ||
1558 | |||
1559 | msrs = msrs_alloc(); | ||
1560 | if (!msrs) { | ||
1561 | printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); | ||
1562 | return -ENOMEM; | ||
1563 | } | ||
1564 | |||
1565 | rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); | ||
1566 | |||
1567 | for_each_cpu(cpu, cpu_online_mask) { | ||
1568 | struct msr *reg = per_cpu_ptr(msrs, cpu); | ||
1569 | cpb_enabled |= !(!!(reg->l & BIT(25))); | ||
1570 | } | ||
1571 | |||
1572 | printk(KERN_INFO PFX "Core Performance Boosting: %s.\n", | ||
1573 | (cpb_enabled ? "on" : "off")); | ||
1432 | } | 1574 | } |
1433 | 1575 | ||
1434 | return -ENODEV; | 1576 | return cpufreq_register_driver(&cpufreq_amd64_driver); |
1435 | } | 1577 | } |
1436 | 1578 | ||
1437 | /* driver entry point for term */ | 1579 | /* driver entry point for term */ |
@@ -1439,6 +1581,13 @@ static void __exit powernowk8_exit(void) | |||
1439 | { | 1581 | { |
1440 | dprintk("exit\n"); | 1582 | dprintk("exit\n"); |
1441 | 1583 | ||
1584 | if (boot_cpu_has(X86_FEATURE_CPB)) { | ||
1585 | msrs_free(msrs); | ||
1586 | msrs = NULL; | ||
1587 | |||
1588 | unregister_cpu_notifier(&cpb_nb); | ||
1589 | } | ||
1590 | |||
1442 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | 1591 | cpufreq_unregister_driver(&cpufreq_amd64_driver); |
1443 | } | 1592 | } |
1444 | 1593 | ||
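The boost-disable state that cpb_notify() keeps in sync is bit 25 of MSR_K7_HWCR (0xc0010015 in msr-index.h), as the read-modify-write sequences above show. It can be inspected per core through the msr device; a sketch, assuming that device is available:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define MSR_K7_HWCR 0xc0010015

    int main(void)
    {
        uint64_t hwcr = 0;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0 || pread(fd, &hwcr, sizeof(hwcr), MSR_K7_HWCR) != sizeof(hwcr))
            return 1;
        /* A set bit 25 means boosting is disabled on this core. */
        printf("core 0 boost: %s\n", (hwcr & (1ULL << 25)) ? "off" : "on");
        close(fd);
        return 0;
    }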
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 02ce824073cb..df3529b1c02d 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h | |||
@@ -5,7 +5,6 @@ | |||
5 | * http://www.gnu.org/licenses/gpl.html | 5 | * http://www.gnu.org/licenses/gpl.html |
6 | */ | 6 | */ |
7 | 7 | ||
8 | |||
9 | enum pstate { | 8 | enum pstate { |
10 | HW_PSTATE_INVALID = 0xff, | 9 | HW_PSTATE_INVALID = 0xff, |
11 | HW_PSTATE_0 = 0, | 10 | HW_PSTATE_0 = 0, |
@@ -55,7 +54,6 @@ struct powernow_k8_data { | |||
55 | struct cpumask *available_cores; | 54 | struct cpumask *available_cores; |
56 | }; | 55 | }; |
57 | 56 | ||
58 | |||
59 | /* processor's cpuid instruction support */ | 57 | /* processor's cpuid instruction support */ |
60 | #define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */ | 58 | #define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */ |
61 | #define CPUID_XFAM 0x0ff00000 /* extended family */ | 59 | #define CPUID_XFAM 0x0ff00000 /* extended family */ |
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 08be922de33a..dd531cc56a8f 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -21,37 +21,55 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/module.h> | ||
24 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
25 | #include <asm/vmware.h> | ||
26 | #include <asm/hypervisor.h> | 26 | #include <asm/hypervisor.h> |
27 | 27 | ||
28 | static inline void __cpuinit | 28 | /* |
29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) | 29 | * Hypervisor detect order. This is specified explicitly here because |
30 | * some hypervisors might implement compatibility modes for other | ||
31 | * hypervisors and therefore need to be detected in a specific sequence. | ||
32 | */ | ||
33 | static const __initconst struct hypervisor_x86 * const hypervisors[] = | ||
30 | { | 34 | { |
31 | if (vmware_platform()) | 35 | &x86_hyper_vmware, |
32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; | 36 | &x86_hyper_ms_hyperv, |
33 | else | 37 | }; |
34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; | ||
35 | } | ||
36 | 38 | ||
37 | static inline void __cpuinit | 39 | const struct hypervisor_x86 *x86_hyper; |
38 | hypervisor_set_feature_bits(struct cpuinfo_x86 *c) | 40 | EXPORT_SYMBOL(x86_hyper); |
41 | |||
42 | static inline void __init | ||
43 | detect_hypervisor_vendor(void) | ||
39 | { | 44 | { |
40 | if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) { | 45 | const struct hypervisor_x86 *h, * const *p; |
41 | vmware_set_feature_bits(c); | 46 | |
42 | return; | 47 | for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { |
48 | h = *p; | ||
49 | if (h->detect()) { | ||
50 | x86_hyper = h; | ||
51 | printk(KERN_INFO "Hypervisor detected: %s\n", h->name); | ||
52 | break; | ||
53 | } | ||
43 | } | 54 | } |
44 | } | 55 | } |
45 | 56 | ||
46 | void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) | 57 | void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) |
47 | { | 58 | { |
48 | detect_hypervisor_vendor(c); | 59 | if (x86_hyper && x86_hyper->set_cpu_features) |
49 | hypervisor_set_feature_bits(c); | 60 | x86_hyper->set_cpu_features(c); |
50 | } | 61 | } |
51 | 62 | ||
52 | void __init init_hypervisor_platform(void) | 63 | void __init init_hypervisor_platform(void) |
53 | { | 64 | { |
65 | |||
66 | detect_hypervisor_vendor(); | ||
67 | |||
68 | if (!x86_hyper) | ||
69 | return; | ||
70 | |||
54 | init_hypervisor(&boot_cpu_data); | 71 | init_hypervisor(&boot_cpu_data); |
55 | if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) | 72 | |
56 | vmware_platform_setup(); | 73 | if (x86_hyper->init_platform) |
74 | x86_hyper->init_platform(); | ||
57 | } | 75 | } |
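Adding another guest-detection backend is now a matter of filling in one hypervisor_x86 structure and placing it in hypervisors[] at the right position, since detection order matters per the comment above. A sketch with hypothetical names:

    /* Only .detect is required by the loop in detect_hypervisor_vendor(). */
    static bool __init example_platform(void)
    {
        return false;  /* probe CPUID, DMI, etc.; return true on a match */
    }

    const __refconst struct hypervisor_x86 x86_hyper_example = {
        .name   = "Example",
        .detect = example_platform,
        /* .set_cpu_features and .init_platform may stay NULL;
         * init_hypervisor() and init_hypervisor_platform() check both. */
    };

The entry would then be ordered into the hypervisors[] array relative to any hypervisor it might be emulating.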
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index f5e5390d3459..85f69cdeae10 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -372,12 +372,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
372 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | 372 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
373 | } | 373 | } |
374 | 374 | ||
375 | if (c->cpuid_level > 6) { | ||
376 | unsigned ecx = cpuid_ecx(6); | ||
377 | if (ecx & 0x01) | ||
378 | set_cpu_cap(c, X86_FEATURE_APERFMPERF); | ||
379 | } | ||
380 | |||
381 | if (cpu_has_xmm2) | 375 | if (cpu_has_xmm2) |
382 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | 376 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
383 | if (cpu_has_ds) { | 377 | if (cpu_has_ds) { |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index b3eeb66c0a51..33eae2062cf5 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -148,13 +148,19 @@ union _cpuid4_leaf_ecx { | |||
148 | u32 full; | 148 | u32 full; |
149 | }; | 149 | }; |
150 | 150 | ||
151 | struct amd_l3_cache { | ||
152 | struct pci_dev *dev; | ||
153 | bool can_disable; | ||
154 | unsigned indices; | ||
155 | u8 subcaches[4]; | ||
156 | }; | ||
157 | |||
151 | struct _cpuid4_info { | 158 | struct _cpuid4_info { |
152 | union _cpuid4_leaf_eax eax; | 159 | union _cpuid4_leaf_eax eax; |
153 | union _cpuid4_leaf_ebx ebx; | 160 | union _cpuid4_leaf_ebx ebx; |
154 | union _cpuid4_leaf_ecx ecx; | 161 | union _cpuid4_leaf_ecx ecx; |
155 | unsigned long size; | 162 | unsigned long size; |
156 | bool can_disable; | 163 | struct amd_l3_cache *l3; |
157 | unsigned int l3_indices; | ||
158 | DECLARE_BITMAP(shared_cpu_map, NR_CPUS); | 164 | DECLARE_BITMAP(shared_cpu_map, NR_CPUS); |
159 | }; | 165 | }; |
160 | 166 | ||
@@ -164,8 +170,7 @@ struct _cpuid4_info_regs { | |||
164 | union _cpuid4_leaf_ebx ebx; | 170 | union _cpuid4_leaf_ebx ebx; |
165 | union _cpuid4_leaf_ecx ecx; | 171 | union _cpuid4_leaf_ecx ecx; |
166 | unsigned long size; | 172 | unsigned long size; |
167 | bool can_disable; | 173 | struct amd_l3_cache *l3; |
168 | unsigned int l3_indices; | ||
169 | }; | 174 | }; |
170 | 175 | ||
171 | unsigned short num_cache_leaves; | 176 | unsigned short num_cache_leaves; |
@@ -302,87 +307,163 @@ struct _cache_attr { | |||
302 | }; | 307 | }; |
303 | 308 | ||
304 | #ifdef CONFIG_CPU_SUP_AMD | 309 | #ifdef CONFIG_CPU_SUP_AMD |
305 | static unsigned int __cpuinit amd_calc_l3_indices(void) | 310 | |
311 | /* | ||
312 | * L3 cache descriptors | ||
313 | */ | ||
314 | static struct amd_l3_cache **__cpuinitdata l3_caches; | ||
315 | |||
316 | static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3) | ||
306 | { | 317 | { |
307 | /* | ||
308 | * We're called over smp_call_function_single() and therefore | ||
309 | * are on the correct cpu. | ||
310 | */ | ||
311 | int cpu = smp_processor_id(); | ||
312 | int node = cpu_to_node(cpu); | ||
313 | struct pci_dev *dev = node_to_k8_nb_misc(node); | ||
314 | unsigned int sc0, sc1, sc2, sc3; | 318 | unsigned int sc0, sc1, sc2, sc3; |
315 | u32 val = 0; | 319 | u32 val = 0; |
316 | 320 | ||
317 | pci_read_config_dword(dev, 0x1C4, &val); | 321 | pci_read_config_dword(l3->dev, 0x1C4, &val); |
318 | 322 | ||
319 | /* calculate subcache sizes */ | 323 | /* calculate subcache sizes */ |
320 | sc0 = !(val & BIT(0)); | 324 | l3->subcaches[0] = sc0 = !(val & BIT(0)); |
321 | sc1 = !(val & BIT(4)); | 325 | l3->subcaches[1] = sc1 = !(val & BIT(4)); |
322 | sc2 = !(val & BIT(8)) + !(val & BIT(9)); | 326 | l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); |
323 | sc3 = !(val & BIT(12)) + !(val & BIT(13)); | 327 | l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); |
324 | 328 | ||
325 | return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1; | 329 | l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1; |
330 | } | ||
331 | |||
332 | static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node) | ||
333 | { | ||
334 | struct amd_l3_cache *l3; | ||
335 | struct pci_dev *dev = node_to_k8_nb_misc(node); | ||
336 | |||
337 | l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC); | ||
338 | if (!l3) { | ||
339 | printk(KERN_WARNING "Error allocating L3 struct\n"); | ||
340 | return NULL; | ||
341 | } | ||
342 | |||
343 | l3->dev = dev; | ||
344 | |||
345 | amd_calc_l3_indices(l3); | ||
346 | |||
347 | return l3; | ||
326 | } | 348 | } |
327 | 349 | ||
328 | static void __cpuinit | 350 | static void __cpuinit |
329 | amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) | 351 | amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) |
330 | { | 352 | { |
331 | if (index < 3) | 353 | int node; |
354 | |||
355 | if (boot_cpu_data.x86 != 0x10) | ||
332 | return; | 356 | return; |
333 | 357 | ||
334 | if (boot_cpu_data.x86 == 0x11) | 358 | if (index < 3) |
335 | return; | 359 | return; |
336 | 360 | ||
337 | /* see errata #382 and #388 */ | 361 | /* see errata #382 and #388 */ |
338 | if ((boot_cpu_data.x86 == 0x10) && | 362 | if (boot_cpu_data.x86_model < 0x8) |
339 | ((boot_cpu_data.x86_model < 0x8) || | 363 | return; |
340 | (boot_cpu_data.x86_mask < 0x1))) | 364 | |
365 | if ((boot_cpu_data.x86_model == 0x8 || | ||
366 | boot_cpu_data.x86_model == 0x9) | ||
367 | && | ||
368 | boot_cpu_data.x86_mask < 0x1) | ||
369 | return; | ||
370 | |||
371 | /* not in virtualized environments */ | ||
372 | if (num_k8_northbridges == 0) | ||
341 | return; | 373 | return; |
342 | 374 | ||
343 | this_leaf->can_disable = true; | 375 | /* |
344 | this_leaf->l3_indices = amd_calc_l3_indices(); | 376 | * Strictly speaking, the amount in @size below is leaked since it is |
377 | * never freed, but this is done only on shutdown so it doesn't matter. | ||
378 | */ | ||
379 | if (!l3_caches) { | ||
380 | int size = num_k8_northbridges * sizeof(struct amd_l3_cache *); | ||
381 | |||
382 | l3_caches = kzalloc(size, GFP_ATOMIC); | ||
383 | if (!l3_caches) | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | node = amd_get_nb_id(smp_processor_id()); | ||
388 | |||
389 | if (!l3_caches[node]) { | ||
390 | l3_caches[node] = amd_init_l3_cache(node); | ||
391 | l3_caches[node]->can_disable = true; | ||
392 | } | ||
393 | |||
394 | WARN_ON(!l3_caches[node]); | ||
395 | |||
396 | this_leaf->l3 = l3_caches[node]; | ||
345 | } | 397 | } |
346 | 398 | ||
347 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, | 399 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, |
348 | unsigned int index) | 400 | unsigned int slot) |
349 | { | 401 | { |
350 | int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); | 402 | struct pci_dev *dev = this_leaf->l3->dev; |
351 | int node = amd_get_nb_id(cpu); | ||
352 | struct pci_dev *dev = node_to_k8_nb_misc(node); | ||
353 | unsigned int reg = 0; | 403 | unsigned int reg = 0; |
354 | 404 | ||
355 | if (!this_leaf->can_disable) | 405 | if (!this_leaf->l3 || !this_leaf->l3->can_disable) |
356 | return -EINVAL; | 406 | return -EINVAL; |
357 | 407 | ||
358 | if (!dev) | 408 | if (!dev) |
359 | return -EINVAL; | 409 | return -EINVAL; |
360 | 410 | ||
361 | pci_read_config_dword(dev, 0x1BC + index * 4, ®); | 411 | pci_read_config_dword(dev, 0x1BC + slot * 4, ®); |
362 | return sprintf(buf, "0x%08x\n", reg); | 412 | return sprintf(buf, "0x%08x\n", reg); |
363 | } | 413 | } |
364 | 414 | ||
365 | #define SHOW_CACHE_DISABLE(index) \ | 415 | #define SHOW_CACHE_DISABLE(slot) \ |
366 | static ssize_t \ | 416 | static ssize_t \ |
367 | show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \ | 417 | show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \ |
368 | { \ | 418 | { \ |
369 | return show_cache_disable(this_leaf, buf, index); \ | 419 | return show_cache_disable(this_leaf, buf, slot); \ |
370 | } | 420 | } |
371 | SHOW_CACHE_DISABLE(0) | 421 | SHOW_CACHE_DISABLE(0) |
372 | SHOW_CACHE_DISABLE(1) | 422 | SHOW_CACHE_DISABLE(1) |
373 | 423 | ||
424 | static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu, | ||
425 | unsigned slot, unsigned long idx) | ||
426 | { | ||
427 | int i; | ||
428 | |||
429 | idx |= BIT(30); | ||
430 | |||
431 | /* | ||
432 | * disable index in all 4 subcaches | ||
433 | */ | ||
434 | for (i = 0; i < 4; i++) { | ||
435 | u32 reg = idx | (i << 20); | ||
436 | |||
437 | if (!l3->subcaches[i]) | ||
438 | continue; | ||
439 | |||
440 | pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg); | ||
441 | |||
442 | /* | ||
443 | * We need to WBINVD on a core on the node containing the L3 | ||
444 | * cache whose indices we disable; a simple wbinvd() | ||
445 | * is not sufficient. | ||
446 | */ | ||
447 | wbinvd_on_cpu(cpu); | ||
448 | |||
449 | reg |= BIT(31); | ||
450 | pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg); | ||
451 | } | ||
452 | } | ||
453 | |||
454 | |||
374 | static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, | 455 | static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, |
375 | const char *buf, size_t count, unsigned int index) | 456 | const char *buf, size_t count, |
457 | unsigned int slot) | ||
376 | { | 458 | { |
459 | struct pci_dev *dev = this_leaf->l3->dev; | ||
377 | int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); | 460 | int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); |
378 | int node = amd_get_nb_id(cpu); | ||
379 | struct pci_dev *dev = node_to_k8_nb_misc(node); | ||
380 | unsigned long val = 0; | 461 | unsigned long val = 0; |
381 | 462 | ||
382 | #define SUBCACHE_MASK (3UL << 20) | 463 | #define SUBCACHE_MASK (3UL << 20) |
383 | #define SUBCACHE_INDEX 0xfff | 464 | #define SUBCACHE_INDEX 0xfff |
384 | 465 | ||
385 | if (!this_leaf->can_disable) | 466 | if (!this_leaf->l3 || !this_leaf->l3->can_disable) |
386 | return -EINVAL; | 467 | return -EINVAL; |
387 | 468 | ||
388 | if (!capable(CAP_SYS_ADMIN)) | 469 | if (!capable(CAP_SYS_ADMIN)) |
@@ -396,26 +477,20 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, | |||
396 | 477 | ||
397 | /* do not allow writes outside of allowed bits */ | 478 | /* do not allow writes outside of allowed bits */ |
398 | if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) || | 479 | if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) || |
399 | ((val & SUBCACHE_INDEX) > this_leaf->l3_indices)) | 480 | ((val & SUBCACHE_INDEX) > this_leaf->l3->indices)) |
400 | return -EINVAL; | 481 | return -EINVAL; |
401 | 482 | ||
402 | val |= BIT(30); | 483 | amd_l3_disable_index(this_leaf->l3, cpu, slot, val); |
403 | pci_write_config_dword(dev, 0x1BC + index * 4, val); | 484 | |
404 | /* | ||
405 | * We need to WBINVD on a core on the node containing the L3 cache which | ||
406 | * indices we disable therefore a simple wbinvd() is not sufficient. | ||
407 | */ | ||
408 | wbinvd_on_cpu(cpu); | ||
409 | pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31)); | ||
410 | return count; | 485 | return count; |
411 | } | 486 | } |
412 | 487 | ||
413 | #define STORE_CACHE_DISABLE(index) \ | 488 | #define STORE_CACHE_DISABLE(slot) \ |
414 | static ssize_t \ | 489 | static ssize_t \ |
415 | store_cache_disable_##index(struct _cpuid4_info *this_leaf, \ | 490 | store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ |
416 | const char *buf, size_t count) \ | 491 | const char *buf, size_t count) \ |
417 | { \ | 492 | { \ |
418 | return store_cache_disable(this_leaf, buf, count, index); \ | 493 | return store_cache_disable(this_leaf, buf, count, slot); \ |
419 | } | 494 | } |
420 | STORE_CACHE_DISABLE(0) | 495 | STORE_CACHE_DISABLE(0) |
421 | STORE_CACHE_DISABLE(1) | 496 | STORE_CACHE_DISABLE(1) |
@@ -443,8 +518,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index, | |||
443 | 518 | ||
444 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { | 519 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
445 | amd_cpuid4(index, &eax, &ebx, &ecx); | 520 | amd_cpuid4(index, &eax, &ebx, &ecx); |
446 | if (boot_cpu_data.x86 >= 0x10) | 521 | amd_check_l3_disable(index, this_leaf); |
447 | amd_check_l3_disable(index, this_leaf); | ||
448 | } else { | 522 | } else { |
449 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | 523 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); |
450 | } | 524 | } |
@@ -701,6 +775,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
701 | for (i = 0; i < num_cache_leaves; i++) | 775 | for (i = 0; i < num_cache_leaves; i++) |
702 | cache_remove_shared_cpu_map(cpu, i); | 776 | cache_remove_shared_cpu_map(cpu, i); |
703 | 777 | ||
778 | kfree(per_cpu(ici_cpuid4_info, cpu)->l3); | ||
704 | kfree(per_cpu(ici_cpuid4_info, cpu)); | 779 | kfree(per_cpu(ici_cpuid4_info, cpu)); |
705 | per_cpu(ici_cpuid4_info, cpu) = NULL; | 780 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
706 | } | 781 | } |
@@ -985,7 +1060,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
985 | 1060 | ||
986 | this_leaf = CPUID4_INFO_IDX(cpu, i); | 1061 | this_leaf = CPUID4_INFO_IDX(cpu, i); |
987 | 1062 | ||
988 | if (this_leaf->can_disable) | 1063 | if (this_leaf->l3 && this_leaf->l3->can_disable) |
989 | ktype_cache.default_attrs = default_l3_attrs; | 1064 | ktype_cache.default_attrs = default_l3_attrs; |
990 | else | 1065 | else |
991 | ktype_cache.default_attrs = default_attrs; | 1066 | ktype_cache.default_attrs = default_attrs; |
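The cache_disable_0/1 slot files now route through amd_l3_disable_index(), which applies the index to each populated subcache and flushes with wbinvd_on_cpu(). The userspace interface is unchanged; a sketch, assuming the usual sysfs cache hierarchy where index3 is the L3 leaf:

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
        FILE *f = fopen(path, "w");  /* needs CAP_SYS_ADMIN */

        if (!f) {
            perror(path);
            return 1;
        }
        fprintf(f, "%u\n", 42);  /* index must be <= this_leaf->l3->indices */
        fclose(f);
        return 0;
    }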
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8a6f0afa767e..7a355ddcc64b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -539,7 +539,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
539 | struct mce m; | 539 | struct mce m; |
540 | int i; | 540 | int i; |
541 | 541 | ||
542 | __get_cpu_var(mce_poll_count)++; | 542 | percpu_inc(mce_poll_count); |
543 | 543 | ||
544 | mce_setup(&m); | 544 | mce_setup(&m); |
545 | 545 | ||
@@ -934,7 +934,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
934 | 934 | ||
935 | atomic_inc(&mce_entry); | 935 | atomic_inc(&mce_entry); |
936 | 936 | ||
937 | __get_cpu_var(mce_exception_count)++; | 937 | percpu_inc(mce_exception_count); |
938 | 938 | ||
939 | if (notify_die(DIE_NMI, "machine check", regs, error_code, | 939 | if (notify_die(DIE_NMI, "machine check", regs, error_code, |
940 | 18, SIGKILL) == NOTIFY_STOP) | 940 | 18, SIGKILL) == NOTIFY_STOP) |
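percpu_inc() replaces the open-coded __get_cpu_var()++ with a single segment-prefixed increment, avoiding the separate per-cpu address computation and closing the window where the read-modify-write could be interleaved. Illustratively (the shape of the generated code, not actual compiler output):

    __get_cpu_var(mce_poll_count)++;  /* compute per-cpu address, load, add, store */
    percpu_inc(mce_poll_count);       /* single "incl %gs:..." style instruction   */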
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c new file mode 100644 index 000000000000..16f41bbe46b6 --- /dev/null +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * HyperV Detection code. | ||
3 | * | ||
4 | * Copyright (C) 2010, Novell, Inc. | ||
5 | * Author : K. Y. Srinivasan <ksrinivasan@novell.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; version 2 of the License. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <asm/hypervisor.h> | ||
17 | #include <asm/hyperv.h> | ||
18 | #include <asm/mshyperv.h> | ||
19 | |||
20 | struct ms_hyperv_info ms_hyperv; | ||
21 | |||
22 | static bool __init ms_hyperv_platform(void) | ||
23 | { | ||
24 | u32 eax; | ||
25 | u32 hyp_signature[3]; | ||
26 | |||
27 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) | ||
28 | return false; | ||
29 | |||
30 | cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, | ||
31 | &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); | ||
32 | |||
33 | return eax >= HYPERV_CPUID_MIN && | ||
34 | eax <= HYPERV_CPUID_MAX && | ||
35 | !memcmp("Microsoft Hv", hyp_signature, 12); | ||
36 | } | ||
37 | |||
38 | static void __init ms_hyperv_init_platform(void) | ||
39 | { | ||
40 | /* | ||
41 | * Extract the features and hints | ||
42 | */ | ||
43 | ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); | ||
44 | ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); | ||
45 | |||
46 | printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", | ||
47 | ms_hyperv.features, ms_hyperv.hints); | ||
48 | } | ||
49 | |||
50 | const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { | ||
51 | .name = "Microsoft HyperV", | ||
52 | .detect = ms_hyperv_platform, | ||
53 | .init_platform = ms_hyperv_init_platform, | ||
54 | }; | ||
55 | EXPORT_SYMBOL(x86_hyper_ms_hyperv); | ||
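CPUID executes from unprivileged guest code, so the same signature probe works outside the kernel. A sketch (leaf 0x40000000 is HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS above; the result is only meaningful when the hypervisor-present bit, CPUID.1:ECX[31], is set):

    #include <stdio.h>
    #include <string.h>

    static void cpuid(unsigned int leaf, unsigned int *a, unsigned int *b,
                      unsigned int *c, unsigned int *d)
    {
        asm volatile("cpuid"
                     : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                     : "a" (leaf));
    }

    int main(void)
    {
        unsigned int eax, sig[3];

        cpuid(0x40000000, &eax, &sig[0], &sig[1], &sig[2]);
        printf("hypervisor signature: %.12s\n", (const char *)sig);
        return !!memcmp(sig, "Microsoft Hv", 12);
    }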
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index dfdb4dba2320..b9d1ff588445 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -24,8 +24,8 @@ | |||
24 | #include <linux/dmi.h> | 24 | #include <linux/dmi.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <asm/div64.h> | 26 | #include <asm/div64.h> |
27 | #include <asm/vmware.h> | ||
28 | #include <asm/x86_init.h> | 27 | #include <asm/x86_init.h> |
28 | #include <asm/hypervisor.h> | ||
29 | 29 | ||
30 | #define CPUID_VMWARE_INFO_LEAF 0x40000000 | 30 | #define CPUID_VMWARE_INFO_LEAF 0x40000000 |
31 | #define VMWARE_HYPERVISOR_MAGIC 0x564D5868 | 31 | #define VMWARE_HYPERVISOR_MAGIC 0x564D5868 |
@@ -65,7 +65,7 @@ static unsigned long vmware_get_tsc_khz(void) | |||
65 | return tsc_hz; | 65 | return tsc_hz; |
66 | } | 66 | } |
67 | 67 | ||
68 | void __init vmware_platform_setup(void) | 68 | static void __init vmware_platform_setup(void) |
69 | { | 69 | { |
70 | uint32_t eax, ebx, ecx, edx; | 70 | uint32_t eax, ebx, ecx, edx; |
71 | 71 | ||
@@ -83,26 +83,22 @@ void __init vmware_platform_setup(void) | |||
83 | * serial key should be enough, as this will always have a VMware | 83 | * serial key should be enough, as this will always have a VMware |
84 | * specific string when running under VMware hypervisor. | 84 | * specific string when running under VMware hypervisor. |
85 | */ | 85 | */ |
86 | int vmware_platform(void) | 86 | static bool __init vmware_platform(void) |
87 | { | 87 | { |
88 | if (cpu_has_hypervisor) { | 88 | if (cpu_has_hypervisor) { |
89 | unsigned int eax, ebx, ecx, edx; | 89 | unsigned int eax; |
90 | char hyper_vendor_id[13]; | 90 | unsigned int hyper_vendor_id[3]; |
91 | 91 | ||
92 | cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx); | 92 | cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], |
93 | memcpy(hyper_vendor_id + 0, &ebx, 4); | 93 | &hyper_vendor_id[1], &hyper_vendor_id[2]); |
94 | memcpy(hyper_vendor_id + 4, &ecx, 4); | 94 | if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) |
95 | memcpy(hyper_vendor_id + 8, &edx, 4); | 95 | return true; |
96 | hyper_vendor_id[12] = '\0'; | ||
97 | if (!strcmp(hyper_vendor_id, "VMwareVMware")) | ||
98 | return 1; | ||
99 | } else if (dmi_available && dmi_name_in_serial("VMware") && | 96 | } else if (dmi_available && dmi_name_in_serial("VMware") && |
100 | __vmware_platform()) | 97 | __vmware_platform()) |
101 | return 1; | 98 | return true; |
102 | 99 | ||
103 | return 0; | 100 | return false; |
104 | } | 101 | } |
105 | EXPORT_SYMBOL(vmware_platform); | ||
106 | 102 | ||
107 | /* | 103 | /* |
108 | * VMware hypervisor takes care of exporting a reliable TSC to the guest. | 104 | * VMware hypervisor takes care of exporting a reliable TSC to the guest. |
@@ -116,8 +112,16 @@ EXPORT_SYMBOL(vmware_platform); | |||
116 | * so that the kernel could just trust the hypervisor with providing a | 112 | * so that the kernel could just trust the hypervisor with providing a |
117 | * reliable virtual TSC that is suitable for timekeeping. | 113 | * reliable virtual TSC that is suitable for timekeeping. |
118 | */ | 114 | */ |
119 | void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c) | 115 | static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) |
120 | { | 116 | { |
121 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 117 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
122 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); | 118 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); |
123 | } | 119 | } |
120 | |||
121 | const __refconst struct hypervisor_x86 x86_hyper_vmware = { | ||
122 | .name = "VMware", | ||
123 | .detect = vmware_platform, | ||
124 | .set_cpu_features = vmware_set_cpu_features, | ||
125 | .init_platform = vmware_platform_setup, | ||
126 | }; | ||
127 | EXPORT_SYMBOL(x86_hyper_vmware); | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 44a8e0dc6737..cd49141cf153 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <asm/processor-flags.h> | 53 | #include <asm/processor-flags.h> |
54 | #include <asm/ftrace.h> | 54 | #include <asm/ftrace.h> |
55 | #include <asm/irq_vectors.h> | 55 | #include <asm/irq_vectors.h> |
56 | #include <asm/cpufeature.h> | ||
56 | 57 | ||
57 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | 58 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
58 | #include <linux/elf-em.h> | 59 | #include <linux/elf-em.h> |
@@ -905,7 +906,25 @@ ENTRY(simd_coprocessor_error) | |||
905 | RING0_INT_FRAME | 906 | RING0_INT_FRAME |
906 | pushl $0 | 907 | pushl $0 |
907 | CFI_ADJUST_CFA_OFFSET 4 | 908 | CFI_ADJUST_CFA_OFFSET 4 |
909 | #ifdef CONFIG_X86_INVD_BUG | ||
910 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ | ||
911 | 661: pushl $do_general_protection | ||
912 | 662: | ||
913 | .section .altinstructions,"a" | ||
914 | .balign 4 | ||
915 | .long 661b | ||
916 | .long 663f | ||
917 | .byte X86_FEATURE_XMM | ||
918 | .byte 662b-661b | ||
919 | .byte 664f-663f | ||
920 | .previous | ||
921 | .section .altinstr_replacement,"ax" | ||
922 | 663: pushl $do_simd_coprocessor_error | ||
923 | 664: | ||
924 | .previous | ||
925 | #else | ||
908 | pushl $do_simd_coprocessor_error | 926 | pushl $do_simd_coprocessor_error |
927 | #endif | ||
909 | CFI_ADJUST_CFA_OFFSET 4 | 928 | CFI_ADJUST_CFA_OFFSET 4 |
910 | jmp error_code | 929 | jmp error_code |
911 | CFI_ENDPROC | 930 | CFI_ENDPROC |
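The .altinstructions records above feed the kernel's alternatives machinery: at boot, apply_alternatives() rewrites the original push with the replacement when the named feature bit is set, so SSE-capable CPUs push do_simd_coprocessor_error while the buggy AMD 486 keeps do_general_protection. Each record emitted by the directives has roughly this shape (a sketch; field names are illustrative, not the kernel's struct alt_instr verbatim):

    struct alt_record_sketch {
        unsigned int  instr;          /* 661b: address of original code    */
        unsigned int  replacement;    /* 663f: address of replacement code */
        unsigned char cpuid;          /* feature bit (X86_FEATURE_XMM)     */
        unsigned char instrlen;       /* 662b-661b: original length        */
        unsigned char replacementlen; /* 664f-663f: replacement length     */
    };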
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 54c31c285488..86cef6b32253 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -102,65 +102,62 @@ void __cpuinit fpu_init(void) | |||
102 | 102 | ||
103 | mxcsr_feature_mask_init(); | 103 | mxcsr_feature_mask_init(); |
104 | /* clean state in init */ | 104 | /* clean state in init */ |
105 | if (cpu_has_xsave) | 105 | current_thread_info()->status = 0; |
106 | current_thread_info()->status = TS_XSAVE; | ||
107 | else | ||
108 | current_thread_info()->status = 0; | ||
109 | clear_used_math(); | 106 | clear_used_math(); |
110 | } | 107 | } |
111 | #endif /* CONFIG_X86_64 */ | 108 | #endif /* CONFIG_X86_64 */ |
112 | 109 | ||
113 | /* | 110 | static void fpu_finit(struct fpu *fpu) |
114 | * The _current_ task is using the FPU for the first time | ||
115 | * so initialize it and set the mxcsr to its default | ||
116 | * value at reset if we support XMM instructions and then | ||
117 | * remeber the current task has used the FPU. | ||
118 | */ | ||
119 | int init_fpu(struct task_struct *tsk) | ||
120 | { | 111 | { |
121 | if (tsk_used_math(tsk)) { | ||
122 | if (HAVE_HWFP && tsk == current) | ||
123 | unlazy_fpu(tsk); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Memory allocation at the first usage of the FPU and other state. | ||
129 | */ | ||
130 | if (!tsk->thread.xstate) { | ||
131 | tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep, | ||
132 | GFP_KERNEL); | ||
133 | if (!tsk->thread.xstate) | ||
134 | return -ENOMEM; | ||
135 | } | ||
136 | |||
137 | #ifdef CONFIG_X86_32 | 112 | #ifdef CONFIG_X86_32 |
138 | if (!HAVE_HWFP) { | 113 | if (!HAVE_HWFP) { |
139 | memset(tsk->thread.xstate, 0, xstate_size); | 114 | finit_soft_fpu(&fpu->state->soft); |
140 | finit_task(tsk); | 115 | return; |
141 | set_stopped_child_used_math(tsk); | ||
142 | return 0; | ||
143 | } | 116 | } |
144 | #endif | 117 | #endif |
145 | 118 | ||
146 | if (cpu_has_fxsr) { | 119 | if (cpu_has_fxsr) { |
147 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | 120 | struct i387_fxsave_struct *fx = &fpu->state->fxsave; |
148 | 121 | ||
149 | memset(fx, 0, xstate_size); | 122 | memset(fx, 0, xstate_size); |
150 | fx->cwd = 0x37f; | 123 | fx->cwd = 0x37f; |
151 | if (cpu_has_xmm) | 124 | if (cpu_has_xmm) |
152 | fx->mxcsr = MXCSR_DEFAULT; | 125 | fx->mxcsr = MXCSR_DEFAULT; |
153 | } else { | 126 | } else { |
154 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; | 127 | struct i387_fsave_struct *fp = &fpu->state->fsave; |
155 | memset(fp, 0, xstate_size); | 128 | memset(fp, 0, xstate_size); |
156 | fp->cwd = 0xffff037fu; | 129 | fp->cwd = 0xffff037fu; |
157 | fp->swd = 0xffff0000u; | 130 | fp->swd = 0xffff0000u; |
158 | fp->twd = 0xffffffffu; | 131 | fp->twd = 0xffffffffu; |
159 | fp->fos = 0xffff0000u; | 132 | fp->fos = 0xffff0000u; |
160 | } | 133 | } |
134 | } | ||
135 | |||
136 | /* | ||
137 | * The _current_ task is using the FPU for the first time | ||
138 | * so initialize it and set the mxcsr to its default | ||
139 | * value at reset if we support XMM instructions and then | ||
140 | * remember the current task has used the FPU. | ||
141 | */ | ||
142 | int init_fpu(struct task_struct *tsk) | ||
143 | { | ||
144 | int ret; | ||
145 | |||
146 | if (tsk_used_math(tsk)) { | ||
147 | if (HAVE_HWFP && tsk == current) | ||
148 | unlazy_fpu(tsk); | ||
149 | return 0; | ||
150 | } | ||
151 | |||
161 | /* | 152 | /* |
162 | * Only the device not available exception or ptrace can call init_fpu. | 153 | * Memory allocation at the first usage of the FPU and other state. |
163 | */ | 154 | */ |
155 | ret = fpu_alloc(&tsk->thread.fpu); | ||
156 | if (ret) | ||
157 | return ret; | ||
158 | |||
159 | fpu_finit(&tsk->thread.fpu); | ||
160 | |||
164 | set_stopped_child_used_math(tsk); | 161 | set_stopped_child_used_math(tsk); |
165 | return 0; | 162 | return 0; |
166 | } | 163 | } |
@@ -194,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
194 | return ret; | 191 | return ret; |
195 | 192 | ||
196 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 193 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
197 | &target->thread.xstate->fxsave, 0, -1); | 194 | &target->thread.fpu.state->fxsave, 0, -1); |
198 | } | 195 | } |
199 | 196 | ||
200 | int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | 197 | int xfpregs_set(struct task_struct *target, const struct user_regset *regset, |
@@ -211,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
211 | return ret; | 208 | return ret; |
212 | 209 | ||
213 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 210 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
214 | &target->thread.xstate->fxsave, 0, -1); | 211 | &target->thread.fpu.state->fxsave, 0, -1); |
215 | 212 | ||
216 | /* | 213 | /* |
217 | * mxcsr reserved bits must be masked to zero for security reasons. | 214 | * mxcsr reserved bits must be masked to zero for security reasons. |
218 | */ | 215 | */ |
219 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 216 | target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; |
220 | 217 | ||
221 | /* | 218 | /* |
222 | * update the header bits in the xsave header, indicating the | 219 | * update the header bits in the xsave header, indicating the |
223 | * presence of FP and SSE state. | 220 | * presence of FP and SSE state. |
224 | */ | 221 | */ |
225 | if (cpu_has_xsave) | 222 | if (cpu_has_xsave) |
226 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; | 223 | target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; |
227 | 224 | ||
228 | return ret; | 225 | return ret; |
229 | } | 226 | } |
@@ -246,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, | |||
246 | * memory layout in the thread struct, so that we can copy the entire | 243 | * memory layout in the thread struct, so that we can copy the entire |
247 | * xstateregs to the user using one user_regset_copyout(). | 244 | * xstateregs to the user using one user_regset_copyout(). |
248 | */ | 245 | */ |
249 | memcpy(&target->thread.xstate->fxsave.sw_reserved, | 246 | memcpy(&target->thread.fpu.state->fxsave.sw_reserved, |
250 | xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); | 247 | xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); |
251 | 248 | ||
252 | /* | 249 | /* |
253 | * Copy the xstate memory layout. | 250 | * Copy the xstate memory layout. |
254 | */ | 251 | */ |
255 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 252 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
256 | &target->thread.xstate->xsave, 0, -1); | 253 | &target->thread.fpu.state->xsave, 0, -1); |
257 | return ret; | 254 | return ret; |
258 | } | 255 | } |
259 | 256 | ||
@@ -272,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, | |||
272 | return ret; | 269 | return ret; |
273 | 270 | ||
274 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 271 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
275 | &target->thread.xstate->xsave, 0, -1); | 272 | &target->thread.fpu.state->xsave, 0, -1); |
276 | 273 | ||
277 | /* | 274 | /* |
278 | * mxcsr reserved bits must be masked to zero for security reasons. | 275 | * mxcsr reserved bits must be masked to zero for security reasons. |
279 | */ | 276 | */ |
280 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 277 | target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; |
281 | 278 | ||
282 | xsave_hdr = &target->thread.xstate->xsave.xsave_hdr; | 279 | xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr; |
283 | 280 | ||
284 | xsave_hdr->xstate_bv &= pcntxt_mask; | 281 | xsave_hdr->xstate_bv &= pcntxt_mask; |
285 | /* | 282 | /* |
@@ -365,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) | |||
365 | static void | 362 | static void |
366 | convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) | 363 | convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) |
367 | { | 364 | { |
368 | struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave; | 365 | struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; |
369 | struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; | 366 | struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; |
370 | struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; | 367 | struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; |
371 | int i; | 368 | int i; |
@@ -405,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk, | |||
405 | const struct user_i387_ia32_struct *env) | 402 | const struct user_i387_ia32_struct *env) |
406 | 403 | ||
407 | { | 404 | { |
408 | struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave; | 405 | struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; |
409 | struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; | 406 | struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; |
410 | struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; | 407 | struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; |
411 | int i; | 408 | int i; |
@@ -445,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
445 | 442 | ||
446 | if (!cpu_has_fxsr) { | 443 | if (!cpu_has_fxsr) { |
447 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 444 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
448 | &target->thread.xstate->fsave, 0, | 445 | &target->thread.fpu.state->fsave, 0, |
449 | -1); | 446 | -1); |
450 | } | 447 | } |
451 | 448 | ||
@@ -475,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
475 | 472 | ||
476 | if (!cpu_has_fxsr) { | 473 | if (!cpu_has_fxsr) { |
477 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 474 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
478 | &target->thread.xstate->fsave, 0, -1); | 475 | &target->thread.fpu.state->fsave, 0, -1); |
479 | } | 476 | } |
480 | 477 | ||
481 | if (pos > 0 || count < sizeof(env)) | 478 | if (pos > 0 || count < sizeof(env)) |
@@ -490,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
490 | * presence of FP. | 487 | * presence of FP. |
491 | */ | 488 | */ |
492 | if (cpu_has_xsave) | 489 | if (cpu_has_xsave) |
493 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; | 490 | target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; |
494 | return ret; | 491 | return ret; |
495 | } | 492 | } |
496 | 493 | ||
@@ -501,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
501 | static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) | 498 | static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) |
502 | { | 499 | { |
503 | struct task_struct *tsk = current; | 500 | struct task_struct *tsk = current; |
504 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; | 501 | struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave; |
505 | 502 | ||
506 | fp->status = fp->swd; | 503 | fp->status = fp->swd; |
507 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) | 504 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) |
@@ -512,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
512 | static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | 509 | static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) |
513 | { | 510 | { |
514 | struct task_struct *tsk = current; | 511 | struct task_struct *tsk = current; |
515 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | 512 | struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; |
516 | struct user_i387_ia32_struct env; | 513 | struct user_i387_ia32_struct env; |
517 | int err = 0; | 514 | int err = 0; |
518 | 515 | ||
@@ -547,7 +544,7 @@ static int save_i387_xsave(void __user *buf) | |||
547 | * header as well as change any contents in the memory layout. | 544 | * header as well as change any contents in the memory layout. |
548 | * xrestore as part of sigreturn will capture all the changes. | 545 | * xrestore as part of sigreturn will capture all the changes. |
549 | */ | 546 | */ |
550 | tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; | 547 | tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; |
551 | 548 | ||
552 | if (save_i387_fxsave(fx) < 0) | 549 | if (save_i387_fxsave(fx) < 0) |
553 | return -1; | 550 | return -1; |
@@ -599,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
599 | { | 596 | { |
600 | struct task_struct *tsk = current; | 597 | struct task_struct *tsk = current; |
601 | 598 | ||
602 | return __copy_from_user(&tsk->thread.xstate->fsave, buf, | 599 | return __copy_from_user(&tsk->thread.fpu.state->fsave, buf, |
603 | sizeof(struct i387_fsave_struct)); | 600 | sizeof(struct i387_fsave_struct)); |
604 | } | 601 | } |
605 | 602 | ||
@@ -610,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, | |||
610 | struct user_i387_ia32_struct env; | 607 | struct user_i387_ia32_struct env; |
611 | int err; | 608 | int err; |
612 | 609 | ||
613 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], | 610 | err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0], |
614 | size); | 611 | size); |
615 | /* mxcsr reserved bits must be masked to zero for security reasons */ | 612 | /* mxcsr reserved bits must be masked to zero for security reasons */ |
616 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 613 | tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; |
617 | if (err || __copy_from_user(&env, buf, sizeof(env))) | 614 | if (err || __copy_from_user(&env, buf, sizeof(env))) |
618 | return 1; | 615 | return 1; |
619 | convert_to_fxsr(tsk, &env); | 616 | convert_to_fxsr(tsk, &env); |
@@ -629,7 +626,7 @@ static int restore_i387_xsave(void __user *buf) | |||
629 | struct i387_fxsave_struct __user *fx = | 626 | struct i387_fxsave_struct __user *fx = |
630 | (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; | 627 | (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; |
631 | struct xsave_hdr_struct *xsave_hdr = | 628 | struct xsave_hdr_struct *xsave_hdr = |
632 | ¤t->thread.xstate->xsave.xsave_hdr; | 629 | ¤t->thread.fpu.state->xsave.xsave_hdr; |
633 | u64 mask; | 630 | u64 mask; |
634 | int err; | 631 | int err; |
635 | 632 | ||
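The mechanical tsk->thread.xstate -> tsk->thread.fpu.state rewrite throughout this file follows from wrapping the state pointer in a small container, which fpu_alloc()/fpu_finit() then manage. The accessors above imply roughly this shape (a sketch, not necessarily the header's exact definition):

    struct fpu {
        union thread_xstate *state;  /* fsave/fxsave/xsave union, as before */
    };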
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 23c167925a5c..2dfd31597443 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/hpet.h> | 16 | #include <asm/hpet.h> |
17 | #include <asm/smp.h> | 17 | #include <asm/smp.h> |
18 | 18 | ||
19 | DEFINE_SPINLOCK(i8253_lock); | 19 | DEFINE_RAW_SPINLOCK(i8253_lock); |
20 | EXPORT_SYMBOL(i8253_lock); | 20 | EXPORT_SYMBOL(i8253_lock); |
21 | 21 | ||
22 | /* | 22 | /* |
@@ -33,7 +33,7 @@ struct clock_event_device *global_clock_event; | |||
33 | static void init_pit_timer(enum clock_event_mode mode, | 33 | static void init_pit_timer(enum clock_event_mode mode, |
34 | struct clock_event_device *evt) | 34 | struct clock_event_device *evt) |
35 | { | 35 | { |
36 | spin_lock(&i8253_lock); | 36 | raw_spin_lock(&i8253_lock); |
37 | 37 | ||
38 | switch (mode) { | 38 | switch (mode) { |
39 | case CLOCK_EVT_MODE_PERIODIC: | 39 | case CLOCK_EVT_MODE_PERIODIC: |
@@ -62,7 +62,7 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
62 | /* Nothing to do here */ | 62 | /* Nothing to do here */ |
63 | break; | 63 | break; |
64 | } | 64 | } |
65 | spin_unlock(&i8253_lock); | 65 | raw_spin_unlock(&i8253_lock); |
66 | } | 66 | } |
67 | 67 | ||
68 | /* | 68 | /* |
@@ -72,10 +72,10 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
72 | */ | 72 | */ |
73 | static int pit_next_event(unsigned long delta, struct clock_event_device *evt) | 73 | static int pit_next_event(unsigned long delta, struct clock_event_device *evt) |
74 | { | 74 | { |
75 | spin_lock(&i8253_lock); | 75 | raw_spin_lock(&i8253_lock); |
76 | outb_pit(delta & 0xff , PIT_CH0); /* LSB */ | 76 | outb_pit(delta & 0xff , PIT_CH0); /* LSB */ |
77 | outb_pit(delta >> 8 , PIT_CH0); /* MSB */ | 77 | outb_pit(delta >> 8 , PIT_CH0); /* MSB */ |
78 | spin_unlock(&i8253_lock); | 78 | raw_spin_unlock(&i8253_lock); |
79 | 79 | ||
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
@@ -130,7 +130,7 @@ static cycle_t pit_read(struct clocksource *cs) | |||
130 | int count; | 130 | int count; |
131 | u32 jifs; | 131 | u32 jifs; |
132 | 132 | ||
133 | spin_lock_irqsave(&i8253_lock, flags); | 133 | raw_spin_lock_irqsave(&i8253_lock, flags); |
134 | /* | 134 | /* |
135 | * Although our caller may have the read side of xtime_lock, | 135 | * Although our caller may have the read side of xtime_lock, |
136 | * this is now a seqlock, and we are cheating in this routine | 136 | * this is now a seqlock, and we are cheating in this routine |
@@ -176,7 +176,7 @@ static cycle_t pit_read(struct clocksource *cs) | |||
176 | old_count = count; | 176 | old_count = count; |
177 | old_jifs = jifs; | 177 | old_jifs = jifs; |
178 | 178 | ||
179 | spin_unlock_irqrestore(&i8253_lock, flags); | 179 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
180 | 180 | ||
181 | count = (LATCH - 1) - count; | 181 | count = (LATCH - 1) - count; |
182 | 182 | ||
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 0ed2d300cd46..990ae7cfc578 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) | |||
60 | outb(0, 0xF0); | 60 | outb(0, 0xF0); |
61 | if (ignore_fpu_irq || !boot_cpu_data.hard_math) | 61 | if (ignore_fpu_irq || !boot_cpu_data.hard_math) |
62 | return IRQ_NONE; | 62 | return IRQ_NONE; |
63 | math_error((void __user *)get_irq_regs()->ip); | 63 | math_error(get_irq_regs(), 0, 16); |
64 | return IRQ_HANDLED; | 64 | return IRQ_HANDLED; |
65 | } | 65 | } |
66 | 66 | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index f2f56c0967b6..345a4b1fe144 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -542,20 +542,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
542 | struct kprobe_ctlblk *kcb; | 542 | struct kprobe_ctlblk *kcb; |
543 | 543 | ||
544 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); | 544 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); |
545 | if (*addr != BREAKPOINT_INSTRUCTION) { | ||
546 | /* | ||
547 | * The breakpoint instruction was removed right | ||
548 | * after we hit it. Another cpu has removed | ||
549 | * either a probepoint or a debugger breakpoint | ||
550 | * at this address. In either case, no further | ||
551 | * handling of this interrupt is appropriate. | ||
552 | * Back up over the (now missing) int3 and run | ||
553 | * the original instruction. | ||
554 | */ | ||
555 | regs->ip = (unsigned long)addr; | ||
556 | return 1; | ||
557 | } | ||
558 | |||
559 | /* | 545 | /* |
560 | * We don't want to be preempted for the entire | 546 | * We don't want to be preempted for the entire |
561 | * duration of kprobe processing. We conditionally | 547 | * duration of kprobe processing. We conditionally |
@@ -587,6 +573,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
587 | setup_singlestep(p, regs, kcb, 0); | 573 | setup_singlestep(p, regs, kcb, 0); |
588 | return 1; | 574 | return 1; |
589 | } | 575 | } |
576 | } else if (*addr != BREAKPOINT_INSTRUCTION) { | ||
577 | /* | ||
578 | * The breakpoint instruction was removed right | ||
579 | * after we hit it. Another cpu has removed | ||
580 | * either a probepoint or a debugger breakpoint | ||
581 | * at this address. In either case, no further | ||
582 | * handling of this interrupt is appropriate. | ||
583 | * Back up over the (now missing) int3 and run | ||
584 | * the original instruction. | ||
585 | */ | ||
586 | regs->ip = (unsigned long)addr; | ||
587 | preempt_enable_no_resched(); | ||
588 | return 1; | ||
590 | } else if (kprobe_running()) { | 589 | } else if (kprobe_running()) { |
591 | p = __get_cpu_var(current_kprobe); | 590 | p = __get_cpu_var(current_kprobe); |
592 | if (p->break_handler && p->break_handler(p, regs)) { | 591 | if (p->break_handler && p->break_handler(p, regs)) { |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index cceb5bc3c3c2..2cd8c544e41a 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -201,9 +201,9 @@ static int do_microcode_update(const void __user *buf, size_t size) | |||
201 | return error; | 201 | return error; |
202 | } | 202 | } |
203 | 203 | ||
204 | static int microcode_open(struct inode *unused1, struct file *unused2) | 204 | static int microcode_open(struct inode *inode, struct file *file) |
205 | { | 205 | { |
206 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | 206 | return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM; |
207 | } | 207 | } |
208 | 208 | ||
209 | static ssize_t microcode_write(struct file *file, const char __user *buf, | 209 | static ssize_t microcode_write(struct file *file, const char __user *buf, |
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 85a343e28937..356170262a93 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c | |||
@@ -343,10 +343,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
343 | int (*get_ucode_data)(void *, const void *, size_t)) | 343 | int (*get_ucode_data)(void *, const void *, size_t)) |
344 | { | 344 | { |
345 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 345 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
346 | u8 *ucode_ptr = data, *new_mc = NULL, *mc; | 346 | u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL; |
347 | int new_rev = uci->cpu_sig.rev; | 347 | int new_rev = uci->cpu_sig.rev; |
348 | unsigned int leftover = size; | 348 | unsigned int leftover = size; |
349 | enum ucode_state state = UCODE_OK; | 349 | enum ucode_state state = UCODE_OK; |
350 | unsigned int curr_mc_size = 0; | ||
350 | 351 | ||
351 | while (leftover) { | 352 | while (leftover) { |
352 | struct microcode_header_intel mc_header; | 353 | struct microcode_header_intel mc_header; |
@@ -361,9 +362,15 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
361 | break; | 362 | break; |
362 | } | 363 | } |
363 | 364 | ||
364 | mc = vmalloc(mc_size); | 365 | /* For performance reasons, reuse mc area when possible */ |
365 | if (!mc) | 366 | if (!mc || mc_size > curr_mc_size) { |
366 | break; | 367 | if (mc) |
368 | vfree(mc); | ||
369 | mc = vmalloc(mc_size); | ||
370 | if (!mc) | ||
371 | break; | ||
372 | curr_mc_size = mc_size; | ||
373 | } | ||
367 | 374 | ||
368 | if (get_ucode_data(mc, ucode_ptr, mc_size) || | 375 | if (get_ucode_data(mc, ucode_ptr, mc_size) || |
369 | microcode_sanity_check(mc) < 0) { | 376 | microcode_sanity_check(mc) < 0) { |
@@ -376,13 +383,16 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
376 | vfree(new_mc); | 383 | vfree(new_mc); |
377 | new_rev = mc_header.rev; | 384 | new_rev = mc_header.rev; |
378 | new_mc = mc; | 385 | new_mc = mc; |
379 | } else | 386 | mc = NULL; /* trigger new vmalloc */ |
380 | vfree(mc); | 387 | } |
381 | 388 | ||
382 | ucode_ptr += mc_size; | 389 | ucode_ptr += mc_size; |
383 | leftover -= mc_size; | 390 | leftover -= mc_size; |
384 | } | 391 | } |
385 | 392 | ||
393 | if (mc) | ||
394 | vfree(mc); | ||
395 | |||
386 | if (leftover) { | 396 | if (leftover) { |
387 | if (new_mc) | 397 | if (new_mc) |
388 | vfree(new_mc); | 398 | vfree(new_mc); |
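
The allocation strategy above, reduced to its pattern: one grow-only scratch buffer is reused across iterations, reallocated only when a larger element arrives, and set to NULL whenever ownership is handed off (here, to new_mc) so the next pass allocates afresh. A stand-alone sketch; element_size() and consume() are hypothetical stand-ins:

	u8 *mc = NULL;
	unsigned int curr_size = 0;

	while (leftover) {
		unsigned int mc_size = element_size(ucode_ptr);

		if (!mc || mc_size > curr_size) {
			vfree(mc);		/* vfree(NULL) is a no-op */
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_size = mc_size;
		}
		if (consume(mc))	/* buffer ownership taken? */
			mc = NULL;	/* force a fresh vmalloc */
		ucode_ptr += mc_size;
		leftover -= mc_size;
	}
	vfree(mc);
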
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index e81030f71a8f..5ae5d2426edf 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -115,21 +115,6 @@ static void __init MP_bus_info(struct mpc_bus *m) | |||
115 | printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); | 115 | printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int bad_ioapic(unsigned long address) | ||
119 | { | ||
120 | if (nr_ioapics >= MAX_IO_APICS) { | ||
121 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
122 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
123 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
124 | } | ||
125 | if (!address) { | ||
126 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
127 | " found in table, skipping!\n"); | ||
128 | return 1; | ||
129 | } | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static void __init MP_ioapic_info(struct mpc_ioapic *m) | 118 | static void __init MP_ioapic_info(struct mpc_ioapic *m) |
134 | { | 119 | { |
135 | if (!(m->flags & MPC_APIC_USABLE)) | 120 | if (!(m->flags & MPC_APIC_USABLE)) |
@@ -138,15 +123,7 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m) | |||
138 | printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", | 123 | printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", |
139 | m->apicid, m->apicver, m->apicaddr); | 124 | m->apicid, m->apicver, m->apicaddr); |
140 | 125 | ||
141 | if (bad_ioapic(m->apicaddr)) | 126 | mp_register_ioapic(m->apicid, m->apicaddr, gsi_end + 1); |
142 | return; | ||
143 | |||
144 | mp_ioapics[nr_ioapics].apicaddr = m->apicaddr; | ||
145 | mp_ioapics[nr_ioapics].apicid = m->apicid; | ||
146 | mp_ioapics[nr_ioapics].type = m->type; | ||
147 | mp_ioapics[nr_ioapics].apicver = m->apicver; | ||
148 | mp_ioapics[nr_ioapics].flags = m->flags; | ||
149 | nr_ioapics++; | ||
150 | } | 127 | } |
151 | 128 | ||
152 | static void print_MP_intsrc_info(struct mpc_intsrc *m) | 129 | static void print_MP_intsrc_info(struct mpc_intsrc *m) |
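
What this hunk leans on: the sanity checking that the deleted bad_ioapic() did locally is assumed to sit behind mp_register_ioapic() now, so the MP-table path here, the ACPI path, and the SFI path (see the sfi.c hunk below) all share one validate-and-register step, with GSIs handed out contiguously after gsi_end. Per the deleted lines, the checks being centralized were:

	if (nr_ioapics >= MAX_IO_APICS)
		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
	if (!address)
		return;		/* bogus (zero) I/O APIC address: skip */

	mp_register_ioapic(m->apicid, m->apicaddr, gsi_end + 1);
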
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c index 0aad8670858e..e796448f0eb5 100644 --- a/arch/x86/kernel/mrst.c +++ b/arch/x86/kernel/mrst.c | |||
@@ -237,4 +237,9 @@ void __init x86_mrst_early_setup(void) | |||
237 | x86_init.pci.fixup_irqs = x86_init_noop; | 237 | x86_init.pci.fixup_irqs = x86_init_noop; |
238 | 238 | ||
239 | legacy_pic = &null_legacy_pic; | 239 | legacy_pic = &null_legacy_pic; |
240 | |||
241 | /* Avoid searching for BIOS MP tables */ | ||
242 | x86_init.mpparse.find_smp_config = x86_init_noop; | ||
243 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | ||
244 | |||
240 | } | 245 | } |
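
The two added assignments follow the usual x86_init stub-out pattern: a platform that cannot have BIOS MP tables installs no-op hooks of matching signature, so common boot code keeps calling them unconditionally instead of growing platform checks. A generic sketch of the idea (structure and names illustrative, not the kernel's):

	struct smp_hooks {
		void (*find_config)(void);
		void (*get_config)(unsigned int early);
	};

	static void noop(void) { }
	static void uint_noop(unsigned int unused) { }

	static struct smp_hooks hooks = {
		.find_config = noop,	/* nothing to search for */
		.get_config  = uint_noop,
	};
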
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index eccdb57094e3..e7e35219b32f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -31,24 +31,22 @@ struct kmem_cache *task_xstate_cachep; | |||
31 | 31 | ||
32 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 32 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
33 | { | 33 | { |
34 | int ret; | ||
35 | |||
34 | *dst = *src; | 36 | *dst = *src; |
35 | if (src->thread.xstate) { | 37 | if (fpu_allocated(&src->thread.fpu)) { |
36 | dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, | 38 | memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu)); |
37 | GFP_KERNEL); | 39 | ret = fpu_alloc(&dst->thread.fpu); |
38 | if (!dst->thread.xstate) | 40 | if (ret) |
39 | return -ENOMEM; | 41 | return ret; |
40 | WARN_ON((unsigned long)dst->thread.xstate & 15); | 42 | fpu_copy(&dst->thread.fpu, &src->thread.fpu); |
41 | memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); | ||
42 | } | 43 | } |
43 | return 0; | 44 | return 0; |
44 | } | 45 | } |
45 | 46 | ||
46 | void free_thread_xstate(struct task_struct *tsk) | 47 | void free_thread_xstate(struct task_struct *tsk) |
47 | { | 48 | { |
48 | if (tsk->thread.xstate) { | 49 | fpu_free(&tsk->thread.fpu); |
49 | kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); | ||
50 | tsk->thread.xstate = NULL; | ||
51 | } | ||
52 | } | 50 | } |
53 | 51 | ||
54 | void free_thread_info(struct thread_info *ti) | 52 | void free_thread_info(struct thread_info *ti) |
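
The fpu_*() helpers adopted above wrap exactly what this file used to do by hand. A sketch of plausible wrapper bodies, derived from the deleted lines; the real definitions live in the i387/xsave headers and may differ:

	static inline int fpu_allocated_sketch(struct fpu *fpu)
	{
		return fpu->state != NULL;
	}

	static inline int fpu_alloc_sketch(struct fpu *fpu)
	{
		fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
		if (!fpu->state)
			return -ENOMEM;
		WARN_ON((unsigned long)fpu->state & 15);  /* 16-byte aligned */
		return 0;
	}

	static inline void fpu_copy_sketch(struct fpu *dst, struct fpu *src)
	{
		memcpy(dst->state, src->state, xstate_size);
	}
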
@@ -548,11 +546,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
548 | * check OSVW bit for CPUs that are not affected | 546 | * check OSVW bit for CPUs that are not affected |
549 | * by erratum #400 | 547 | * by erratum #400 |
550 | */ | 548 | */ |
551 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); | 549 | if (cpu_has(c, X86_FEATURE_OSVW)) { |
552 | if (val >= 2) { | 550 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); |
553 | rdmsrl(MSR_AMD64_OSVW_STATUS, val); | 551 | if (val >= 2) { |
554 | if (!(val & BIT(1))) | 552 | rdmsrl(MSR_AMD64_OSVW_STATUS, val); |
555 | goto no_c1e_idle; | 553 | if (!(val & BIT(1))) |
554 | goto no_c1e_idle; | ||
555 | } | ||
556 | } | 556 | } |
557 | return 1; | 557 | return 1; |
558 | } | 558 | } |
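
The fix above gates the OSVW MSR reads on the CPUID feature bit, since reading an unimplemented MSR faults. As an aside (not what this patch does), the other standard hedge is rdmsrl_safe(), which absorbs the #GP and reports failure:

	u64 val;

	/* returns nonzero instead of faulting on a missing MSR */
	if (rdmsrl_safe(MSR_AMD64_OSVW_ID_LENGTH, &val) == 0 && val >= 2) {
		if (rdmsrl_safe(MSR_AMD64_OSVW_STATUS, &val) == 0 &&
		    !(val & BIT(1)))
			goto no_c1e_idle;	/* erratum #400 absent */
	}
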
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 75090c589b7a..8d128783af47 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -309,7 +309,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
309 | 309 | ||
310 | /* we're going to use this soon, after a few expensive things */ | 310 | /* we're going to use this soon, after a few expensive things */ |
311 | if (preload_fpu) | 311 | if (preload_fpu) |
312 | prefetch(next->xstate); | 312 | prefetch(next->fpu.state); |
313 | 313 | ||
314 | /* | 314 | /* |
315 | * Reload esp0. | 315 | * Reload esp0. |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 50cc84ac0a0d..3c2422a99f1f 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -388,7 +388,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
388 | 388 | ||
389 | /* we're going to use this soon, after a few expensive things */ | 389 | /* we're going to use this soon, after a few expensive things */ |
390 | if (preload_fpu) | 390 | if (preload_fpu) |
391 | prefetch(next->xstate); | 391 | prefetch(next->fpu.state); |
392 | 392 | ||
393 | /* | 393 | /* |
394 | * Reload esp0, LDT and the page table pointer: | 394 | * Reload esp0, LDT and the page table pointer: |
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c index 34e099382651..7ded57896c0a 100644 --- a/arch/x86/kernel/sfi.c +++ b/arch/x86/kernel/sfi.c | |||
@@ -81,7 +81,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table) | |||
81 | #endif /* CONFIG_X86_LOCAL_APIC */ | 81 | #endif /* CONFIG_X86_LOCAL_APIC */ |
82 | 82 | ||
83 | #ifdef CONFIG_X86_IO_APIC | 83 | #ifdef CONFIG_X86_IO_APIC |
84 | static u32 gsi_base; | ||
85 | 84 | ||
86 | static int __init sfi_parse_ioapic(struct sfi_table_header *table) | 85 | static int __init sfi_parse_ioapic(struct sfi_table_header *table) |
87 | { | 86 | { |
@@ -94,8 +93,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table) | |||
94 | pentry = (struct sfi_apic_table_entry *)sb->pentry; | 93 | pentry = (struct sfi_apic_table_entry *)sb->pentry; |
95 | 94 | ||
96 | for (i = 0; i < num; i++) { | 95 | for (i = 0; i < num; i++) { |
97 | mp_register_ioapic(i, pentry->phys_addr, gsi_base); | 96 | mp_register_ioapic(i, pentry->phys_addr, gsi_end + 1); |
98 | gsi_base += io_apic_get_redir_entries(i); | ||
99 | pentry++; | 97 | pentry++; |
100 | } | 98 | } |
101 | 99 | ||
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 86c9f91b48ae..cc2c60474fd0 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -175,6 +175,9 @@ static void add_mac_region(phys_addr_t start, unsigned long size) | |||
175 | struct tboot_mac_region *mr; | 175 | struct tboot_mac_region *mr; |
176 | phys_addr_t end = start + size; | 176 | phys_addr_t end = start + size; |
177 | 177 | ||
178 | if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS) | ||
179 | panic("tboot: Too many MAC regions\n"); | ||
180 | |||
178 | if (start && size) { | 181 | if (start && size) { |
179 | mr = &tboot->mac_regions[tboot->num_mac_regions++]; | 182 | mr = &tboot->mac_regions[tboot->num_mac_regions++]; |
180 | mr->start = round_down(start, PAGE_SIZE); | 183 | mr->start = round_down(start, PAGE_SIZE); |
@@ -184,18 +187,17 @@ static void add_mac_region(phys_addr_t start, unsigned long size) | |||
184 | 187 | ||
185 | static int tboot_setup_sleep(void) | 188 | static int tboot_setup_sleep(void) |
186 | { | 189 | { |
190 | int i; | ||
191 | |||
187 | tboot->num_mac_regions = 0; | 192 | tboot->num_mac_regions = 0; |
188 | 193 | ||
189 | /* S3 resume code */ | 194 | for (i = 0; i < e820.nr_map; i++) { |
190 | add_mac_region(acpi_wakeup_address, WAKEUP_SIZE); | 195 | if ((e820.map[i].type != E820_RAM) |
196 | && (e820.map[i].type != E820_RESERVED_KERN)) | ||
197 | continue; | ||
191 | 198 | ||
192 | #ifdef CONFIG_X86_TRAMPOLINE | 199 | add_mac_region(e820.map[i].addr, e820.map[i].size); |
193 | /* AP trampoline code */ | 200 | } |
194 | add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE); | ||
195 | #endif | ||
196 | |||
197 | /* kernel code + data + bss */ | ||
198 | add_mac_region(virt_to_phys(_text), _end - _text); | ||
199 | 201 | ||
200 | tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address; | 202 | tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address; |
201 | 203 | ||
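
Taken together, the two tboot hunks switch the S3 integrity coverage from an enumerated list (wakeup stub, AP trampoline, kernel image) to everything usable in the e820 map, and the new bounds check makes an oversized map panic rather than overrun mac_regions[]. In skeleton form:

	tboot->num_mac_regions = 0;

	for (i = 0; i < e820.nr_map; i++) {
		/* MAC only RAM and kernel-reserved ranges */
		if (e820.map[i].type != E820_RAM &&
		    e820.map[i].type != E820_RESERVED_KERN)
			continue;
		/* panics past MAX_TB_MAC_REGIONS entries */
		add_mac_region(e820.map[i].addr, e820.map[i].size);
	}
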
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 17b03dd3a6b5..7fea555929e2 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * SGI UltraViolet TLB flush routines. | 2 | * SGI UltraViolet TLB flush routines. |
3 | * | 3 | * |
4 | * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI. | 4 | * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI. |
5 | * | 5 | * |
6 | * This code is released under the GNU General Public License version 2 or | 6 | * This code is released under the GNU General Public License version 2 or |
7 | * later. | 7 | * later. |
@@ -20,42 +20,67 @@ | |||
20 | #include <asm/idle.h> | 20 | #include <asm/idle.h> |
21 | #include <asm/tsc.h> | 21 | #include <asm/tsc.h> |
22 | #include <asm/irq_vectors.h> | 22 | #include <asm/irq_vectors.h> |
23 | #include <asm/timer.h> | ||
23 | 24 | ||
24 | static struct bau_control **uv_bau_table_bases __read_mostly; | 25 | struct msg_desc { |
25 | static int uv_bau_retry_limit __read_mostly; | 26 | struct bau_payload_queue_entry *msg; |
27 | int msg_slot; | ||
28 | int sw_ack_slot; | ||
29 | struct bau_payload_queue_entry *va_queue_first; | ||
30 | struct bau_payload_queue_entry *va_queue_last; | ||
31 | }; | ||
26 | 32 | ||
27 | /* base pnode in this partition */ | 33 | #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL |
28 | static int uv_partition_base_pnode __read_mostly; | 34 | |
35 | static int uv_bau_max_concurrent __read_mostly; | ||
36 | |||
37 | static int nobau; | ||
38 | static int __init setup_nobau(char *arg) | ||
39 | { | ||
40 | nobau = 1; | ||
41 | return 0; | ||
42 | } | ||
43 | early_param("nobau", setup_nobau); | ||
29 | 44 | ||
30 | static unsigned long uv_mmask __read_mostly; | 45 | /* base pnode in this partition */ |
46 | static int uv_partition_base_pnode __read_mostly; | ||
47 | /* position of pnode (which is nasid>>1): */ | ||
48 | static int uv_nshift __read_mostly; | ||
49 | static unsigned long uv_mmask __read_mostly; | ||
31 | 50 | ||
32 | static DEFINE_PER_CPU(struct ptc_stats, ptcstats); | 51 | static DEFINE_PER_CPU(struct ptc_stats, ptcstats); |
33 | static DEFINE_PER_CPU(struct bau_control, bau_control); | 52 | static DEFINE_PER_CPU(struct bau_control, bau_control); |
53 | static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); | ||
54 | |||
55 | struct reset_args { | ||
56 | int sender; | ||
57 | }; | ||
34 | 58 | ||
35 | /* | 59 | /* |
36 | * Determine the first node on a blade. | 60 | * Determine the first node on a uvhub. 'Nodes' are used for kernel |
61 | * memory allocation. | ||
37 | */ | 62 | */ |
38 | static int __init blade_to_first_node(int blade) | 63 | static int __init uvhub_to_first_node(int uvhub) |
39 | { | 64 | { |
40 | int node, b; | 65 | int node, b; |
41 | 66 | ||
42 | for_each_online_node(node) { | 67 | for_each_online_node(node) { |
43 | b = uv_node_to_blade_id(node); | 68 | b = uv_node_to_blade_id(node); |
44 | if (blade == b) | 69 | if (uvhub == b) |
45 | return node; | 70 | return node; |
46 | } | 71 | } |
47 | return -1; /* shouldn't happen */ | 72 | return -1; |
48 | } | 73 | } |
49 | 74 | ||
50 | /* | 75 | /* |
51 | * Determine the apicid of the first cpu on a blade. | 76 | * Determine the apicid of the first cpu on a uvhub. |
52 | */ | 77 | */ |
53 | static int __init blade_to_first_apicid(int blade) | 78 | static int __init uvhub_to_first_apicid(int uvhub) |
54 | { | 79 | { |
55 | int cpu; | 80 | int cpu; |
56 | 81 | ||
57 | for_each_present_cpu(cpu) | 82 | for_each_present_cpu(cpu) |
58 | if (blade == uv_cpu_to_blade_id(cpu)) | 83 | if (uvhub == uv_cpu_to_blade_id(cpu)) |
59 | return per_cpu(x86_cpu_to_apicid, cpu); | 84 | return per_cpu(x86_cpu_to_apicid, cpu); |
60 | return -1; | 85 | return -1; |
61 | } | 86 | } |
@@ -68,195 +93,459 @@ static int __init blade_to_first_apicid(int blade) | |||
68 | * clear of the Timeout bit (as well) will free the resource. No reply will | 93 | * clear of the Timeout bit (as well) will free the resource. No reply will |
69 | * be sent (the hardware will only do one reply per message). | 94 | * be sent (the hardware will only do one reply per message). |
70 | */ | 95 | */ |
71 | static void uv_reply_to_message(int resource, | 96 | static inline void uv_reply_to_message(struct msg_desc *mdp, |
72 | struct bau_payload_queue_entry *msg, | 97 | struct bau_control *bcp) |
73 | struct bau_msg_status *msp) | ||
74 | { | 98 | { |
75 | unsigned long dw; | 99 | unsigned long dw; |
100 | struct bau_payload_queue_entry *msg; | ||
76 | 101 | ||
77 | dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource); | 102 | msg = mdp->msg; |
103 | if (!msg->canceled) { | ||
104 | dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) | | ||
105 | msg->sw_ack_vector; | ||
106 | uv_write_local_mmr( | ||
107 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw); | ||
108 | } | ||
78 | msg->replied_to = 1; | 109 | msg->replied_to = 1; |
79 | msg->sw_ack_vector = 0; | 110 | msg->sw_ack_vector = 0; |
80 | if (msp) | ||
81 | msp->seen_by.bits = 0; | ||
82 | uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw); | ||
83 | } | 111 | } |
84 | 112 | ||
85 | /* | 113 | /* |
86 | * Do all the things a cpu should do for a TLB shootdown message. | 114 | * Process the receipt of a RETRY message |
87 | * Other cpu's may come here at the same time for this message. | ||
88 | */ | 115 | */ |
89 | static void uv_bau_process_message(struct bau_payload_queue_entry *msg, | 116 | static inline void uv_bau_process_retry_msg(struct msg_desc *mdp, |
90 | int msg_slot, int sw_ack_slot) | 117 | struct bau_control *bcp) |
91 | { | 118 | { |
92 | unsigned long this_cpu_mask; | 119 | int i; |
93 | struct bau_msg_status *msp; | 120 | int cancel_count = 0; |
94 | int cpu; | 121 | int slot2; |
122 | unsigned long msg_res; | ||
123 | unsigned long mmr = 0; | ||
124 | struct bau_payload_queue_entry *msg; | ||
125 | struct bau_payload_queue_entry *msg2; | ||
126 | struct ptc_stats *stat; | ||
95 | 127 | ||
96 | msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; | 128 | msg = mdp->msg; |
97 | cpu = uv_blade_processor_id(); | 129 | stat = &per_cpu(ptcstats, bcp->cpu); |
98 | msg->number_of_cpus = | 130 | stat->d_retries++; |
99 | uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); | 131 | /* |
100 | this_cpu_mask = 1UL << cpu; | 132 | * cancel any message from msg+1 to the retry itself |
101 | if (msp->seen_by.bits & this_cpu_mask) | 133 | */ |
102 | return; | 134 | for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) { |
103 | atomic_or_long(&msp->seen_by.bits, this_cpu_mask); | 135 | if (msg2 > mdp->va_queue_last) |
136 | msg2 = mdp->va_queue_first; | ||
137 | if (msg2 == msg) | ||
138 | break; | ||
139 | |||
140 | /* same conditions for cancellation as uv_do_reset */ | ||
141 | if ((msg2->replied_to == 0) && (msg2->canceled == 0) && | ||
142 | (msg2->sw_ack_vector) && ((msg2->sw_ack_vector & | ||
143 | msg->sw_ack_vector) == 0) && | ||
144 | (msg2->sending_cpu == msg->sending_cpu) && | ||
145 | (msg2->msg_type != MSG_NOOP)) { | ||
146 | slot2 = msg2 - mdp->va_queue_first; | ||
147 | mmr = uv_read_local_mmr | ||
148 | (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); | ||
149 | msg_res = ((msg2->sw_ack_vector << 8) | | ||
150 | msg2->sw_ack_vector); | ||
151 | /* | ||
152 | * This is a message retry; clear the resources held | ||
153 | * by the previous message only if they timed out. | ||
154 | * If it has not timed out we have an unexpected | ||
155 | * situation to report. | ||
156 | */ | ||
157 | if (mmr & (msg_res << 8)) { | ||
158 | /* | ||
159 | * The resource has timed out; make everyone | ||
160 | * ignore the cancelled message. | ||
161 | */ | ||
162 | msg2->canceled = 1; | ||
163 | stat->d_canceled++; | ||
164 | cancel_count++; | ||
165 | uv_write_local_mmr( | ||
166 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, | ||
167 | (msg_res << 8) | msg_res); | ||
168 | } else | ||
169 | printk(KERN_INFO "note bau retry: no effect\n"); | ||
170 | } | ||
171 | } | ||
172 | if (!cancel_count) | ||
173 | stat->d_nocanceled++; | ||
174 | } | ||
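
The bit arithmetic in this function and in uv_reply_to_message() above encodes the software-ack register layout the driver assumes: one pending bit per resource in the low byte, and the matching timed-out bit UV_SW_ACK_NPENDING (8) positions higher; writing both bits of a resource to the *_ALIAS register frees it. As a helper-shaped sketch of the release write:

	/* sketch only; the driver open-codes this where needed */
	static inline unsigned long sw_ack_release_bits(unsigned char vector)
	{
		return ((unsigned long)vector << UV_SW_ACK_NPENDING) | vector;
	}

	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
			   sw_ack_release_bits(msg->sw_ack_vector));
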
104 | 175 | ||
105 | if (msg->replied_to == 1) | 176 | /* |
106 | return; | 177 | * Do all the things a cpu should do for a TLB shootdown message. |
178 | * Other cpu's may come here at the same time for this message. | ||
179 | */ | ||
180 | static void uv_bau_process_message(struct msg_desc *mdp, | ||
181 | struct bau_control *bcp) | ||
182 | { | ||
183 | int msg_ack_count; | ||
184 | short socket_ack_count = 0; | ||
185 | struct ptc_stats *stat; | ||
186 | struct bau_payload_queue_entry *msg; | ||
187 | struct bau_control *smaster = bcp->socket_master; | ||
107 | 188 | ||
189 | /* | ||
190 | * This must be a normal message, or retry of a normal message | ||
191 | */ | ||
192 | msg = mdp->msg; | ||
193 | stat = &per_cpu(ptcstats, bcp->cpu); | ||
108 | if (msg->address == TLB_FLUSH_ALL) { | 194 | if (msg->address == TLB_FLUSH_ALL) { |
109 | local_flush_tlb(); | 195 | local_flush_tlb(); |
110 | __get_cpu_var(ptcstats).alltlb++; | 196 | stat->d_alltlb++; |
111 | } else { | 197 | } else { |
112 | __flush_tlb_one(msg->address); | 198 | __flush_tlb_one(msg->address); |
113 | __get_cpu_var(ptcstats).onetlb++; | 199 | stat->d_onetlb++; |
114 | } | 200 | } |
201 | stat->d_requestee++; | ||
202 | |||
203 | /* | ||
204 | * One cpu on each uvhub has the additional job on a RETRY | ||
205 | * of releasing the resource held by the message that is | ||
206 | * being retried. That message is identified by sending | ||
207 | * cpu number. | ||
208 | */ | ||
209 | if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master) | ||
210 | uv_bau_process_retry_msg(mdp, bcp); | ||
115 | 211 | ||
116 | __get_cpu_var(ptcstats).requestee++; | 212 | /* |
213 | * This is a sw_ack message, so we have to reply to it. | ||
214 | * Count each responding cpu on the socket. This avoids | ||
215 | * pinging the count's cache line back and forth between | ||
216 | * the sockets. | ||
217 | */ | ||
218 | socket_ack_count = atomic_add_short_return(1, (struct atomic_short *) | ||
219 | &smaster->socket_acknowledge_count[mdp->msg_slot]); | ||
220 | if (socket_ack_count == bcp->cpus_in_socket) { | ||
221 | /* | ||
222 | * Both sockets dump their completed count total into | ||
223 | * the message's count. | ||
224 | */ | ||
225 | smaster->socket_acknowledge_count[mdp->msg_slot] = 0; | ||
226 | msg_ack_count = atomic_add_short_return(socket_ack_count, | ||
227 | (struct atomic_short *)&msg->acknowledge_count); | ||
228 | |||
229 | if (msg_ack_count == bcp->cpus_in_uvhub) { | ||
230 | /* | ||
231 | * All cpus in uvhub saw it; reply | ||
232 | */ | ||
233 | uv_reply_to_message(mdp, bcp); | ||
234 | } | ||
235 | } | ||
117 | 236 | ||
118 | atomic_inc_short(&msg->acknowledge_count); | 237 | return; |
119 | if (msg->number_of_cpus == msg->acknowledge_count) | ||
120 | uv_reply_to_message(sw_ack_slot, msg, msp); | ||
121 | } | 238 | } |
122 | 239 | ||
123 | /* | 240 | /* |
124 | * Examine the payload queue on one distribution node to see | 241 | * Determine the first cpu on a uvhub. |
125 | * which messages have not been seen, and which cpu(s) have not seen them. | 242 | */ |
243 | static int uvhub_to_first_cpu(int uvhub) | ||
244 | { | ||
245 | int cpu; | ||
246 | for_each_present_cpu(cpu) | ||
247 | if (uvhub == uv_cpu_to_blade_id(cpu)) | ||
248 | return cpu; | ||
249 | return -1; | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Last resort when we get a large number of destination timeouts is | ||
254 | * to clear resources held by a given cpu. | ||
255 | * Do this with IPI so that all messages in the BAU message queue | ||
256 | * can be identified by their nonzero sw_ack_vector field. | ||
126 | * | 257 | * |
127 | * Returns the number of cpu's that have not responded. | 258 | * This is entered for a single cpu on the uvhub. |
259 | * The sender wants this uvhub to free a specific message's | ||
260 | * sw_ack resources. | ||
128 | */ | 261 | */ |
129 | static int uv_examine_destination(struct bau_control *bau_tablesp, int sender) | 262 | static void |
263 | uv_do_reset(void *ptr) | ||
130 | { | 264 | { |
131 | struct bau_payload_queue_entry *msg; | ||
132 | struct bau_msg_status *msp; | ||
133 | int count = 0; | ||
134 | int i; | 265 | int i; |
135 | int j; | 266 | int slot; |
267 | int count = 0; | ||
268 | unsigned long mmr; | ||
269 | unsigned long msg_res; | ||
270 | struct bau_control *bcp; | ||
271 | struct reset_args *rap; | ||
272 | struct bau_payload_queue_entry *msg; | ||
273 | struct ptc_stats *stat; | ||
136 | 274 | ||
137 | for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE; | 275 | bcp = &per_cpu(bau_control, smp_processor_id()); |
138 | msg++, i++) { | 276 | rap = (struct reset_args *)ptr; |
139 | if ((msg->sending_cpu == sender) && (!msg->replied_to)) { | 277 | stat = &per_cpu(ptcstats, bcp->cpu); |
140 | msp = bau_tablesp->msg_statuses + i; | 278 | stat->d_resets++; |
141 | printk(KERN_DEBUG | 279 | |
142 | "blade %d: address:%#lx %d of %d, not cpu(s): ", | 280 | /* |
143 | i, msg->address, msg->acknowledge_count, | 281 | * We're looking for the given sender, and |
144 | msg->number_of_cpus); | 282 | * will free its sw_ack resource. |
145 | for (j = 0; j < msg->number_of_cpus; j++) { | 283 | * If all cpu's finally responded after the timeout, its |
146 | if (!((1L << j) & msp->seen_by.bits)) { | 284 | * message 'replied_to' was set. |
147 | count++; | 285 | */ |
148 | printk("%d ", j); | 286 | for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) { |
149 | } | 287 | /* uv_do_reset: same conditions for cancellation as |
288 | uv_bau_process_retry_msg() */ | ||
289 | if ((msg->replied_to == 0) && | ||
290 | (msg->canceled == 0) && | ||
291 | (msg->sending_cpu == rap->sender) && | ||
292 | (msg->sw_ack_vector) && | ||
293 | (msg->msg_type != MSG_NOOP)) { | ||
294 | /* | ||
295 | * make everyone else ignore this message | ||
296 | */ | ||
297 | msg->canceled = 1; | ||
298 | slot = msg - bcp->va_queue_first; | ||
299 | count++; | ||
300 | /* | ||
301 | * only reset the resource if it is still pending | ||
302 | */ | ||
303 | mmr = uv_read_local_mmr | ||
304 | (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); | ||
305 | msg_res = ((msg->sw_ack_vector << 8) | | ||
306 | msg->sw_ack_vector); | ||
307 | if (mmr & msg_res) { | ||
308 | stat->d_rcanceled++; | ||
309 | uv_write_local_mmr( | ||
310 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, | ||
311 | msg_res); | ||
150 | } | 312 | } |
151 | printk("\n"); | ||
152 | } | 313 | } |
153 | } | 314 | } |
154 | return count; | 315 | return; |
155 | } | 316 | } |
156 | 317 | ||
157 | /* | 318 | /* |
158 | * Examine the payload queue on all the distribution nodes to see | 319 | * Use IPI to get all target uvhubs to release resources held by |
159 | * which messages have not been seen, and which cpu(s) have not seen them. | 320 | * a given sending cpu number. |
160 | * | ||
161 | * Returns the number of cpu's that have not responded. | ||
162 | */ | 321 | */ |
163 | static int uv_examine_destinations(struct bau_target_nodemask *distribution) | 322 | static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution, |
323 | int sender) | ||
164 | { | 324 | { |
165 | int sender; | 325 | int uvhub; |
166 | int i; | 326 | int cpu; |
167 | int count = 0; | 327 | cpumask_t mask; |
328 | struct reset_args reset_args; | ||
329 | |||
330 | reset_args.sender = sender; | ||
168 | 331 | ||
169 | sender = smp_processor_id(); | 332 | cpus_clear(mask); |
170 | for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) { | 333 | /* find a single cpu for each uvhub in this distribution mask */ |
171 | if (!bau_node_isset(i, distribution)) | 334 | for (uvhub = 0; |
335 | uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE; | ||
336 | uvhub++) { | ||
337 | if (!bau_uvhub_isset(uvhub, distribution)) | ||
172 | continue; | 338 | continue; |
173 | count += uv_examine_destination(uv_bau_table_bases[i], sender); | 339 | /* find a cpu for this uvhub */ |
340 | cpu = uvhub_to_first_cpu(uvhub); | ||
341 | cpu_set(cpu, mask); | ||
174 | } | 342 | } |
175 | return count; | 343 | /* IPI all cpus; Preemption is already disabled */ |
344 | smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1); | ||
345 | return; | ||
346 | } | ||
347 | |||
348 | static inline unsigned long | ||
349 | cycles_2_us(unsigned long long cyc) | ||
350 | { | ||
351 | unsigned long long ns; | ||
352 | unsigned long us; | ||
353 | ns = (cyc * per_cpu(cyc2ns, smp_processor_id())) | ||
354 | >> CYC2NS_SCALE_FACTOR; | ||
355 | us = ns / 1000; | ||
356 | return us; | ||
176 | } | 357 | } |
177 | 358 | ||
178 | /* | 359 | /* |
179 | * wait for completion of a broadcast message | 360 | * wait for all cpus on this hub to finish their sends and go quiet |
180 | * | 361 | * leaves uvhub_quiesce set so that no new broadcasts are started by |
181 | * return COMPLETE, RETRY or GIVEUP | 362 | * bau_flush_send_and_wait() |
363 | */ | ||
364 | static inline void | ||
365 | quiesce_local_uvhub(struct bau_control *hmaster) | ||
366 | { | ||
367 | atomic_add_short_return(1, (struct atomic_short *) | ||
368 | &hmaster->uvhub_quiesce); | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * mark this quiet-requestor as done | ||
373 | */ | ||
374 | static inline void | ||
375 | end_uvhub_quiesce(struct bau_control *hmaster) | ||
376 | { | ||
377 | atomic_add_short_return(-1, (struct atomic_short *) | ||
378 | &hmaster->uvhub_quiesce); | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Wait for completion of a broadcast software ack message | ||
383 | * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP | ||
182 | */ | 384 | */ |
183 | static int uv_wait_completion(struct bau_desc *bau_desc, | 385 | static int uv_wait_completion(struct bau_desc *bau_desc, |
184 | unsigned long mmr_offset, int right_shift) | 386 | unsigned long mmr_offset, int right_shift, int this_cpu, |
387 | struct bau_control *bcp, struct bau_control *smaster, long try) | ||
185 | { | 388 | { |
186 | int exams = 0; | 389 | int relaxes = 0; |
187 | long destination_timeouts = 0; | ||
188 | long source_timeouts = 0; | ||
189 | unsigned long descriptor_status; | 390 | unsigned long descriptor_status; |
391 | unsigned long mmr; | ||
392 | unsigned long mask; | ||
393 | cycles_t ttime; | ||
394 | cycles_t timeout_time; | ||
395 | struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu); | ||
396 | struct bau_control *hmaster; | ||
397 | |||
398 | hmaster = bcp->uvhub_master; | ||
399 | timeout_time = get_cycles() + bcp->timeout_interval; | ||
190 | 400 | ||
401 | /* spin on the status MMR, waiting for it to go idle */ | ||
191 | while ((descriptor_status = (((unsigned long) | 402 | while ((descriptor_status = (((unsigned long) |
192 | uv_read_local_mmr(mmr_offset) >> | 403 | uv_read_local_mmr(mmr_offset) >> |
193 | right_shift) & UV_ACT_STATUS_MASK)) != | 404 | right_shift) & UV_ACT_STATUS_MASK)) != |
194 | DESC_STATUS_IDLE) { | 405 | DESC_STATUS_IDLE) { |
195 | if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) { | ||
196 | source_timeouts++; | ||
197 | if (source_timeouts > SOURCE_TIMEOUT_LIMIT) | ||
198 | source_timeouts = 0; | ||
199 | __get_cpu_var(ptcstats).s_retry++; | ||
200 | return FLUSH_RETRY; | ||
201 | } | ||
202 | /* | 406 | /* |
203 | * spin here looking for progress at the destinations | 407 | * Our software ack messages may be blocked because there are |
408 | * no swack resources available. As long as none of them | ||
409 | * has timed out, hardware will NACK our message and its | ||
410 | * state will stay IDLE. | ||
204 | */ | 411 | */ |
205 | if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) { | 412 | if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) { |
206 | destination_timeouts++; | 413 | stat->s_stimeout++; |
207 | if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) { | 414 | return FLUSH_GIVEUP; |
208 | /* | 415 | } else if (descriptor_status == |
209 | * returns number of cpus not responding | 416 | DESC_STATUS_DESTINATION_TIMEOUT) { |
210 | */ | 417 | stat->s_dtimeout++; |
211 | if (uv_examine_destinations | 418 | ttime = get_cycles(); |
212 | (&bau_desc->distribution) == 0) { | 419 | |
213 | __get_cpu_var(ptcstats).d_retry++; | 420 | /* |
214 | return FLUSH_RETRY; | 421 | * Our retries may be blocked by all destination |
215 | } | 422 | * swack resources being consumed, and a timeout |
216 | exams++; | 423 | * pending. In that case hardware returns the |
217 | if (exams >= uv_bau_retry_limit) { | 424 | * ERROR that looks like a destination timeout. |
218 | printk(KERN_DEBUG | 425 | */ |
219 | "uv_flush_tlb_others"); | 426 | if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) { |
220 | printk("giving up on cpu %d\n", | 427 | bcp->conseccompletes = 0; |
221 | smp_processor_id()); | 428 | return FLUSH_RETRY_PLUGGED; |
429 | } | ||
430 | |||
431 | bcp->conseccompletes = 0; | ||
432 | return FLUSH_RETRY_TIMEOUT; | ||
433 | } else { | ||
434 | /* | ||
435 | * descriptor_status is still BUSY | ||
436 | */ | ||
437 | cpu_relax(); | ||
438 | relaxes++; | ||
439 | if (relaxes >= 10000) { | ||
440 | relaxes = 0; | ||
441 | if (get_cycles() > timeout_time) { | ||
442 | quiesce_local_uvhub(hmaster); | ||
443 | |||
444 | /* single-thread the register change */ | ||
445 | spin_lock(&hmaster->masks_lock); | ||
446 | mmr = uv_read_local_mmr(mmr_offset); | ||
447 | mask = 0UL; | ||
448 | mask |= (3UL << right_shift); | ||
449 | mask = ~mask; | ||
450 | mmr &= mask; | ||
451 | uv_write_local_mmr(mmr_offset, mmr); | ||
452 | spin_unlock(&hmaster->masks_lock); | ||
453 | end_uvhub_quiesce(hmaster); | ||
454 | stat->s_busy++; | ||
222 | return FLUSH_GIVEUP; | 455 | return FLUSH_GIVEUP; |
223 | } | 456 | } |
224 | /* | ||
225 | * delays can hang the simulator | ||
226 | udelay(1000); | ||
227 | */ | ||
228 | destination_timeouts = 0; | ||
229 | } | 457 | } |
230 | } | 458 | } |
231 | cpu_relax(); | ||
232 | } | 459 | } |
460 | bcp->conseccompletes++; | ||
233 | return FLUSH_COMPLETE; | 461 | return FLUSH_COMPLETE; |
234 | } | 462 | } |
235 | 463 | ||
464 | static inline cycles_t | ||
465 | sec_2_cycles(unsigned long sec) | ||
466 | { | ||
467 | unsigned long ns; | ||
468 | cycles_t cyc; | ||
469 | |||
470 | ns = sec * 1000000000; | ||
471 | cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); | ||
472 | return cyc; | ||
473 | } | ||
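
Both converters rely on the kernel's per-cpu cyc2ns scale, which stores nanoseconds-per-cycle as a fixed-point value shifted left by CYC2NS_SCALE_FACTOR (10), so the two directions are exact mirrors:

	/* forward:  ns  = cyc * cyc2ns >> CYC2NS_SCALE_FACTOR
	 * inverse:  cyc = (ns << CYC2NS_SCALE_FACTOR) / cyc2ns
	 *
	 * e.g. a 2 GHz cpu: cyc2ns = 0.5 ns/cycle * 1024 = 512,
	 * so 1000 cycles -> 1000 * 512 >> 10 = 500 ns.
	 */
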
474 | |||
475 | /* | ||
476 | * conditionally add 1 to *v, unless *v is >= u | ||
477 | * return 0 if we cannot add 1 to *v because it is >= u | ||
478 | * return 1 if we can add 1 to *v because it is < u | ||
479 | * the add is atomic | ||
480 | * | ||
481 | * This is close to atomic_add_unless(), but this allows the 'u' value | ||
482 | * to be lowered below the current 'v'. atomic_add_unless can only stop | ||
483 | * on equal. | ||
484 | */ | ||
485 | static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) | ||
486 | { | ||
487 | spin_lock(lock); | ||
488 | if (atomic_read(v) >= u) { | ||
489 | spin_unlock(lock); | ||
490 | return 0; | ||
491 | } | ||
492 | atomic_inc(v); | ||
493 | spin_unlock(lock); | ||
494 | return 1; | ||
495 | } | ||
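
Usage later in this patch (a condensed sketch of the throttle in uv_flush_send_and_wait()): spin until one of the max_concurrent slots can be taken, do the send, then release the slot with a plain atomic_dec():

	while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
				     &hmaster->active_descriptor_count,
				     hmaster->max_concurrent))
		cpu_relax();

	/* ... write the activation MMR and wait for completion ... */

	atomic_dec(&hmaster->active_descriptor_count);
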
496 | |||
236 | /** | 497 | /** |
237 | * uv_flush_send_and_wait | 498 | * uv_flush_send_and_wait |
238 | * | 499 | * |
239 | * Send a broadcast and wait for a broadcast message to complete. | 500 | * Send a broadcast and wait for it to complete. |
240 | * | 501 | * |
241 | * The flush_mask contains the cpus the broadcast was sent to. | 502 | * The flush_mask contains the cpus the broadcast is to be sent to, plus |
503 | * cpus that are on the local uvhub. | ||
242 | * | 504 | * |
243 | * Returns NULL if all remote flushing was done. The mask is zeroed. | 505 | * Returns NULL if all flushing represented in the mask was done. The mask |
506 | * is zeroed. | ||
244 | * Returns @flush_mask if some remote flushing remains to be done. The | 507 | * Returns @flush_mask if some remote flushing remains to be done. The |
245 | * mask will have some bits still set. | 508 | * mask will have some bits still set, representing any cpus on the local |
509 | * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed. | ||
246 | */ | 510 | */ |
247 | const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, | 511 | const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc, |
248 | struct bau_desc *bau_desc, | 512 | struct cpumask *flush_mask, |
249 | struct cpumask *flush_mask) | 513 | struct bau_control *bcp) |
250 | { | 514 | { |
251 | int completion_status = 0; | ||
252 | int right_shift; | 515 | int right_shift; |
253 | int tries = 0; | 516 | int uvhub; |
254 | int pnode; | ||
255 | int bit; | 517 | int bit; |
518 | int completion_status = 0; | ||
519 | int seq_number = 0; | ||
520 | long try = 0; | ||
521 | int cpu = bcp->uvhub_cpu; | ||
522 | int this_cpu = bcp->cpu; | ||
523 | int this_uvhub = bcp->uvhub; | ||
256 | unsigned long mmr_offset; | 524 | unsigned long mmr_offset; |
257 | unsigned long index; | 525 | unsigned long index; |
258 | cycles_t time1; | 526 | cycles_t time1; |
259 | cycles_t time2; | 527 | cycles_t time2; |
528 | struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu); | ||
529 | struct bau_control *smaster = bcp->socket_master; | ||
530 | struct bau_control *hmaster = bcp->uvhub_master; | ||
531 | |||
532 | /* | ||
533 | * Spin here while there are hmaster->max_concurrent or more active | ||
534 | * descriptors. This is the per-uvhub 'throttle'. | ||
535 | */ | ||
536 | if (!atomic_inc_unless_ge(&hmaster->uvhub_lock, | ||
537 | &hmaster->active_descriptor_count, | ||
538 | hmaster->max_concurrent)) { | ||
539 | stat->s_throttles++; | ||
540 | do { | ||
541 | cpu_relax(); | ||
542 | } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock, | ||
543 | &hmaster->active_descriptor_count, | ||
544 | hmaster->max_concurrent)); | ||
545 | } | ||
546 | |||
547 | while (hmaster->uvhub_quiesce) | ||
548 | cpu_relax(); | ||
260 | 549 | ||
261 | if (cpu < UV_CPUS_PER_ACT_STATUS) { | 550 | if (cpu < UV_CPUS_PER_ACT_STATUS) { |
262 | mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; | 551 | mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; |
@@ -268,24 +557,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, | |||
268 | } | 557 | } |
269 | time1 = get_cycles(); | 558 | time1 = get_cycles(); |
270 | do { | 559 | do { |
271 | tries++; | 560 | /* |
561 | * Every message from any given cpu gets a unique message | ||
562 | * sequence number. But retries use that same number. | ||
563 | * Our message may have timed out at the destination because | ||
564 | * all sw-ack resources are in use and there is a timeout | ||
565 | * pending there. In that case, our last send never got | ||
566 | * placed into the queue and we need to persist until it | ||
567 | * does. | ||
568 | * | ||
569 | * Make any retry a type MSG_RETRY so that the destination will | ||
570 | * free any resource held by a previous message from this cpu. | ||
571 | */ | ||
572 | if (try == 0) { | ||
573 | /* use message type set by the caller the first time */ | ||
574 | seq_number = bcp->message_number++; | ||
575 | } else { | ||
576 | /* use RETRY type on all the rest; same sequence */ | ||
577 | bau_desc->header.msg_type = MSG_RETRY; | ||
578 | stat->s_retry_messages++; | ||
579 | } | ||
580 | bau_desc->header.sequence = seq_number; | ||
272 | index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | | 581 | index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | |
273 | cpu; | 582 | bcp->uvhub_cpu; |
583 | bcp->send_message = get_cycles(); | ||
584 | |||
274 | uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); | 585 | uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); |
586 | |||
587 | try++; | ||
275 | completion_status = uv_wait_completion(bau_desc, mmr_offset, | 588 | completion_status = uv_wait_completion(bau_desc, mmr_offset, |
276 | right_shift); | 589 | right_shift, this_cpu, bcp, smaster, try); |
277 | } while (completion_status == FLUSH_RETRY); | 590 | |
591 | if (completion_status == FLUSH_RETRY_PLUGGED) { | ||
592 | /* | ||
593 | * Our retries may be blocked by all destination swack | ||
594 | * resources being consumed, and a timeout pending. In | ||
595 | * that case hardware immediately returns the ERROR | ||
596 | * that looks like a destination timeout. | ||
597 | */ | ||
598 | udelay(TIMEOUT_DELAY); | ||
599 | bcp->plugged_tries++; | ||
600 | if (bcp->plugged_tries >= PLUGSB4RESET) { | ||
601 | bcp->plugged_tries = 0; | ||
602 | quiesce_local_uvhub(hmaster); | ||
603 | spin_lock(&hmaster->queue_lock); | ||
604 | uv_reset_with_ipi(&bau_desc->distribution, | ||
605 | this_cpu); | ||
606 | spin_unlock(&hmaster->queue_lock); | ||
607 | end_uvhub_quiesce(hmaster); | ||
608 | bcp->ipi_attempts++; | ||
609 | stat->s_resets_plug++; | ||
610 | } | ||
611 | } else if (completion_status == FLUSH_RETRY_TIMEOUT) { | ||
612 | hmaster->max_concurrent = 1; | ||
613 | bcp->timeout_tries++; | ||
614 | udelay(TIMEOUT_DELAY); | ||
615 | if (bcp->timeout_tries >= TIMEOUTSB4RESET) { | ||
616 | bcp->timeout_tries = 0; | ||
617 | quiesce_local_uvhub(hmaster); | ||
618 | spin_lock(&hmaster->queue_lock); | ||
619 | uv_reset_with_ipi(&bau_desc->distribution, | ||
620 | this_cpu); | ||
621 | spin_unlock(&hmaster->queue_lock); | ||
622 | end_uvhub_quiesce(hmaster); | ||
623 | bcp->ipi_attempts++; | ||
624 | stat->s_resets_timeout++; | ||
625 | } | ||
626 | } | ||
627 | if (bcp->ipi_attempts >= 3) { | ||
628 | bcp->ipi_attempts = 0; | ||
629 | completion_status = FLUSH_GIVEUP; | ||
630 | break; | ||
631 | } | ||
632 | cpu_relax(); | ||
633 | } while ((completion_status == FLUSH_RETRY_PLUGGED) || | ||
634 | (completion_status == FLUSH_RETRY_TIMEOUT)); | ||
278 | time2 = get_cycles(); | 635 | time2 = get_cycles(); |
279 | __get_cpu_var(ptcstats).sflush += (time2 - time1); | ||
280 | if (tries > 1) | ||
281 | __get_cpu_var(ptcstats).retriesok++; | ||
282 | 636 | ||
283 | if (completion_status == FLUSH_GIVEUP) { | 637 | if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5) |
638 | && (hmaster->max_concurrent < hmaster->max_concurrent_constant)) | ||
639 | hmaster->max_concurrent++; | ||
640 | |||
641 | /* | ||
642 | * hold any cpu not timing out here; no other cpu currently held by | ||
643 | * the 'throttle' should enter the activation code | ||
644 | */ | ||
645 | while (hmaster->uvhub_quiesce) | ||
646 | cpu_relax(); | ||
647 | atomic_dec(&hmaster->active_descriptor_count); | ||
648 | |||
649 | /* guard against cycles wrap */ | ||
650 | if (time2 > time1) | ||
651 | stat->s_time += (time2 - time1); | ||
652 | else | ||
653 | stat->s_requestor--; /* don't count this one */ | ||
654 | if (completion_status == FLUSH_COMPLETE && try > 1) | ||
655 | stat->s_retriesok++; | ||
656 | else if (completion_status == FLUSH_GIVEUP) { | ||
284 | /* | 657 | /* |
285 | * Cause the caller to do an IPI-style TLB shootdown on | 658 | * Cause the caller to do an IPI-style TLB shootdown on |
286 | * the cpu's, all of which are still in the mask. | 659 | * the target cpu's, all of which are still in the mask. |
287 | */ | 660 | */ |
288 | __get_cpu_var(ptcstats).ptc_i++; | 661 | stat->s_giveup++; |
289 | return flush_mask; | 662 | return flush_mask; |
290 | } | 663 | } |
291 | 664 | ||
@@ -294,18 +667,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, | |||
294 | * use the IPI method of shootdown on them. | 667 | * use the IPI method of shootdown on them. |
295 | */ | 668 | */ |
296 | for_each_cpu(bit, flush_mask) { | 669 | for_each_cpu(bit, flush_mask) { |
297 | pnode = uv_cpu_to_pnode(bit); | 670 | uvhub = uv_cpu_to_blade_id(bit); |
298 | if (pnode == this_pnode) | 671 | if (uvhub == this_uvhub) |
299 | continue; | 672 | continue; |
300 | cpumask_clear_cpu(bit, flush_mask); | 673 | cpumask_clear_cpu(bit, flush_mask); |
301 | } | 674 | } |
302 | if (!cpumask_empty(flush_mask)) | 675 | if (!cpumask_empty(flush_mask)) |
303 | return flush_mask; | 676 | return flush_mask; |
677 | |||
304 | return NULL; | 678 | return NULL; |
305 | } | 679 | } |
306 | 680 | ||
307 | static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); | ||
308 | |||
309 | /** | 681 | /** |
310 | * uv_flush_tlb_others - globally purge translation cache of a virtual | 682 | * uv_flush_tlb_others - globally purge translation cache of a virtual |
311 | * address or all TLB's | 683 | * address or all TLB's |
@@ -322,8 +694,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); | |||
322 | * The caller has derived the cpumask from the mm_struct. This function | 694 | * The caller has derived the cpumask from the mm_struct. This function |
323 | * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) | 695 | * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) |
324 | * | 696 | * |
325 | * The cpumask is converted into a nodemask of the nodes containing | 697 | * The cpumask is converted into a uvhubmask of the uvhubs containing |
326 | * the cpus. | 698 | * those cpus. |
327 | * | 699 | * |
328 | * Note that this function should be called with preemption disabled. | 700 | * Note that this function should be called with preemption disabled. |
329 | * | 701 | * |
@@ -335,52 +707,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
335 | struct mm_struct *mm, | 707 | struct mm_struct *mm, |
336 | unsigned long va, unsigned int cpu) | 708 | unsigned long va, unsigned int cpu) |
337 | { | 709 | { |
338 | struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); | 710 | int remotes; |
339 | int i; | 711 | int tcpu; |
340 | int bit; | 712 | int uvhub; |
341 | int pnode; | ||
342 | int uv_cpu; | ||
343 | int this_pnode; | ||
344 | int locals = 0; | 713 | int locals = 0; |
345 | struct bau_desc *bau_desc; | 714 | struct bau_desc *bau_desc; |
715 | struct cpumask *flush_mask; | ||
716 | struct ptc_stats *stat; | ||
717 | struct bau_control *bcp; | ||
346 | 718 | ||
347 | cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); | 719 | if (nobau) |
720 | return cpumask; | ||
348 | 721 | ||
349 | uv_cpu = uv_blade_processor_id(); | 722 | bcp = &per_cpu(bau_control, cpu); |
350 | this_pnode = uv_hub_info->pnode; | 723 | /* |
351 | bau_desc = __get_cpu_var(bau_control).descriptor_base; | 724 | * Each sending cpu has a per-cpu mask which it fills from the caller's |
352 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; | 725 | * cpu mask. Only remote cpus are converted to uvhubs and copied. |
726 | */ | ||
727 | flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); | ||
728 | /* | ||
729 | * copy cpumask to flush_mask, removing current cpu | ||
730 | * (current cpu should already have been flushed by the caller and | ||
731 | * should never be returned if we return flush_mask) | ||
732 | */ | ||
733 | cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); | ||
734 | if (cpu_isset(cpu, *cpumask)) | ||
735 | locals++; /* current cpu was targeted */ | ||
353 | 736 | ||
354 | bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); | 737 | bau_desc = bcp->descriptor_base; |
738 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; | ||
355 | 739 | ||
356 | i = 0; | 740 | bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); |
357 | for_each_cpu(bit, flush_mask) { | 741 | remotes = 0; |
358 | pnode = uv_cpu_to_pnode(bit); | 742 | for_each_cpu(tcpu, flush_mask) { |
359 | BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1)); | 743 | uvhub = uv_cpu_to_blade_id(tcpu); |
360 | if (pnode == this_pnode) { | 744 | if (uvhub == bcp->uvhub) { |
361 | locals++; | 745 | locals++; |
362 | continue; | 746 | continue; |
363 | } | 747 | } |
364 | bau_node_set(pnode - uv_partition_base_pnode, | 748 | bau_uvhub_set(uvhub, &bau_desc->distribution); |
365 | &bau_desc->distribution); | 749 | remotes++; |
366 | i++; | ||
367 | } | 750 | } |
368 | if (i == 0) { | 751 | if (remotes == 0) { |
369 | /* | 752 | /* |
370 | * no off_node flushing; return status for local node | 753 | * No off_hub flushing; return status for local hub. |
754 | * Return the caller's mask if all were local (the current | ||
755 | * cpu may be in that mask). | ||
371 | */ | 756 | */ |
372 | if (locals) | 757 | if (locals) |
373 | return flush_mask; | 758 | return cpumask; |
374 | else | 759 | else |
375 | return NULL; | 760 | return NULL; |
376 | } | 761 | } |
377 | __get_cpu_var(ptcstats).requestor++; | 762 | stat = &per_cpu(ptcstats, cpu); |
378 | __get_cpu_var(ptcstats).ntargeted += i; | 763 | stat->s_requestor++; |
764 | stat->s_ntargcpu += remotes; | ||
765 | remotes = bau_uvhub_weight(&bau_desc->distribution); | ||
766 | stat->s_ntarguvhub += remotes; | ||
767 | if (remotes >= 16) | ||
768 | stat->s_ntarguvhub16++; | ||
769 | else if (remotes >= 8) | ||
770 | stat->s_ntarguvhub8++; | ||
771 | else if (remotes >= 4) | ||
772 | stat->s_ntarguvhub4++; | ||
773 | else if (remotes >= 2) | ||
774 | stat->s_ntarguvhub2++; | ||
775 | else | ||
776 | stat->s_ntarguvhub1++; | ||
379 | 777 | ||
380 | bau_desc->payload.address = va; | 778 | bau_desc->payload.address = va; |
381 | bau_desc->payload.sending_cpu = cpu; | 779 | bau_desc->payload.sending_cpu = cpu; |
382 | 780 | ||
383 | return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask); | 781 | /* |
782 | * uv_flush_send_and_wait returns null if all cpu's were messaged, or | ||
783 | * the adjusted flush_mask if any cpu's were not messaged. | ||
784 | */ | ||
785 | return uv_flush_send_and_wait(bau_desc, flush_mask, bcp); | ||
384 | } | 786 | } |
385 | 787 | ||
386 | /* | 788 | /* |
@@ -389,87 +791,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
389 | * | 791 | * |
390 | * We received a broadcast assist message. | 792 | * We received a broadcast assist message. |
391 | * | 793 | * |
392 | * Interrupts may have been disabled; this interrupt could represent | 794 | * Interrupts are disabled; this interrupt could represent |
393 | * the receipt of several messages. | 795 | * the receipt of several messages. |
394 | * | 796 | * |
395 | * All cores/threads on this node get this interrupt. | 797 | * All cores/threads on this hub get this interrupt. |
396 | * The last one to see it does the s/w ack. | 798 | * The last one to see it does the software ack. |
397 | * (the resource will not be freed until noninterruptable cpus see this | 799 | * (the resource will not be freed until noninterruptable cpus see this |
398 | * interrupt; hardware will timeout the s/w ack and reply ERROR) | 800 | * interrupt; hardware may timeout the s/w ack and reply ERROR) |
399 | */ | 801 | */ |
400 | void uv_bau_message_interrupt(struct pt_regs *regs) | 802 | void uv_bau_message_interrupt(struct pt_regs *regs) |
401 | { | 803 | { |
402 | struct bau_payload_queue_entry *va_queue_first; | ||
403 | struct bau_payload_queue_entry *va_queue_last; | ||
404 | struct bau_payload_queue_entry *msg; | ||
405 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
406 | cycles_t time1; | ||
407 | cycles_t time2; | ||
408 | int msg_slot; | ||
409 | int sw_ack_slot; | ||
410 | int fw; | ||
411 | int count = 0; | 804 | int count = 0; |
412 | unsigned long local_pnode; | 805 | cycles_t time_start; |
413 | 806 | struct bau_payload_queue_entry *msg; | |
414 | ack_APIC_irq(); | 807 | struct bau_control *bcp; |
415 | exit_idle(); | 808 | struct ptc_stats *stat; |
416 | irq_enter(); | 809 | struct msg_desc msgdesc; |
417 | 810 | ||
418 | time1 = get_cycles(); | 811 | time_start = get_cycles(); |
419 | 812 | bcp = &per_cpu(bau_control, smp_processor_id()); | |
420 | local_pnode = uv_blade_to_pnode(uv_numa_blade_id()); | 813 | stat = &per_cpu(ptcstats, smp_processor_id()); |
421 | 814 | msgdesc.va_queue_first = bcp->va_queue_first; | |
422 | va_queue_first = __get_cpu_var(bau_control).va_queue_first; | 815 | msgdesc.va_queue_last = bcp->va_queue_last; |
423 | va_queue_last = __get_cpu_var(bau_control).va_queue_last; | 816 | msg = bcp->bau_msg_head; |
424 | |||
425 | msg = __get_cpu_var(bau_control).bau_msg_head; | ||
426 | while (msg->sw_ack_vector) { | 817 | while (msg->sw_ack_vector) { |
427 | count++; | 818 | count++; |
428 | fw = msg->sw_ack_vector; | 819 | msgdesc.msg_slot = msg - msgdesc.va_queue_first; |
429 | msg_slot = msg - va_queue_first; | 820 | msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1; |
430 | sw_ack_slot = ffs(fw) - 1; | 821 | msgdesc.msg = msg; |
431 | 822 | uv_bau_process_message(&msgdesc, bcp); | |
432 | uv_bau_process_message(msg, msg_slot, sw_ack_slot); | ||
433 | |||
434 | msg++; | 823 | msg++; |
435 | if (msg > va_queue_last) | 824 | if (msg > msgdesc.va_queue_last) |
436 | msg = va_queue_first; | 825 | msg = msgdesc.va_queue_first; |
437 | __get_cpu_var(bau_control).bau_msg_head = msg; | 826 | bcp->bau_msg_head = msg; |
438 | } | 827 | } |
828 | stat->d_time += (get_cycles() - time_start); | ||
439 | if (!count) | 829 | if (!count) |
440 | __get_cpu_var(ptcstats).nomsg++; | 830 | stat->d_nomsg++; |
441 | else if (count > 1) | 831 | else if (count > 1) |
442 | __get_cpu_var(ptcstats).multmsg++; | 832 | stat->d_multmsg++; |
443 | 833 | ack_APIC_irq(); | |
444 | time2 = get_cycles(); | ||
445 | __get_cpu_var(ptcstats).dflush += (time2 - time1); | ||
446 | |||
447 | irq_exit(); | ||
448 | set_irq_regs(old_regs); | ||
449 | } | 834 | } |
450 | 835 | ||
451 | /* | 836 | /* |
452 | * uv_enable_timeouts | 837 | * uv_enable_timeouts |
453 | * | 838 | * |
454 | * Each target blade (i.e. blades that have cpu's) needs to have | 839 | * Each target uvhub (i.e. a uvhub that has cpu's) needs to have |
455 | * shootdown message timeouts enabled. The timeout does not cause | 840 | * shootdown message timeouts enabled. The timeout does not cause |
456 | * an interrupt, but causes an error message to be returned to | 841 | * an interrupt, but causes an error message to be returned to |
457 | * the sender. | 842 | * the sender. |
458 | */ | 843 | */ |
459 | static void uv_enable_timeouts(void) | 844 | static void uv_enable_timeouts(void) |
460 | { | 845 | { |
461 | int blade; | 846 | int uvhub; |
462 | int nblades; | 847 | int nuvhubs; |
463 | int pnode; | 848 | int pnode; |
464 | unsigned long mmr_image; | 849 | unsigned long mmr_image; |
465 | 850 | ||
466 | nblades = uv_num_possible_blades(); | 851 | nuvhubs = uv_num_possible_blades(); |
467 | 852 | ||
468 | for (blade = 0; blade < nblades; blade++) { | 853 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
469 | if (!uv_blade_nr_possible_cpus(blade)) | 854 | if (!uv_blade_nr_possible_cpus(uvhub)) |
470 | continue; | 855 | continue; |
471 | 856 | ||
472 | pnode = uv_blade_to_pnode(blade); | 857 | pnode = uv_blade_to_pnode(uvhub); |
473 | mmr_image = | 858 | mmr_image = |
474 | uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL); | 859 | uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL); |
475 | /* | 860 | /* |
@@ -479,16 +864,16 @@ static void uv_enable_timeouts(void) | |||
479 | * To program the period, the SOFT_ACK_MODE must be off. | 864 | * To program the period, the SOFT_ACK_MODE must be off. |
480 | */ | 865 | */ |
481 | mmr_image &= ~((unsigned long)1 << | 866 | mmr_image &= ~((unsigned long)1 << |
482 | UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); | 867 | UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT); |
483 | uv_write_global_mmr64 | 868 | uv_write_global_mmr64 |
484 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); | 869 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); |
485 | /* | 870 | /* |
486 | * Set the 4-bit period. | 871 | * Set the 4-bit period. |
487 | */ | 872 | */ |
488 | mmr_image &= ~((unsigned long)0xf << | 873 | mmr_image &= ~((unsigned long)0xf << |
489 | UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); | 874 | UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT); |
490 | mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD << | 875 | mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD << |
491 | UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); | 876 | UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT); |
492 | uv_write_global_mmr64 | 877 | uv_write_global_mmr64 |
493 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); | 878 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); |
494 | /* | 879 | /* |
@@ -497,7 +882,7 @@ static void uv_enable_timeouts(void) | |||
497 | * indicated in bits 2:0 (7 causes all of them to timeout). | 882 | * indicated in bits 2:0 (7 causes all of them to timeout). |
498 | */ | 883 | */ |
499 | mmr_image |= ((unsigned long)1 << | 884 | mmr_image |= ((unsigned long)1 << |
500 | UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); | 885 | UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT); |
501 | uv_write_global_mmr64 | 886 | uv_write_global_mmr64 |
502 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); | 887 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); |
503 | } | 888 | } |
@@ -522,9 +907,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data) | |||
522 | { | 907 | { |
523 | } | 908 | } |
524 | 909 | ||
910 | static inline unsigned long long | ||
911 | millisec_2_cycles(unsigned long millisec) | ||
912 | { | ||
913 | unsigned long ns; | ||
914 | unsigned long long cyc; | ||
915 | |||
916 | ns = millisec * 1000000; /* ms -> ns */ | ||
917 | cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); | ||
918 | return cyc; | ||
919 | } | ||
920 | |||
525 | /* | 921 | /* |
526 | * Display the statistics thru /proc | 922 | * Display the statistics thru /proc. |
527 | * data points to the cpu number | 923 | * 'data' points to the cpu number |
528 | */ | 924 | */ |
529 | static int uv_ptc_seq_show(struct seq_file *file, void *data) | 925 | static int uv_ptc_seq_show(struct seq_file *file, void *data) |
530 | { | 926 | { |
@@ -535,78 +931,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data) | |||
535 | 931 | ||
536 | if (!cpu) { | 932 | if (!cpu) { |
537 | seq_printf(file, | 933 | seq_printf(file, |
538 | "# cpu requestor requestee one all sretry dretry ptc_i "); | 934 | "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 "); |
539 | seq_printf(file, | 935 | seq_printf(file, |
540 | "sw_ack sflush dflush sok dnomsg dmult starget\n"); | 936 | "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto "); |
937 | seq_printf(file, | ||
938 | "retries rok resetp resett giveup sto bz throt "); | ||
939 | seq_printf(file, | ||
940 | "sw_ack recv rtime all "); | ||
941 | seq_printf(file, | ||
942 | "one mult none retry canc nocan reset rcan\n"); | ||
541 | } | 943 | } |
542 | if (cpu < num_possible_cpus() && cpu_online(cpu)) { | 944 | if (cpu < num_possible_cpus() && cpu_online(cpu)) { |
543 | stat = &per_cpu(ptcstats, cpu); | 945 | stat = &per_cpu(ptcstats, cpu); |
544 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ", | 946 | /* source side statistics */ |
545 | cpu, stat->requestor, | 947 | seq_printf(file, |
546 | stat->requestee, stat->onetlb, stat->alltlb, | 948 | "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", |
547 | stat->s_retry, stat->d_retry, stat->ptc_i); | 949 | cpu, stat->s_requestor, cycles_2_us(stat->s_time), |
548 | seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", | 950 | stat->s_ntarguvhub, stat->s_ntarguvhub16, |
951 | stat->s_ntarguvhub8, stat->s_ntarguvhub4, | ||
952 | stat->s_ntarguvhub2, stat->s_ntarguvhub1, | ||
953 | stat->s_ntargcpu, stat->s_dtimeout); | ||
954 | seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ", | ||
955 | stat->s_retry_messages, stat->s_retriesok, | ||
956 | stat->s_resets_plug, stat->s_resets_timeout, | ||
957 | stat->s_giveup, stat->s_stimeout, | ||
958 | stat->s_busy, stat->s_throttles); | ||
959 | /* destination side statistics */ | ||
960 | seq_printf(file, | ||
961 | "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", | ||
549 | uv_read_global_mmr64(uv_cpu_to_pnode(cpu), | 962 | uv_read_global_mmr64(uv_cpu_to_pnode(cpu), |
550 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), | 963 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), |
551 | stat->sflush, stat->dflush, | 964 | stat->d_requestee, cycles_2_us(stat->d_time), |
552 | stat->retriesok, stat->nomsg, | 965 | stat->d_alltlb, stat->d_onetlb, stat->d_multmsg, |
553 | stat->multmsg, stat->ntargeted); | 966 | stat->d_nomsg, stat->d_retries, stat->d_canceled, |
967 | stat->d_nocanceled, stat->d_resets, | ||
968 | stat->d_rcanceled); | ||
554 | } | 969 | } |
555 | 970 | ||
556 | return 0; | 971 | return 0; |
557 | } | 972 | } |
558 | 973 | ||
559 | /* | 974 | /* |
975 | * -1: reset the statistics | ||
560 | * 0: display meaning of the statistics | 976 | * 0: display meaning of the statistics |
561 | * >0: retry limit | 977 | * >0: maximum concurrent active descriptors per uvhub (throttle) |
562 | */ | 978 | */ |
563 | static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, | 979 | static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, |
564 | size_t count, loff_t *data) | 980 | size_t count, loff_t *data) |
565 | { | 981 | { |
566 | long newmode; | 982 | int cpu; |
983 | long input_arg; | ||
567 | char optstr[64]; | 984 | char optstr[64]; |
985 | struct ptc_stats *stat; | ||
986 | struct bau_control *bcp; | ||
568 | 987 | ||
569 | if (count == 0 || count > sizeof(optstr)) | 988 | if (count == 0 || count > sizeof(optstr)) |
570 | return -EINVAL; | 989 | return -EINVAL; |
571 | if (copy_from_user(optstr, user, count)) | 990 | if (copy_from_user(optstr, user, count)) |
572 | return -EFAULT; | 991 | return -EFAULT; |
573 | optstr[count - 1] = '\0'; | 992 | optstr[count - 1] = '\0'; |
574 | if (strict_strtoul(optstr, 10, &newmode) < 0) { | 993 | if (strict_strtol(optstr, 10, &input_arg) < 0) { |
575 | printk(KERN_DEBUG "%s is invalid\n", optstr); | 994 | printk(KERN_DEBUG "%s is invalid\n", optstr); |
576 | return -EINVAL; | 995 | return -EINVAL; |
577 | } | 996 | } |
578 | 997 | ||
579 | if (newmode == 0) { | 998 | if (input_arg == 0) { |
580 | printk(KERN_DEBUG "# cpu: cpu number\n"); | 999 | printk(KERN_DEBUG "# cpu: cpu number\n"); |
1000 | printk(KERN_DEBUG "Sender statistics:\n"); | ||
1001 | printk(KERN_DEBUG | ||
1002 | "sent: number of shootdown messages sent\n"); | ||
1003 | printk(KERN_DEBUG | ||
1004 | "stime: time spent sending messages\n"); | ||
1005 | printk(KERN_DEBUG | ||
1006 | "numuvhubs: number of hubs targeted with shootdown\n"); | ||
1007 | printk(KERN_DEBUG | ||
1008 | "numuvhubs16: number times 16 or more hubs targeted\n"); | ||
1009 | printk(KERN_DEBUG | ||
1010 | "numuvhubs8: number times 8 or more hubs targeted\n"); | ||
1011 | printk(KERN_DEBUG | ||
1012 | "numuvhubs4: number times 4 or more hubs targeted\n"); | ||
1013 | printk(KERN_DEBUG | ||
1014 | "numuvhubs2: number times 2 or more hubs targeted\n"); | ||
1015 | printk(KERN_DEBUG | ||
1016 | "numuvhubs1: number times 1 hub targeted\n"); | ||
1017 | printk(KERN_DEBUG | ||
1018 | "numcpus: number of cpus targeted with shootdown\n"); | ||
1019 | printk(KERN_DEBUG | ||
1020 | "dto: number of destination timeouts\n"); | ||
1021 | printk(KERN_DEBUG | ||
1022 | "retries: destination timeout retries sent\n"); | ||
1023 | printk(KERN_DEBUG | ||
1024 | "rok: : destination timeouts successfully retried\n"); | ||
1025 | printk(KERN_DEBUG | ||
1026 | "resetp: ipi-style resource resets for plugs\n"); | ||
1027 | printk(KERN_DEBUG | ||
1028 | "resett: ipi-style resource resets for timeouts\n"); | ||
1029 | printk(KERN_DEBUG | ||
1030 | "giveup: fall-backs to ipi-style shootdowns\n"); | ||
1031 | printk(KERN_DEBUG | ||
1032 | "sto: number of source timeouts\n"); | ||
1033 | printk(KERN_DEBUG | ||
1034 | "bz: number of stay-busy's\n"); | ||
1035 | printk(KERN_DEBUG | ||
1036 | "throt: number times spun in throttle\n"); | ||
1037 | printk(KERN_DEBUG "Destination side statistics:\n"); | ||
581 | printk(KERN_DEBUG | 1038 | printk(KERN_DEBUG |
582 | "requestor: times this cpu was the flush requestor\n"); | 1039 | "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); |
583 | printk(KERN_DEBUG | 1040 | printk(KERN_DEBUG |
584 | "requestee: times this cpu was requested to flush its TLBs\n"); | 1041 | "recv: shootdown messages received\n"); |
585 | printk(KERN_DEBUG | 1042 | printk(KERN_DEBUG |
586 | "one: times requested to flush a single address\n"); | 1043 | "rtime: time spent processing messages\n"); |
587 | printk(KERN_DEBUG | 1044 | printk(KERN_DEBUG |
588 | "all: times requested to flush all TLB's\n"); | 1045 | "all: shootdown all-tlb messages\n"); |
589 | printk(KERN_DEBUG | 1046 | printk(KERN_DEBUG |
590 | "sretry: number of retries of source-side timeouts\n"); | 1047 | "one: shootdown one-tlb messages\n"); |
591 | printk(KERN_DEBUG | 1048 | printk(KERN_DEBUG |
592 | "dretry: number of retries of destination-side timeouts\n"); | 1049 | "mult: interrupts that found multiple messages\n"); |
593 | printk(KERN_DEBUG | 1050 | printk(KERN_DEBUG |
594 | "ptc_i: times UV fell through to IPI-style flushes\n"); | 1051 | "none: interrupts that found no messages\n"); |
595 | printk(KERN_DEBUG | 1052 | printk(KERN_DEBUG |
596 | "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); | 1053 | "retry: number of retry messages processed\n"); |
597 | printk(KERN_DEBUG | 1054 | printk(KERN_DEBUG |
598 | "sflush_us: cycles spent in uv_flush_tlb_others()\n"); | 1055 | "canc: number messages canceled by retries\n"); |
599 | printk(KERN_DEBUG | 1056 | printk(KERN_DEBUG |
600 | "dflush_us: cycles spent in handling flush requests\n"); | 1057 | "nocan: number retries that found nothing to cancel\n"); |
601 | printk(KERN_DEBUG "sok: successes on retry\n"); | ||
602 | printk(KERN_DEBUG "dnomsg: interrupts with no message\n"); | ||
603 | printk(KERN_DEBUG | 1058 | printk(KERN_DEBUG |
604 | "dmult: interrupts with multiple messages\n"); | 1059 | "reset: number of ipi-style reset requests processed\n"); |
605 | printk(KERN_DEBUG "starget: nodes targeted\n"); | 1060 | printk(KERN_DEBUG |
1061 | "rcan: number messages canceled by reset requests\n"); | ||
1062 | } else if (input_arg == -1) { | ||
1063 | for_each_present_cpu(cpu) { | ||
1064 | stat = &per_cpu(ptcstats, cpu); | ||
1065 | memset(stat, 0, sizeof(struct ptc_stats)); | ||
1066 | } | ||
606 | } else { | 1067 | } else { |
607 | uv_bau_retry_limit = newmode; | 1068 | uv_bau_max_concurrent = input_arg; |
608 | printk(KERN_DEBUG "timeout retry limit:%d\n", | 1069 | bcp = &per_cpu(bau_control, smp_processor_id()); |
609 | uv_bau_retry_limit); | 1070 | if (uv_bau_max_concurrent < 1 || |
1071 | uv_bau_max_concurrent > bcp->cpus_in_uvhub) { | ||
1072 | printk(KERN_DEBUG | ||
1073 | "Error: BAU max concurrent %d; %d is invalid\n", | ||
1074 | bcp->max_concurrent, uv_bau_max_concurrent); | ||
1075 | return -EINVAL; | ||
1076 | } | ||
1077 | printk(KERN_DEBUG "Set BAU max concurrent:%d\n", | ||
1078 | uv_bau_max_concurrent); | ||
1079 | for_each_present_cpu(cpu) { | ||
1080 | bcp = &per_cpu(bau_control, cpu); | ||
1081 | bcp->max_concurrent = uv_bau_max_concurrent; | ||
1082 | } | ||
610 | } | 1083 | } |
611 | 1084 | ||
612 | return count; | 1085 | return count; |
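For completeness, the write side can be exercised from user space as below. The proc path is an assumption here, since uv_ptc_init(), which creates the file, is elided from this hunk:

    #include <stdio.h>

    int main(void)
    {
        /* assumed path; created by uv_ptc_init() */
        FILE *f = fopen("/proc/sgi_uv/ptc_statistics", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("-1\n", f);   /* -1: reset stats; 0: print legend; >0: throttle */
        fclose(f);
        return 0;
    }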
@@ -650,79 +1123,30 @@ static int __init uv_ptc_init(void) | |||
650 | } | 1123 | } |
651 | 1124 | ||
652 | /* | 1125 | /* |
653 | * begin the initialization of the per-blade control structures | ||
654 | */ | ||
655 | static struct bau_control * __init uv_table_bases_init(int blade, int node) | ||
656 | { | ||
657 | int i; | ||
658 | struct bau_msg_status *msp; | ||
659 | struct bau_control *bau_tabp; | ||
660 | |||
661 | bau_tabp = | ||
662 | kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node); | ||
663 | BUG_ON(!bau_tabp); | ||
664 | |||
665 | bau_tabp->msg_statuses = | ||
666 | kmalloc_node(sizeof(struct bau_msg_status) * | ||
667 | DEST_Q_SIZE, GFP_KERNEL, node); | ||
668 | BUG_ON(!bau_tabp->msg_statuses); | ||
669 | |||
670 | for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++) | ||
671 | bau_cpubits_clear(&msp->seen_by, (int) | ||
672 | uv_blade_nr_possible_cpus(blade)); | ||
673 | |||
674 | uv_bau_table_bases[blade] = bau_tabp; | ||
675 | |||
676 | return bau_tabp; | ||
677 | } | ||
678 | |||
679 | /* | ||
680 | * finish the initialization of the per-blade control structures | ||
681 | */ | ||
682 | static void __init | ||
683 | uv_table_bases_finish(int blade, | ||
684 | struct bau_control *bau_tablesp, | ||
685 | struct bau_desc *adp) | ||
686 | { | ||
687 | struct bau_control *bcp; | ||
688 | int cpu; | ||
689 | |||
690 | for_each_present_cpu(cpu) { | ||
691 | if (blade != uv_cpu_to_blade_id(cpu)) | ||
692 | continue; | ||
693 | |||
694 | bcp = (struct bau_control *)&per_cpu(bau_control, cpu); | ||
695 | bcp->bau_msg_head = bau_tablesp->va_queue_first; | ||
696 | bcp->va_queue_first = bau_tablesp->va_queue_first; | ||
697 | bcp->va_queue_last = bau_tablesp->va_queue_last; | ||
698 | bcp->msg_statuses = bau_tablesp->msg_statuses; | ||
699 | bcp->descriptor_base = adp; | ||
700 | } | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * initialize the sending side's sending buffers | 1126 | * initialize the sending side's sending buffers |
705 | */ | 1127 | */ |
706 | static struct bau_desc * __init | 1128 | static void |
707 | uv_activation_descriptor_init(int node, int pnode) | 1129 | uv_activation_descriptor_init(int node, int pnode) |
708 | { | 1130 | { |
709 | int i; | 1131 | int i; |
1132 | int cpu; | ||
710 | unsigned long pa; | 1133 | unsigned long pa; |
711 | unsigned long m; | 1134 | unsigned long m; |
712 | unsigned long n; | 1135 | unsigned long n; |
713 | struct bau_desc *adp; | 1136 | struct bau_desc *bau_desc; |
714 | struct bau_desc *ad2; | 1137 | struct bau_desc *bd2; |
1138 | struct bau_control *bcp; | ||
715 | 1139 | ||
716 | /* | 1140 | /* |
717 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) | 1141 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) |
718 | * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade | 1142 | * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub |
719 | */ | 1143 | */ |
720 | adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* | 1144 | bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* |
721 | UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); | 1145 | UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); |
722 | BUG_ON(!adp); | 1146 | BUG_ON(!bau_desc); |
723 | 1147 | ||
724 | pa = uv_gpa(adp); /* need the real nasid */ | 1148 | pa = uv_gpa(bau_desc); /* need the real nasid */ |
725 | n = uv_gpa_to_pnode(pa); | 1149 | n = pa >> uv_nshift; |
726 | m = pa & uv_mmask; | 1150 | m = pa & uv_mmask; |
727 | 1151 | ||
728 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, | 1152 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, |
@@ -731,96 +1155,188 @@ uv_activation_descriptor_init(int node, int pnode) | |||
731 | /* | 1155 | /* |
732 | * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each | 1156 | * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each |
733 | * cpu even though we only use the first one; one descriptor can | 1157 | * cpu even though we only use the first one; one descriptor can |
734 | * describe a broadcast to 256 nodes. | 1158 | * describe a broadcast to 256 uv hubs. |
735 | */ | 1159 | */ |
736 | for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); | 1160 | for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); |
737 | i++, ad2++) { | 1161 | i++, bd2++) { |
738 | memset(ad2, 0, sizeof(struct bau_desc)); | 1162 | memset(bd2, 0, sizeof(struct bau_desc)); |
739 | ad2->header.sw_ack_flag = 1; | 1163 | bd2->header.sw_ack_flag = 1; |
740 | /* | 1164 | /* |
741 | * base_dest_nodeid is the first node in the partition, so | 1165 | * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub |
742 | * the bit map will indicate partition-relative node numbers. | 1166 | * in the partition. The bit map will indicate uvhub numbers, |
743 | * note that base_dest_nodeid is actually a nasid. | 1167 | * which are 0-N in a partition. Pnodes are unique system-wide. |
744 | */ | 1168 | */ |
745 | ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1; | 1169 | bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; |
746 | ad2->header.dest_subnodeid = 0x10; /* the LB */ | 1170 | bd2->header.dest_subnodeid = 0x10; /* the LB */ |
747 | ad2->header.command = UV_NET_ENDPOINT_INTD; | 1171 | bd2->header.command = UV_NET_ENDPOINT_INTD; |
748 | ad2->header.int_both = 1; | 1172 | bd2->header.int_both = 1; |
749 | /* | 1173 | /* |
750 | * all others need to be set to zero: | 1174 | * all others need to be set to zero: |
751 | * fairness chaining multilevel count replied_to | 1175 | * fairness chaining multilevel count replied_to |
752 | */ | 1176 | */ |
753 | } | 1177 | } |
754 | return adp; | 1178 | for_each_present_cpu(cpu) { |
1179 | if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu))) | ||
1180 | continue; | ||
1181 | bcp = &per_cpu(bau_control, cpu); | ||
1182 | bcp->descriptor_base = bau_desc; | ||
1183 | } | ||
755 | } | 1184 | } |
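The pa-to-(n, m) split above divides a UV global address into pnode bits and a node offset, with uv_nshift and uv_mmask both derived from the hub's m_val. A self-contained illustration with an assumed m_val:

    #include <stdio.h>

    int main(void)
    {
        unsigned int m_val = 37;               /* assumed; really from uv_hub_info */
        unsigned long uv_nshift = m_val;
        unsigned long uv_mmask = (1UL << m_val) - 1;
        unsigned long pa = 0x123456789abUL;    /* made-up global address */

        unsigned long n = pa >> uv_nshift;     /* pnode bits */
        unsigned long m = pa & uv_mmask;       /* offset within the node */

        printf("pnode=%#lx offset=%#lx\n", n, m);
        return 0;
    }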
756 | 1185 | ||
757 | /* | 1186 | /* |
758 | * initialize the destination side's receiving buffers | 1187 | * initialize the destination side's receiving buffers |
1188 | * entered for each uvhub in the partition | ||
1189 | * - node is first node (kernel memory notion) on the uvhub | ||
1190 | * - pnode is the uvhub's physical identifier | ||
759 | */ | 1191 | */ |
760 | static struct bau_payload_queue_entry * __init | 1192 | static void |
761 | uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) | 1193 | uv_payload_queue_init(int node, int pnode) |
762 | { | 1194 | { |
763 | struct bau_payload_queue_entry *pqp; | ||
764 | unsigned long pa; | ||
765 | int pn; | 1195 | int pn; |
1196 | int cpu; | ||
766 | char *cp; | 1197 | char *cp; |
1198 | unsigned long pa; | ||
1199 | struct bau_payload_queue_entry *pqp; | ||
1200 | struct bau_payload_queue_entry *pqp_malloc; | ||
1201 | struct bau_control *bcp; | ||
767 | 1202 | ||
768 | pqp = (struct bau_payload_queue_entry *) kmalloc_node( | 1203 | pqp = (struct bau_payload_queue_entry *) kmalloc_node( |
769 | (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), | 1204 | (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), |
770 | GFP_KERNEL, node); | 1205 | GFP_KERNEL, node); |
771 | BUG_ON(!pqp); | 1206 | BUG_ON(!pqp); |
1207 | pqp_malloc = pqp; | ||
772 | 1208 | ||
773 | cp = (char *)pqp + 31; | 1209 | cp = (char *)pqp + 31; |
774 | pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); | 1210 | pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); |
775 | bau_tablesp->va_queue_first = pqp; | 1211 | |
1212 | for_each_present_cpu(cpu) { | ||
1213 | if (pnode != uv_cpu_to_pnode(cpu)) | ||
1214 | continue; | ||
1215 | /* for every cpu on this pnode: */ | ||
1216 | bcp = &per_cpu(bau_control, cpu); | ||
1217 | bcp->va_queue_first = pqp; | ||
1218 | bcp->bau_msg_head = pqp; | ||
1219 | bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1); | ||
1220 | } | ||
776 | /* | 1221 | /* |
777 | * need the pnode of where the memory was really allocated | 1222 | * need the pnode of where the memory was really allocated |
778 | */ | 1223 | */ |
779 | pa = uv_gpa(pqp); | 1224 | pa = uv_gpa(pqp); |
780 | pn = uv_gpa_to_pnode(pa); | 1225 | pn = pa >> uv_nshift; |
781 | uv_write_global_mmr64(pnode, | 1226 | uv_write_global_mmr64(pnode, |
782 | UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, | 1227 | UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, |
783 | ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | | 1228 | ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | |
784 | uv_physnodeaddr(pqp)); | 1229 | uv_physnodeaddr(pqp)); |
785 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, | 1230 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, |
786 | uv_physnodeaddr(pqp)); | 1231 | uv_physnodeaddr(pqp)); |
787 | bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1); | ||
788 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, | 1232 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, |
789 | (unsigned long) | 1233 | (unsigned long) |
790 | uv_physnodeaddr(bau_tablesp->va_queue_last)); | 1234 | uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1))); |
1235 | /* in effect, all msg_type's are set to MSG_NOOP */ | ||
791 | memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE); | 1236 | memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE); |
792 | |||
793 | return pqp; | ||
794 | } | 1237 | } |
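The cp = (char *)pqp + 31 followed by the shift-down/shift-up by 5 is the standard round-up-to-32-bytes idiom: the allocation is over-sized so its start can be advanced to the next 32-byte boundary, which the payload queue entries require. A standalone demonstration of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static void *align32(void *p)
    {
        char *cp = (char *)p + 31;                   /* advance past any misalignment */
        return (void *)(((uintptr_t)cp >> 5) << 5);  /* clear the low 5 bits */
    }

    int main(void)
    {
        char buf[64];
        void *a = align32(buf + 3);                  /* deliberately misaligned input */

        printf("%p -> %p (mod 32 = %lu)\n", (void *)(buf + 3), a,
               (unsigned long)((uintptr_t)a & 31));
        return 0;
    }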
795 | 1238 | ||
796 | /* | 1239 | /* |
797 | * Initialization of each UV blade's structures | 1240 | * Initialization of each UV hub's structures |
798 | */ | 1241 | */ |
799 | static int __init uv_init_blade(int blade) | 1242 | static void __init uv_init_uvhub(int uvhub, int vector) |
800 | { | 1243 | { |
801 | int node; | 1244 | int node; |
802 | int pnode; | 1245 | int pnode; |
803 | unsigned long pa; | ||
804 | unsigned long apicid; | 1246 | unsigned long apicid; |
805 | struct bau_desc *adp; | 1247 | |
806 | struct bau_payload_queue_entry *pqp; | 1248 | node = uvhub_to_first_node(uvhub); |
807 | struct bau_control *bau_tablesp; | 1249 | pnode = uv_blade_to_pnode(uvhub); |
808 | 1250 | uv_activation_descriptor_init(node, pnode); | |
809 | node = blade_to_first_node(blade); | 1251 | uv_payload_queue_init(node, pnode); |
810 | bau_tablesp = uv_table_bases_init(blade, node); | ||
811 | pnode = uv_blade_to_pnode(blade); | ||
812 | adp = uv_activation_descriptor_init(node, pnode); | ||
813 | pqp = uv_payload_queue_init(node, pnode, bau_tablesp); | ||
814 | uv_table_bases_finish(blade, bau_tablesp, adp); | ||
815 | /* | 1252 | /* |
816 | * the below initialization can't be in firmware because the | 1253 | * the below initialization can't be in firmware because the |
817 | * messaging IRQ will be determined by the OS | 1254 | * messaging IRQ will be determined by the OS |
818 | */ | 1255 | */ |
819 | apicid = blade_to_first_apicid(blade); | 1256 | apicid = uvhub_to_first_apicid(uvhub); |
820 | pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); | ||
821 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | 1257 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
822 | ((apicid << 32) | UV_BAU_MESSAGE)); | 1258 | ((apicid << 32) | vector)); |
823 | return 0; | 1259 | } |
1260 | |||
1261 | /* | ||
1262 | * initialize the bau_control structure for each cpu | ||
1263 | */ | ||
1264 | static void uv_init_per_cpu(int nuvhubs) | ||
1265 | { | ||
1266 | int i, j, k; | ||
1267 | int cpu; | ||
1268 | int pnode; | ||
1269 | int uvhub; | ||
1270 | short socket = 0; | ||
1271 | struct bau_control *bcp; | ||
1272 | struct uvhub_desc *bdp; | ||
1273 | struct socket_desc *sdp; | ||
1274 | struct bau_control *hmaster = NULL; | ||
1275 | struct bau_control *smaster = NULL; | ||
1276 | struct socket_desc { | ||
1277 | short num_cpus; | ||
1278 | short cpu_number[16]; | ||
1279 | }; | ||
1280 | struct uvhub_desc { | ||
1281 | short num_sockets; | ||
1282 | short num_cpus; | ||
1283 | short uvhub; | ||
1284 | short pnode; | ||
1285 | struct socket_desc socket[2]; | ||
1286 | }; | ||
1287 | struct uvhub_desc *uvhub_descs; | ||
1288 | |||
1289 | uvhub_descs = (struct uvhub_desc *) | ||
1290 | kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); | ||
1291 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); | ||
1292 | for_each_present_cpu(cpu) { | ||
1293 | bcp = &per_cpu(bau_control, cpu); | ||
1294 | memset(bcp, 0, sizeof(struct bau_control)); | ||
1295 | spin_lock_init(&bcp->masks_lock); | ||
1296 | bcp->max_concurrent = uv_bau_max_concurrent; | ||
1297 | pnode = uv_cpu_hub_info(cpu)->pnode; | ||
1298 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; | ||
1299 | bdp = &uvhub_descs[uvhub]; | ||
1300 | bdp->num_cpus++; | ||
1301 | bdp->uvhub = uvhub; | ||
1302 | bdp->pnode = pnode; | ||
1303 | /* time interval to catch a hardware stay-busy bug */ | ||
1304 | bcp->timeout_interval = millisec_2_cycles(3); | ||
1305 | /* kludge: assume uv_hub.h is constant */ | ||
1306 | socket = (cpu_physical_id(cpu)>>5)&1; | ||
1307 | if (socket >= bdp->num_sockets) | ||
1308 | bdp->num_sockets = socket+1; | ||
1309 | sdp = &bdp->socket[socket]; | ||
1310 | sdp->cpu_number[sdp->num_cpus] = cpu; | ||
1311 | sdp->num_cpus++; | ||
1312 | } | ||
1313 | socket = 0; | ||
1314 | for_each_possible_blade(uvhub) { | ||
1315 | bdp = &uvhub_descs[uvhub]; | ||
1316 | for (i = 0; i < bdp->num_sockets; i++) { | ||
1317 | sdp = &bdp->socket[i]; | ||
1318 | for (j = 0; j < sdp->num_cpus; j++) { | ||
1319 | cpu = sdp->cpu_number[j]; | ||
1320 | bcp = &per_cpu(bau_control, cpu); | ||
1321 | bcp->cpu = cpu; | ||
1322 | if (j == 0) { | ||
1323 | smaster = bcp; | ||
1324 | if (i == 0) | ||
1325 | hmaster = bcp; | ||
1326 | } | ||
1327 | bcp->cpus_in_uvhub = bdp->num_cpus; | ||
1328 | bcp->cpus_in_socket = sdp->num_cpus; | ||
1329 | bcp->socket_master = smaster; | ||
1330 | bcp->uvhub_master = hmaster; | ||
1331 | for (k = 0; k < DEST_Q_SIZE; k++) | ||
1332 | bcp->socket_acknowledge_count[k] = 0; | ||
1333 | bcp->uvhub_cpu = | ||
1334 | uv_cpu_hub_info(cpu)->blade_processor_id; | ||
1335 | } | ||
1336 | socket++; | ||
1337 | } | ||
1338 | } | ||
1339 | kfree(uvhub_descs); | ||
824 | } | 1340 | } |
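The socket derivation flagged as a kludge above takes bit 5 of the physical APIC id, which apparently assumes at most 32 APIC ids per socket and two sockets per uvhub. A quick worked example of that mapping:

    #include <stdio.h>

    int main(void)
    {
        unsigned int apicids[] = { 0, 17, 31, 32, 54, 63 };
        unsigned int i;

        for (i = 0; i < 6; i++)
            printf("apicid %2u -> socket %u\n",
                   apicids[i], (apicids[i] >> 5) & 1);
        return 0;
    }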
825 | 1341 | ||
826 | /* | 1342 | /* |
@@ -828,38 +1344,54 @@ static int __init uv_init_blade(int blade) | |||
828 | */ | 1344 | */ |
829 | static int __init uv_bau_init(void) | 1345 | static int __init uv_bau_init(void) |
830 | { | 1346 | { |
831 | int blade; | 1347 | int uvhub; |
832 | int nblades; | 1348 | int pnode; |
1349 | int nuvhubs; | ||
833 | int cur_cpu; | 1350 | int cur_cpu; |
1351 | int vector; | ||
1352 | unsigned long mmr; | ||
834 | 1353 | ||
835 | if (!is_uv_system()) | 1354 | if (!is_uv_system()) |
836 | return 0; | 1355 | return 0; |
837 | 1356 | ||
1357 | if (nobau) | ||
1358 | return 0; | ||
1359 | |||
838 | for_each_possible_cpu(cur_cpu) | 1360 | for_each_possible_cpu(cur_cpu) |
839 | zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), | 1361 | zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), |
840 | GFP_KERNEL, cpu_to_node(cur_cpu)); | 1362 | GFP_KERNEL, cpu_to_node(cur_cpu)); |
841 | 1363 | ||
842 | uv_bau_retry_limit = 1; | 1364 | uv_bau_max_concurrent = MAX_BAU_CONCURRENT; |
1365 | uv_nshift = uv_hub_info->m_val; | ||
843 | uv_mmask = (1UL << uv_hub_info->m_val) - 1; | 1366 | uv_mmask = (1UL << uv_hub_info->m_val) - 1; |
844 | nblades = uv_num_possible_blades(); | 1367 | nuvhubs = uv_num_possible_blades(); |
845 | 1368 | ||
846 | uv_bau_table_bases = (struct bau_control **) | 1369 | uv_init_per_cpu(nuvhubs); |
847 | kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); | ||
848 | BUG_ON(!uv_bau_table_bases); | ||
849 | 1370 | ||
850 | uv_partition_base_pnode = 0x7fffffff; | 1371 | uv_partition_base_pnode = 0x7fffffff; |
851 | for (blade = 0; blade < nblades; blade++) | 1372 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) |
852 | if (uv_blade_nr_possible_cpus(blade) && | 1373 | if (uv_blade_nr_possible_cpus(uvhub) && |
853 | (uv_blade_to_pnode(blade) < uv_partition_base_pnode)) | 1374 | (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) |
854 | uv_partition_base_pnode = uv_blade_to_pnode(blade); | 1375 | uv_partition_base_pnode = uv_blade_to_pnode(uvhub); |
855 | for (blade = 0; blade < nblades; blade++) | 1376 | |
856 | if (uv_blade_nr_possible_cpus(blade)) | 1377 | vector = UV_BAU_MESSAGE; |
857 | uv_init_blade(blade); | 1378 | for_each_possible_blade(uvhub) |
858 | 1379 | if (uv_blade_nr_possible_cpus(uvhub)) | |
859 | alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); | 1380 | uv_init_uvhub(uvhub, vector); |
1381 | |||
860 | uv_enable_timeouts(); | 1382 | uv_enable_timeouts(); |
1383 | alloc_intr_gate(vector, uv_bau_message_intr1); | ||
1384 | |||
1385 | for_each_possible_blade(uvhub) { | ||
1386 | pnode = uv_blade_to_pnode(uvhub); | ||
1387 | /* INIT the bau */ | ||
1388 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, | ||
1389 | ((unsigned long)1 << 63)); | ||
1390 | mmr = 1; /* should be 1 to broadcast to both sockets */ | ||
1391 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr); | ||
1392 | } | ||
861 | 1393 | ||
862 | return 0; | 1394 | return 0; |
863 | } | 1395 | } |
864 | __initcall(uv_bau_init); | 1396 | core_initcall(uv_bau_init); |
865 | __initcall(uv_ptc_init); | 1397 | core_initcall(uv_ptc_init); |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 36f1bd9f8e76..02cfb9b8f5b1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -108,15 +108,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) | |||
108 | dec_preempt_count(); | 108 | dec_preempt_count(); |
109 | } | 109 | } |
110 | 110 | ||
111 | #ifdef CONFIG_X86_32 | ||
112 | static inline void | ||
113 | die_if_kernel(const char *str, struct pt_regs *regs, long err) | ||
114 | { | ||
115 | if (!user_mode_vm(regs)) | ||
116 | die(str, regs, err); | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | static void __kprobes | 111 | static void __kprobes |
121 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, | 112 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, |
122 | long error_code, siginfo_t *info) | 113 | long error_code, siginfo_t *info) |
@@ -585,55 +576,67 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) | |||
585 | return; | 576 | return; |
586 | } | 577 | } |
587 | 578 | ||
588 | #ifdef CONFIG_X86_64 | ||
589 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) | ||
590 | { | ||
591 | if (fixup_exception(regs)) | ||
592 | return 1; | ||
593 | |||
594 | notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE); | ||
595 | /* Illegal floating point operation in the kernel */ | ||
596 | current->thread.trap_no = trapnr; | ||
597 | die(str, regs, 0); | ||
598 | return 0; | ||
599 | } | ||
600 | #endif | ||
601 | |||
602 | /* | 579 | /* |
603 | * Note that we play around with the 'TS' bit in an attempt to get | 580 | * Note that we play around with the 'TS' bit in an attempt to get |
604 | * the correct behaviour even in the presence of the asynchronous | 581 | * the correct behaviour even in the presence of the asynchronous |
605 | * IRQ13 behaviour | 582 | * IRQ13 behaviour |
606 | */ | 583 | */ |
607 | void math_error(void __user *ip) | 584 | void math_error(struct pt_regs *regs, int error_code, int trapnr) |
608 | { | 585 | { |
609 | struct task_struct *task; | 586 | struct task_struct *task = current; |
610 | siginfo_t info; | 587 | siginfo_t info; |
611 | unsigned short cwd, swd, err; | 588 | unsigned short err; |
589 | char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; | ||
590 | |||
591 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) | ||
592 | return; | ||
593 | conditional_sti(regs); | ||
594 | |||
595 | if (!user_mode_vm(regs)) | ||
596 | { | ||
597 | if (!fixup_exception(regs)) { | ||
598 | task->thread.error_code = error_code; | ||
599 | task->thread.trap_no = trapnr; | ||
600 | die(str, regs, error_code); | ||
601 | } | ||
602 | return; | ||
603 | } | ||
612 | 604 | ||
613 | /* | 605 | /* |
614 | * Save the info for the exception handler and clear the error. | 606 | * Save the info for the exception handler and clear the error. |
615 | */ | 607 | */ |
616 | task = current; | ||
617 | save_init_fpu(task); | 608 | save_init_fpu(task); |
618 | task->thread.trap_no = 16; | 609 | task->thread.trap_no = trapnr; |
619 | task->thread.error_code = 0; | 610 | task->thread.error_code = error_code; |
620 | info.si_signo = SIGFPE; | 611 | info.si_signo = SIGFPE; |
621 | info.si_errno = 0; | 612 | info.si_errno = 0; |
622 | info.si_addr = ip; | 613 | info.si_addr = (void __user *)regs->ip; |
623 | /* | 614 | if (trapnr == 16) { |
624 | * (~cwd & swd) will mask out exceptions that are not set to unmasked | 615 | unsigned short cwd, swd; |
625 | * status. 0x3f is the exception bits in these regs, 0x200 is the | 616 | /* |
626 | * C1 reg you need in case of a stack fault, 0x040 is the stack | 617 | * (~cwd & swd) will mask out exceptions that are not set to unmasked |
627 | * fault bit. We should only be taking one exception at a time, | 618 | * status. 0x3f is the exception bits in these regs, 0x200 is the |
628 | * so if this combination doesn't produce any single exception, | 619 | * C1 reg you need in case of a stack fault, 0x040 is the stack |
629 | * then we have a bad program that isn't synchronizing its FPU usage | 620 | * fault bit. We should only be taking one exception at a time, |
630 | * and it will suffer the consequences since we won't be able to | 621 | * so if this combination doesn't produce any single exception, |
631 | * fully reproduce the context of the exception | 622 | * then we have a bad program that isn't synchronizing its FPU usage |
632 | */ | 623 | * and it will suffer the consequences since we won't be able to |
633 | cwd = get_fpu_cwd(task); | 624 | * fully reproduce the context of the exception |
634 | swd = get_fpu_swd(task); | 625 | */ |
626 | cwd = get_fpu_cwd(task); | ||
627 | swd = get_fpu_swd(task); | ||
635 | 628 | ||
636 | err = swd & ~cwd; | 629 | err = swd & ~cwd; |
630 | } else { | ||
631 | /* | ||
632 | * The SIMD FPU exceptions are handled a little differently, as there | ||
633 | * is only a single status/control register. Thus, to determine which | ||
634 | * unmasked exception was caught we must mask the exception mask bits | ||
635 | * at 0x1f80, and then use these to mask the exception bits at 0x3f. | ||
636 | */ | ||
637 | unsigned short mxcsr = get_fpu_mxcsr(task); | ||
638 | err = ~(mxcsr >> 7) & mxcsr; | ||
639 | } | ||
637 | 640 | ||
638 | if (err & 0x001) { /* Invalid op */ | 641 | if (err & 0x001) { /* Invalid op */ |
639 | /* | 642 | /* |
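The new err computation for the SIMD case compresses the two-step masking described in the comment (and spelled out in the removed simd_math_error() below) into one expression; on the six exception-flag bits (0x3f), the only ones the if-chain tests, it agrees with the old explicit form. A quick exhaustive check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mxcsr;

        for (mxcsr = 0; mxcsr < 0x2000; mxcsr++) {
            unsigned int old_err = ~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f);
            unsigned int new_err = (~(mxcsr >> 7) & mxcsr) & 0x3f;

            if (old_err != new_err) {
                printf("mismatch at mxcsr=%#x\n", mxcsr);
                return 1;
            }
        }
        printf("equivalent on the 0x3f flag bits\n");
        return 0;
    }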
@@ -662,97 +665,17 @@ void math_error(void __user *ip) | |||
662 | 665 | ||
663 | dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) | 666 | dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) |
664 | { | 667 | { |
665 | conditional_sti(regs); | ||
666 | |||
667 | #ifdef CONFIG_X86_32 | 668 | #ifdef CONFIG_X86_32 |
668 | ignore_fpu_irq = 1; | 669 | ignore_fpu_irq = 1; |
669 | #else | ||
670 | if (!user_mode(regs) && | ||
671 | kernel_math_error(regs, "kernel x87 math error", 16)) | ||
672 | return; | ||
673 | #endif | 670 | #endif |
674 | 671 | ||
675 | math_error((void __user *)regs->ip); | 672 | math_error(regs, error_code, 16); |
676 | } | ||
677 | |||
678 | static void simd_math_error(void __user *ip) | ||
679 | { | ||
680 | struct task_struct *task; | ||
681 | siginfo_t info; | ||
682 | unsigned short mxcsr; | ||
683 | |||
684 | /* | ||
685 | * Save the info for the exception handler and clear the error. | ||
686 | */ | ||
687 | task = current; | ||
688 | save_init_fpu(task); | ||
689 | task->thread.trap_no = 19; | ||
690 | task->thread.error_code = 0; | ||
691 | info.si_signo = SIGFPE; | ||
692 | info.si_errno = 0; | ||
693 | info.si_code = __SI_FAULT; | ||
694 | info.si_addr = ip; | ||
695 | /* | ||
696 | * The SIMD FPU exceptions are handled a little differently, as there | ||
697 | * is only a single status/control register. Thus, to determine which | ||
698 | * unmasked exception was caught we must mask the exception mask bits | ||
699 | * at 0x1f80, and then use these to mask the exception bits at 0x3f. | ||
700 | */ | ||
701 | mxcsr = get_fpu_mxcsr(task); | ||
702 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { | ||
703 | case 0x000: | ||
704 | default: | ||
705 | break; | ||
706 | case 0x001: /* Invalid Op */ | ||
707 | info.si_code = FPE_FLTINV; | ||
708 | break; | ||
709 | case 0x002: /* Denormalize */ | ||
710 | case 0x010: /* Underflow */ | ||
711 | info.si_code = FPE_FLTUND; | ||
712 | break; | ||
713 | case 0x004: /* Zero Divide */ | ||
714 | info.si_code = FPE_FLTDIV; | ||
715 | break; | ||
716 | case 0x008: /* Overflow */ | ||
717 | info.si_code = FPE_FLTOVF; | ||
718 | break; | ||
719 | case 0x020: /* Precision */ | ||
720 | info.si_code = FPE_FLTRES; | ||
721 | break; | ||
722 | } | ||
723 | force_sig_info(SIGFPE, &info, task); | ||
724 | } | 673 | } |
725 | 674 | ||
726 | dotraplinkage void | 675 | dotraplinkage void |
727 | do_simd_coprocessor_error(struct pt_regs *regs, long error_code) | 676 | do_simd_coprocessor_error(struct pt_regs *regs, long error_code) |
728 | { | 677 | { |
729 | conditional_sti(regs); | 678 | math_error(regs, error_code, 19); |
730 | |||
731 | #ifdef CONFIG_X86_32 | ||
732 | if (cpu_has_xmm) { | ||
733 | /* Handle SIMD FPU exceptions on PIII+ processors. */ | ||
734 | ignore_fpu_irq = 1; | ||
735 | simd_math_error((void __user *)regs->ip); | ||
736 | return; | ||
737 | } | ||
738 | /* | ||
739 | * Handle strange cache flush from user space exception | ||
740 | * in all other cases. This is undocumented behaviour. | ||
741 | */ | ||
742 | if (regs->flags & X86_VM_MASK) { | ||
743 | handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code); | ||
744 | return; | ||
745 | } | ||
746 | current->thread.trap_no = 19; | ||
747 | current->thread.error_code = error_code; | ||
748 | die_if_kernel("cache flush denied", regs, error_code); | ||
749 | force_sig(SIGSEGV, current); | ||
750 | #else | ||
751 | if (!user_mode(regs) && | ||
752 | kernel_math_error(regs, "kernel simd math error", 19)) | ||
753 | return; | ||
754 | simd_math_error((void __user *)regs->ip); | ||
755 | #endif | ||
756 | } | 679 | } |
757 | 680 | ||
758 | dotraplinkage void | 681 | dotraplinkage void |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 1d40336b030a..1132129db792 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
@@ -44,7 +44,7 @@ static void uv_ack_apic(unsigned int irq) | |||
44 | ack_APIC_irq(); | 44 | ack_APIC_irq(); |
45 | } | 45 | } |
46 | 46 | ||
47 | struct irq_chip uv_irq_chip = { | 47 | static struct irq_chip uv_irq_chip = { |
48 | .name = "UV-CORE", | 48 | .name = "UV-CORE", |
49 | .startup = uv_noop_ret, | 49 | .startup = uv_noop_ret, |
50 | .shutdown = uv_noop, | 50 | .shutdown = uv_noop, |
@@ -141,7 +141,7 @@ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) | |||
141 | */ | 141 | */ |
142 | static int | 142 | static int |
143 | arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | 143 | arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, |
144 | unsigned long mmr_offset, int restrict) | 144 | unsigned long mmr_offset, int limit) |
145 | { | 145 | { |
146 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 146 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
147 | struct irq_desc *desc = irq_to_desc(irq); | 147 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -160,7 +160,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
160 | if (err != 0) | 160 | if (err != 0) |
161 | return err; | 161 | return err; |
162 | 162 | ||
163 | if (restrict == UV_AFFINITY_CPU) | 163 | if (limit == UV_AFFINITY_CPU) |
164 | desc->status |= IRQ_NO_BALANCING; | 164 | desc->status |= IRQ_NO_BALANCING; |
165 | else | 165 | else |
166 | desc->status |= IRQ_MOVE_PCNTXT; | 166 | desc->status |= IRQ_MOVE_PCNTXT; |
@@ -214,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
214 | unsigned long mmr_value; | 214 | unsigned long mmr_value; |
215 | struct uv_IO_APIC_route_entry *entry; | 215 | struct uv_IO_APIC_route_entry *entry; |
216 | unsigned long mmr_offset; | 216 | unsigned long mmr_offset; |
217 | unsigned mmr_pnode; | 217 | int mmr_pnode; |
218 | 218 | ||
219 | if (set_desc_affinity(desc, mask, &dest)) | 219 | if (set_desc_affinity(desc, mask, &dest)) |
220 | return -1; | 220 | return -1; |
@@ -248,7 +248,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
248 | * interrupt is raised. | 248 | * interrupt is raised. |
249 | */ | 249 | */ |
250 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, | 250 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, |
251 | unsigned long mmr_offset, int restrict) | 251 | unsigned long mmr_offset, int limit) |
252 | { | 252 | { |
253 | int irq, ret; | 253 | int irq, ret; |
254 | 254 | ||
@@ -258,7 +258,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, | |||
258 | return -EBUSY; | 258 | return -EBUSY; |
259 | 259 | ||
260 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, | 260 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, |
261 | restrict); | 261 | limit); |
262 | if (ret == irq) | 262 | if (ret == irq) |
263 | uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); | 263 | uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); |
264 | else | 264 | else |
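The restrict-to-limit rename in this file is likely more than cosmetic: restrict has been a reserved keyword since C99, so it cannot serve as a parameter name there. A minimal example of its legitimate use as a qualifier:

    #include <stdio.h>

    /* "restrict" promises the pointers do not alias; it cannot also be
     * used as an identifier, hence the rename to "limit" above. */
    static int sum(const int *restrict a, const int *restrict b)
    {
        return *a + *b;
    }

    int main(void)
    {
        int x = 1, y = 2;

        printf("%d\n", sum(&x, &y));
        return 0;
    }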
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 693920b22496..1b950d151e58 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(memcpy); | |||
54 | EXPORT_SYMBOL(__memcpy); | 54 | EXPORT_SYMBOL(__memcpy); |
55 | 55 | ||
56 | EXPORT_SYMBOL(empty_zero_page); | 56 | EXPORT_SYMBOL(empty_zero_page); |
57 | EXPORT_SYMBOL(init_level4_pgt); | ||
58 | #ifndef CONFIG_PARAVIRT | 57 | #ifndef CONFIG_PARAVIRT |
59 | EXPORT_SYMBOL(native_load_gs_index); | 58 | EXPORT_SYMBOL(native_load_gs_index); |
60 | #endif | 59 | #endif |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 782c3a362ec6..37e68fc5e24a 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -99,7 +99,7 @@ int save_i387_xstate(void __user *buf) | |||
99 | if (err) | 99 | if (err) |
100 | return err; | 100 | return err; |
101 | 101 | ||
102 | if (task_thread_info(tsk)->status & TS_XSAVE) | 102 | if (use_xsave()) |
103 | err = xsave_user(buf); | 103 | err = xsave_user(buf); |
104 | else | 104 | else |
105 | err = fxsave_user(buf); | 105 | err = fxsave_user(buf); |
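use_xsave() itself is not shown in this diff; it presumably wraps the kernel's cached XSAVE CPU-feature test. A standalone analogue that queries the CPUID bit directly (CPUID.1:ECX bit 26):

    #include <cpuid.h>
    #include <stdio.h>

    static int use_xsave(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        return (ecx >> 26) & 1;     /* CPUID.1:ECX.XSAVE[bit 26] */
    }

    int main(void)
    {
        printf("xsave %ssupported\n", use_xsave() ? "" : "not ");
        return 0;
    }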
@@ -109,14 +109,14 @@ int save_i387_xstate(void __user *buf) | |||
109 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 109 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
110 | stts(); | 110 | stts(); |
111 | } else { | 111 | } else { |
112 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | 112 | if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, |
113 | xstate_size)) | 113 | xstate_size)) |
114 | return -1; | 114 | return -1; |
115 | } | 115 | } |
116 | 116 | ||
117 | clear_used_math(); /* trigger finit */ | 117 | clear_used_math(); /* trigger finit */ |
118 | 118 | ||
119 | if (task_thread_info(tsk)->status & TS_XSAVE) { | 119 | if (use_xsave()) { |
120 | struct _fpstate __user *fx = buf; | 120 | struct _fpstate __user *fx = buf; |
121 | struct _xstate __user *x = buf; | 121 | struct _xstate __user *x = buf; |
122 | u64 xstate_bv; | 122 | u64 xstate_bv; |
@@ -225,7 +225,7 @@ int restore_i387_xstate(void __user *buf) | |||
225 | clts(); | 225 | clts(); |
226 | task_thread_info(current)->status |= TS_USEDFPU; | 226 | task_thread_info(current)->status |= TS_USEDFPU; |
227 | } | 227 | } |
228 | if (task_thread_info(tsk)->status & TS_XSAVE) | 228 | if (use_xsave()) |
229 | err = restore_user_xstate(buf); | 229 | err = restore_user_xstate(buf); |
230 | else | 230 | else |
231 | err = fxrstor_checking((__force struct i387_fxsave_struct *) | 231 | err = fxrstor_checking((__force struct i387_fxsave_struct *) |