Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/base.S           |   3
-rw-r--r--  arch/s390/kernel/dis.c            |   8
-rw-r--r--  arch/s390/kernel/ipl.c            |  11
-rw-r--r--  arch/s390/kernel/machine_kexec.c  |  19
-rw-r--r--  arch/s390/kernel/smp.c            | 261
-rw-r--r--  arch/s390/kernel/sysinfo.c        |   8
-rw-r--r--  arch/s390/kernel/topology.c       |  63
-rw-r--r--  arch/s390/kernel/vtime.c          |  58
8 files changed, 336 insertions(+), 95 deletions(-)
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a2275..f74a53d339b0 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
 	lg	%r4,0(%r4)		# Save PSW
 	sturg	%r4,%r3			# Use sturg, because of large pages
 	lghi	%r1,1
-	diag	%r1,%r1,0x308
+	lghi	%r0,0
+	diag	%r0,%r1,0x308
 .Lrestart_part2:
 	lhi	%r0,0			# Load r0 with zero
 	lhi	%r1,2			# Use mode 2 = ESAME (dump)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index d46d0b0b2cda..533430307da8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -137,7 +137,7 @@ enum {
 	INSTR_RSI_RRP,
 	INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
 	INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
-	INSTR_RSY_RDRM,
+	INSTR_RSY_RDRM, INSTR_RSY_RMRD,
 	INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
 	INSTR_RS_RURD,
 	INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
@@ -307,6 +307,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RSY_AARD]  = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_CCRD]  = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RDRM]  = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
+	[INSTR_RSY_RMRD]  = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RRRD]  = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RURD]  = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
 	[INSTR_RS_AARD]   = { 0xff, A_8,A_12,D_20,B_16,0,0 },
@@ -450,7 +451,8 @@ enum {
 	LONG_INSN_VERLLV,
 	LONG_INSN_VESRAV,
 	LONG_INSN_VESRLV,
-	LONG_INSN_VSBCBI
+	LONG_INSN_VSBCBI,
+	LONG_INSN_STCCTM
 };
 
 static char *long_insn_name[] = {
@@ -530,6 +532,7 @@ static char *long_insn_name[] = {
 	[LONG_INSN_VESRAV] = "vesrav",
 	[LONG_INSN_VESRLV] = "vesrlv",
 	[LONG_INSN_VSBCBI] = "vsbcbi",
+	[LONG_INSN_STCCTM] = "stcctm",
 };
 
 static struct s390_insn opcode[] = {
@@ -1655,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
 	{ "lric", 0x60, INSTR_RSY_RDRM },
 	{ "stric", 0x61, INSTR_RSY_RDRM },
 	{ "mric", 0x62, INSTR_RSY_RDRM },
+	{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
 #endif
 	{ "rll", 0x1d, INSTR_RSY_RRRD },
 	{ "mvclu", 0x8e, INSTR_RSY_RRRD },
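A note on the { 0, LONG_INSN_STCCTM } initializer above: mnemonics that do not fit the fixed-width name field of struct s390_insn are stored as a zero byte followed by an index into long_insn_name[]. A minimal sketch of the lookup, assuming this field layout (the real struct and print code live in dis.c itself):

	struct s390_insn {
		const char name[5];	/* mnemonic, or { 0, LONG_INSN_xxx } */
		unsigned char opfrag;
		unsigned char format;
	};

	/* Resolve the printable mnemonic for an opcode table entry. */
	static const char *insn_name(const struct s390_insn *insn)
	{
		/* A leading zero byte marks a "long" name; byte 1 then
		 * holds an index into long_insn_name[], not a character. */
		if (insn->name[0] == '\0')
			return long_insn_name[(unsigned char) insn->name[1]];
		return insn->name;
	}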
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39badb9ca0b3..5c8651f36509 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
 
 u32 dump_prefix_page;
 
-void s390_reset_system(void (*func)(void *), void *data)
+void s390_reset_system(void (*fn_pre)(void),
+		       void (*fn_post)(void *), void *data)
 {
 	struct _lowcore *lc;
 
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
 	/* Store status at absolute zero */
 	store_status();
 
+	/* Call function before reset */
+	if (fn_pre)
+		fn_pre();
 	do_reset_calls();
-	if (func)
-		func(data);
+	/* Call function after reset */
+	if (fn_post)
+		fn_post(data);
 }
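The signature change splits the old single callback in two: fn_pre runs before do_reset_calls(), while the pre-reset machine state is still intact, and fn_post runs after the reset with the opaque data pointer. A hypothetical caller would look like this (my_quiesce, my_restart, and image are made-up names for illustration; the real user is __machine_kexec() below):

	static void my_quiesce(void)
	{
		/* runs before the subsystem reset, e.g. to capture state */
	}

	static void my_restart(void *data)
	{
		/* runs after the reset, e.g. to branch into a new kernel */
	}

	/* old: s390_reset_system(my_restart, image); */
	s390_reset_system(my_quiesce, my_restart, image);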
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 4685337fa7c6..fb0901ec4306 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
 	return 0;
 }
 arch_initcall(machine_kdump_pm_init);
-#endif
 
 /*
  * Start kdump: We expect here that a store status has been done on our CPU
 */
 static void __do_machine_kdump(void *image)
 {
-#ifdef CONFIG_CRASH_DUMP
 	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
 
-	setup_regs();
 	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
 	start_kdump(1);
-#endif
 }
+#endif
 
 /*
  * Check if kdump checksums are valid: We call purgatory with parameter "0"
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
 */
 static void __machine_kexec(void *data)
 {
-	struct kimage *image = data;
-
 	__arch_local_irq_stosm(0x04);	/* enable DAT */
 	pfault_fini();
 	tracing_off();
 	debug_locks_off();
-	if (image->type == KEXEC_TYPE_CRASH) {
+#ifdef CONFIG_CRASH_DUMP
+	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
+
 		lgr_info_log();
-		s390_reset_system(__do_machine_kdump, data);
-	} else {
-		s390_reset_system(__do_machine_kexec, data);
-	}
+		s390_reset_system(setup_regs, __do_machine_kdump, data);
+	} else
+#endif
+		s390_reset_system(NULL, __do_machine_kexec, data);
 	disabled_wait((unsigned long) __builtin_return_address(0));
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b499f5cbe19..370ff3a092a3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -71,9 +71,30 @@ struct pcpu {
 };
 
 static u8 boot_cpu_type;
-static u16 boot_cpu_address;
 static struct pcpu pcpu_devices[NR_CPUS];
 
+unsigned int smp_cpu_mt_shift;
+EXPORT_SYMBOL(smp_cpu_mt_shift);
+
+unsigned int smp_cpu_mtid;
+EXPORT_SYMBOL(smp_cpu_mtid);
+
+static unsigned int smp_max_threads __initdata = -1U;
+
+static int __init early_nosmt(char *s)
+{
+	smp_max_threads = 1;
+	return 0;
+}
+early_param("nosmt", early_nosmt);
+
+static int __init early_smt(char *s)
+{
+	get_option(&s, &smp_max_threads);
+	return 0;
+}
+early_param("smt", early_smt);
+
 /*
  * The smp_cpu_state_mutex must be held when changing the state or polarization
  * member of a pcpu data structure within the pcpu_devices arreay.
@@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
 /*
  * Find struct pcpu by cpu address.
 */
-static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
+static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
 {
 	int cpu;
 
@@ -299,6 +320,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 }
 
 /*
+ * Enable additional logical cpus for multi-threading.
+ */
+static int pcpu_set_smt(unsigned int mtid)
+{
+	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
+	int cc;
+
+	if (smp_cpu_mtid == mtid)
+		return 0;
+	asm volatile(
+		"	sigp	%1,0,%2	# sigp set multi-threading\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
+		: "cc");
+	if (cc == 0) {
+		smp_cpu_mtid = mtid;
+		smp_cpu_mt_shift = 0;
+		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
+			smp_cpu_mt_shift++;
+		pcpu_devices[0].address = stap();
+	}
+	return cc;
+}
+
+/*
  * Call function on an online CPU.
 */
 void smp_call_online_cpu(void (*func)(void *), void *data)
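pcpu_set_smt() issues the new "set multi-threading" SIGP order and derives smp_cpu_mt_shift, the number of low address bits reserved for the thread id. Every later hunk leans on the same address arithmetic, sketched here for reference (cpu_address() is a made-up helper, not part of the patch):

	/*
	 * A logical CPU address is the core id shifted left by
	 * smp_cpu_mt_shift plus the thread id within the core. For
	 * example with smp_cpu_mtid == 1 (two threads), mt_shift is 1,
	 * so core 3 holds the logical CPUs with addresses 6 and 7.
	 */
	static inline u16 cpu_address(u16 core_id, unsigned int thread)
	{
		return (core_id << smp_cpu_mt_shift) + thread;
	}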
@@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
 
-static void __init smp_get_save_area(int cpu, u16 address)
+static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
 {
 	void *lc = pcpu_devices[0].lowcore;
 	struct save_area_ext *sa_ext;
 	unsigned long vx_sa;
 
-	if (is_kdump_kernel())
-		return;
-	if (!OLDMEM_BASE && (address == boot_cpu_address ||
-			     ipl_info.type != IPL_TYPE_FCP_DUMP))
-		return;
 	sa_ext = dump_save_area_create(cpu);
 	if (!sa_ext)
 		panic("could not allocate memory for save area\n");
-	if (address == boot_cpu_address) {
-		/* Copy the registers of the boot cpu. */
+	if (is_boot_cpu) {
+		/* Copy the registers of the boot CPU. */
 		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
 				 SAVE_AREA_BASE - PAGE_SIZE, 0);
 		if (MACHINE_HAS_VX)
@@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
 		free_page(vx_sa);
 }
 
+/*
+ * Collect CPU state of the previous, crashed system.
+ * There are four cases:
+ * 1) standard zfcp dump
+ *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ *    The state for all CPUs except the boot CPU needs to be collected
+ *    with sigp stop-and-store-status. The boot CPU state is located in
+ *    the absolute lowcore of the memory stored in the HSA. The zcore code
+ *    will allocate the save area and copy the boot CPU state from the HSA.
+ * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
+ *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ *    The state for all CPUs except the boot CPU needs to be collected
+ *    with sigp stop-and-store-status. The firmware or the boot-loader
+ *    stored the registers of the boot CPU in the absolute lowcore in the
+ *    memory of the old system.
+ * 3) kdump and the old kernel did not store the CPU state,
+ *    or stand-alone kdump for DASD
+ *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
+ *    The state for all CPUs except the boot CPU needs to be collected
+ *    with sigp stop-and-store-status. The kexec code or the boot-loader
+ *    stored the registers of the boot CPU in the memory of the old system.
+ * 4) kdump and the old kernel stored the CPU state
+ *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
+ *    The state of all CPUs is stored in ELF sections in the memory of the
+ *    old system. The ELF sections are picked up by the crash_dump code
+ *    via elfcorehdr_addr.
+ */
+static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+{
+	unsigned int cpu, address, i, j;
+	int is_boot_cpu;
+
+	if (is_kdump_kernel())
+		/* Previous system stored the CPU states. Nothing to do. */
+		return;
+	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+		/* No previous system present, normal boot. */
+		return;
+	/* Set multi-threading state to the previous system. */
+	pcpu_set_smt(sclp_get_mtid_prev());
+	/* Collect CPU states. */
+	cpu = 0;
+	for (i = 0; i < info->configured; i++) {
+		/* Skip CPUs with different CPU type. */
+		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+			continue;
+		for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
+			address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
+			is_boot_cpu = (address == pcpu_devices[0].address);
+			if (is_boot_cpu && !OLDMEM_BASE)
+				/* Skip boot CPU for standard zfcp dump. */
+				continue;
+			/* Get state for this CPU. */
+			__smp_store_cpu_state(cpu, address, is_boot_cpu);
+		}
+	}
+}
+
 int smp_store_status(int cpu)
 {
 	unsigned long vx_sa;
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
 	return 0;
 }
 
-#else /* CONFIG_CRASH_DUMP */
-
-static inline void smp_get_save_area(int cpu, u16 address) { }
-
 #endif /* CONFIG_CRASH_DUMP */
 
 void smp_cpu_set_polarization(int cpu, int val)
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
 		use_sigp_detection = 1;
-		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
+		for (address = 0; address <= MAX_CPU_ADDRESS;
+		     address += (1U << smp_cpu_mt_shift)) {
 			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
 			    SIGP_CC_NOT_OPERATIONAL)
 				continue;
-			info->cpu[info->configured].address = address;
+			info->cpu[info->configured].core_id =
+				address >> smp_cpu_mt_shift;
 			info->configured++;
 		}
 		info->combined = info->configured;
@@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 {
 	struct pcpu *pcpu;
 	cpumask_t avail;
-	int cpu, nr, i;
+	int cpu, nr, i, j;
+	u16 address;
 
 	nr = 0;
 	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
 		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
 			continue;
-		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
-			continue;
-		pcpu = pcpu_devices + cpu;
-		pcpu->address = info->cpu[i].address;
-		pcpu->state = (i >= info->configured) ?
-			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
-		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-		set_cpu_present(cpu, true);
-		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
-			set_cpu_present(cpu, false);
-		else
-			nr++;
-		cpu = cpumask_next(cpu, &avail);
+		address = info->cpu[i].core_id << smp_cpu_mt_shift;
+		for (j = 0; j <= smp_cpu_mtid; j++) {
+			if (pcpu_find_address(cpu_present_mask, address + j))
+				continue;
+			pcpu = pcpu_devices + cpu;
+			pcpu->address = address + j;
+			pcpu->state =
+				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
+				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
+			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+			set_cpu_present(cpu, true);
+			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
+				set_cpu_present(cpu, false);
+			else
+				nr++;
+			cpu = cpumask_next(cpu, &avail);
+			if (cpu >= nr_cpu_ids)
+				break;
+		}
 	}
 	return nr;
 }
 
 static void __init smp_detect_cpus(void)
 {
-	unsigned int cpu, c_cpus, s_cpus;
+	unsigned int cpu, mtid, c_cpus, s_cpus;
 	struct sclp_cpu_info *info;
+	u16 address;
 
+	/* Get CPU information */
 	info = smp_get_cpu_info();
 	if (!info)
 		panic("smp_detect_cpus failed to allocate memory\n");
+
+	/* Find boot CPU type */
 	if (info->has_cpu_type) {
-		for (cpu = 0; cpu < info->combined; cpu++) {
-			if (info->cpu[cpu].address != boot_cpu_address)
-				continue;
-			/* The boot cpu dictates the cpu type. */
-			boot_cpu_type = info->cpu[cpu].type;
-			break;
-		}
+		address = stap();
+		for (cpu = 0; cpu < info->combined; cpu++)
+			if (info->cpu[cpu].core_id == address) {
+				/* The boot cpu dictates the cpu type. */
+				boot_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		if (cpu >= info->combined)
+			panic("Could not find boot CPU type");
 	}
+
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
+	/* Collect CPU state of previous system */
+	smp_store_cpu_states(info);
+#endif
+
+	/* Set multi-threading state for the current system */
+	mtid = sclp_get_mtid(boot_cpu_type);
+	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
+	pcpu_set_smt(mtid);
+
+	/* Print number of CPUs */
 	c_cpus = s_cpus = 0;
 	for (cpu = 0; cpu < info->combined; cpu++) {
 		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
 			continue;
-		if (cpu < info->configured) {
-			smp_get_save_area(c_cpus, info->cpu[cpu].address);
-			c_cpus++;
-		} else
-			s_cpus++;
+		if (cpu < info->configured)
+			c_cpus += smp_cpu_mtid + 1;
+		else
+			s_cpus += smp_cpu_mtid + 1;
 	}
 	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
+
+	/* Add CPUs present at boot */
 	get_online_cpus();
 	__smp_rescan_cpus(info, 0);
 	put_online_cpus();
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	struct pcpu *pcpu;
-	int rc;
+	int base, i, rc;
 
 	pcpu = pcpu_devices + cpu;
 	if (pcpu->state != CPU_STATE_CONFIGURED)
 		return -EIO;
-	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+	base = cpu - (cpu % (smp_cpu_mtid + 1));
+	for (i = 0; i <= smp_cpu_mtid; i++) {
+		if (base + i < nr_cpu_ids)
+			if (cpu_online(base + i))
+				break;
+	}
+	/*
+	 * If this is the first CPU of the core to get online
+	 * do an initial CPU reset.
+	 */
+	if (i > smp_cpu_mtid &&
+	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
 	    SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
 
@@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
 {
 	unsigned int possible, sclp, cpu;
 
-	sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
+	sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
+	sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
 	possible = setup_possible_cpus ?: nr_cpu_ids;
 	possible = min(possible, sclp);
 	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
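The possible-mask sizing now multiplies the SCLP core count by the thread count, capped by the smt= early parameter. With illustrative numbers (64 cores reported by sclp_get_max_cpu(), sclp_get_mtid_max() == 1, i.e. two threads per core), the two lines above evaluate as:

	sclp = min(smp_max_threads, sclp_get_mtid_max() + 1); /* min(-1U, 2) = 2 */
	sclp = sclp_get_max_cpu() * sclp ?: nr_cpu_ids;       /* 64 * 2 = 128    */
	/* booting with "nosmt" instead gives min(1, 2) = 1, i.e. 64 CPUs */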
@@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
 {
 	struct pcpu *pcpu = pcpu_devices;
 
-	boot_cpu_address = stap();
 	pcpu->state = CPU_STATE_CONFIGURED;
-	pcpu->address = boot_cpu_address;
+	pcpu->address = stap();
 	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
 	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
 		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
@@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
 				   const char *buf, size_t count)
 {
 	struct pcpu *pcpu;
-	int cpu, val, rc;
+	int cpu, val, rc, i;
 	char delim;
 
 	if (sscanf(buf, "%d %c", &val, &delim) != 1)
@@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
 	rc = -EBUSY;
 	/* disallow configuration changes of online cpus and cpu 0 */
 	cpu = dev->id;
-	if (cpu_online(cpu) || cpu == 0)
+	cpu -= cpu % (smp_cpu_mtid + 1);
+	if (cpu == 0)
 		goto out;
+	for (i = 0; i <= smp_cpu_mtid; i++)
+		if (cpu_online(cpu + i))
+			goto out;
 	pcpu = pcpu_devices + cpu;
 	rc = 0;
 	switch (val) {
 	case 0:
 		if (pcpu->state != CPU_STATE_CONFIGURED)
 			break;
-		rc = sclp_cpu_deconfigure(pcpu->address);
+		rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
 		if (rc)
 			break;
-		pcpu->state = CPU_STATE_STANDBY;
-		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+		for (i = 0; i <= smp_cpu_mtid; i++) {
+			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+				continue;
+			pcpu[i].state = CPU_STATE_STANDBY;
+			smp_cpu_set_polarization(cpu + i,
+						 POLARIZATION_UNKNOWN);
+		}
 		topology_expect_change();
 		break;
 	case 1:
 		if (pcpu->state != CPU_STATE_STANDBY)
 			break;
-		rc = sclp_cpu_configure(pcpu->address);
+		rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
 		if (rc)
 			break;
-		pcpu->state = CPU_STATE_CONFIGURED;
-		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+		for (i = 0; i <= smp_cpu_mtid; i++) {
+			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+				continue;
+			pcpu[i].state = CPU_STATE_CONFIGURED;
+			smp_cpu_set_polarization(cpu + i,
+						 POLARIZATION_UNKNOWN);
+		}
 		topology_expect_change();
 		break;
 	default:
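Note the user-visible consequence of this hunk: the configure attribute now operates on whole cores. Writing 0 to /sys/devices/system/cpu/cpuN/configure (assuming the usual s390 sysfs path for this attribute) rounds cpuN down to the first thread of its core, refuses if any sibling is online, and puts every present sibling thread into standby; configuring does the reverse. The SCLP call itself takes the core id, pcpu->address >> smp_cpu_mt_shift.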
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 811f542b8ed4..85565f1ff474 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -194,6 +194,14 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
 	seq_printf(m, "LPAR CPUs Reserved:   %d\n", info->cpus_reserved);
 	seq_printf(m, "LPAR CPUs Dedicated:  %d\n", info->cpus_dedicated);
 	seq_printf(m, "LPAR CPUs Shared:     %d\n", info->cpus_shared);
+	if (info->mt_installed & 0x80) {
+		seq_printf(m, "LPAR CPUs G-MTID:     %d\n",
+			   info->mt_general & 0x1f);
+		seq_printf(m, "LPAR CPUs S-MTID:     %d\n",
+			   info->mt_installed & 0x1f);
+		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n",
+			   info->mt_psmtid & 0x1f);
+	}
 }
 
 static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
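With the MT facility installed (bit 0x80 of mt_installed set), /proc/sysinfo gains three lines. The format comes from the seq_printf() calls above; the values below are purely illustrative:

	LPAR CPUs G-MTID:     1
	LPAR CPUs S-MTID:     1
	LPAR CPUs PS-MTID:    1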
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index b93bed76ea94..24ee33f1af24 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 	return mask;
 }
 
-static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+static cpumask_t cpu_thread_map(unsigned int cpu)
+{
+	cpumask_t mask;
+	int i;
+
+	cpumask_copy(&mask, cpumask_of(cpu));
+	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+		return mask;
+	cpu -= cpu % (smp_cpu_mtid + 1);
+	for (i = 0; i <= smp_cpu_mtid; i++)
+		if (cpu_present(cpu + i))
+			cpumask_set_cpu(cpu + i, &mask);
+	return mask;
+}
+
+static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
 					  struct mask_info *book,
 					  struct mask_info *socket,
 					  int one_socket_per_cpu)
 {
-	unsigned int cpu;
+	unsigned int core;
 
-	for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
-		unsigned int rcpu;
-		int lcpu;
+	for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
+		unsigned int rcore;
+		int lcpu, i;
 
-		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
-		lcpu = smp_find_processor_id(rcpu);
+		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
+		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
 		if (lcpu < 0)
 			continue;
-		cpumask_set_cpu(lcpu, &book->mask);
-		cpu_topology[lcpu].book_id = book->id;
-		cpumask_set_cpu(lcpu, &socket->mask);
-		cpu_topology[lcpu].core_id = rcpu;
-		if (one_socket_per_cpu) {
-			cpu_topology[lcpu].socket_id = rcpu;
-			socket = socket->next;
-		} else {
-			cpu_topology[lcpu].socket_id = socket->id;
+		for (i = 0; i <= smp_cpu_mtid; i++) {
+			cpu_topology[lcpu + i].book_id = book->id;
+			cpu_topology[lcpu + i].core_id = rcore;
+			cpu_topology[lcpu + i].thread_id = lcpu + i;
+			cpumask_set_cpu(lcpu + i, &book->mask);
+			cpumask_set_cpu(lcpu + i, &socket->mask);
+			if (one_socket_per_cpu)
+				cpu_topology[lcpu + i].socket_id = rcore;
+			else
+				cpu_topology[lcpu + i].socket_id = socket->id;
+			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
-		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+		if (one_socket_per_cpu)
+			socket = socket->next;
 	}
 	return socket;
 }
@@ -108,7 +126,7 @@ static void clear_masks(void)
 static union topology_entry *next_tle(union topology_entry *tle)
 {
 	if (!tle->nl)
-		return (union topology_entry *)((struct topology_cpu *)tle + 1);
+		return (union topology_entry *)((struct topology_core *)tle + 1);
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
@@ -231,9 +249,11 @@ static void update_cpu_masks(void)
 
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
+		cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
 		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
 		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
 		if (!MACHINE_HAS_TOPOLOGY) {
+			cpu_topology[cpu].thread_id = cpu;
 			cpu_topology[cpu].core_id = cpu;
 			cpu_topology[cpu].socket_id = cpu;
 			cpu_topology[cpu].book_id = cpu;
@@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
 	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
 }
 
+const struct cpumask *cpu_thread_mask(int cpu)
+{
+	return &cpu_topology[cpu].thread_mask;
+}
+
+
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_topology[cpu].core_mask;
@@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
 }
 
 static struct sched_domain_topology_level s390_topology[] = {
+	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
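The new SMT entry is deliberately the innermost level of s390_topology[]: the scheduler expects sched_domain_topology_level arrays ordered from smallest to largest span, so the thread siblings returned by cpu_thread_mask() (with smp_cpu_mtid == 1, cpu 5 maps to the pair {4,5}) nest inside the core (MC), book (BOOK), and machine (DIE) levels, and cpu_smt_flags marks the sibling domain for SMT-aware load balancing.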
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e34122e539a1..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
 #include <asm/cputime.h>
 #include <asm/vtimer.h>
 #include <asm/vtime.h>
+#include <asm/cpu_mf.h>
+#include <asm/smp.h>
 
 static void virt_timer_expire(void);
 
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 
+static DEFINE_PER_CPU(u64, mt_cycles[32]);
+static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
+static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
+
 static inline u64 get_vtimer(void)
 {
 	u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
 	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, clock, user, system, steal;
+	u64 user_scaled, system_scaled;
+	int i;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
+	/* Do MT utilization calculation */
+	if (smp_cpu_mtid) {
+		u64 cycles_new[32], *cycles_old;
+		u64 delta, mult, div;
+
+		cycles_old = this_cpu_ptr(mt_cycles);
+		if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+			mult = div = 0;
+			for (i = 0; i <= smp_cpu_mtid; i++) {
+				delta = cycles_new[i] - cycles_old[i];
+				mult += delta;
+				div += (i + 1) * delta;
+			}
+			if (mult > 0) {
+				/* Update scaling factor */
+				__this_cpu_write(mt_scaling_mult, mult);
+				__this_cpu_write(mt_scaling_div, div);
+				memcpy(cycles_old, cycles_new,
+				       sizeof(u64) * (smp_cpu_mtid + 1));
+			}
+		}
+	}
+
 	user = S390_lowcore.user_timer - ti->user_timer;
 	S390_lowcore.steal_timer -= user;
 	ti->user_timer = S390_lowcore.user_timer;
-	account_user_time(tsk, user, user);
 
 	system = S390_lowcore.system_timer - ti->system_timer;
 	S390_lowcore.steal_timer -= system;
 	ti->system_timer = S390_lowcore.system_timer;
-	account_system_time(tsk, hardirq_offset, system, system);
+
+	user_scaled = user;
+	system_scaled = system;
+	/* Do MT utilization scaling */
+	if (smp_cpu_mtid) {
+		u64 mult = __this_cpu_read(mt_scaling_mult);
+		u64 div = __this_cpu_read(mt_scaling_div);
+
+		user_scaled = (user_scaled * mult) / div;
+		system_scaled = (system_scaled * mult) / div;
+	}
+	account_user_time(tsk, user, user_scaled);
+	account_system_time(tsk, hardirq_offset, system, system_scaled);
 
 	steal = S390_lowcore.steal_timer;
 	if ((s64) steal > 0) {
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
 void vtime_account_irq_enter(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	u64 timer, system;
+	u64 timer, system, system_scaled;
 
 	timer = S390_lowcore.last_update_timer;
 	S390_lowcore.last_update_timer = get_vtimer();
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 	system = S390_lowcore.system_timer - ti->system_timer;
 	S390_lowcore.steal_timer -= system;
 	ti->system_timer = S390_lowcore.system_timer;
-	account_system_time(tsk, 0, system, system);
+	system_scaled = system;
+	/* Do MT utilization scaling */
+	if (smp_cpu_mtid) {
+		u64 mult = __this_cpu_read(mt_scaling_mult);
+		u64 div = __this_cpu_read(mt_scaling_div);
+
+		system_scaled = (system_scaled * mult) / div;
+	}
+	account_system_time(tsk, 0, system, system_scaled);
 
 	virt_timer_forward(system);
 }
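The scaling factor deserves a worked example. On my reading of the code, stcctm5 fills one cycle counter per thread of the core, counter i accumulating cycles during which i+1 threads were active, so mult/div is the ratio of core cycles to thread-weighted cycles and the scaled time is always at most the raw thread time. A self-contained sketch of the arithmetic with made-up deltas (the kernel does this inline in do_account_vtime(); mt_scale() is a name invented for this sketch):

	#include <stdint.h>

	/* delta[i] is the per-thread cycle counter difference for
	 * "i + 1 threads active", as read via stcctm5() in the patch. */
	static uint64_t mt_scale(uint64_t raw, const uint64_t *delta, int mtid)
	{
		uint64_t mult = 0, div = 0;
		int i;

		for (i = 0; i <= mtid; i++) {
			mult += delta[i];		/* core cycles in MT mode  */
			div  += (i + 1) * delta[i];	/* thread-weighted cycles  */
		}
		return div ? raw * mult / div : raw;
	}

	/*
	 * Two threads (mtid == 1): 600 cycles with one thread active, 400
	 * with both. mult = 1000, div = 600 + 2*400 = 1400, so the scaled
	 * time is 1000/1400, roughly 71% of the raw thread time:
	 *
	 *   mt_scale(1000, (uint64_t[]){ 600, 400 }, 1) == 714
	 */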