/*
 *  include/asm-s390/smp.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)

#include <asm/lowcore.h>
#include <asm/sigp.h>
#include <asm/ptrace.h>

/*
 * s390 specific smp.c headers
 */
typedef struct {
	int        interesting;
	sigp_ccode ccode;
	__u32      status;
	__u16      cpu;
} sigp_info;

extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

extern void smp_setup_cpu_possible_map(void);
extern int smp_call_function_on(void (*func) (void *info), void *info,
				int nonatomic, int wait, int cpu);
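
/*
 * Usage sketch (illustrative only, not part of the interface): run a
 * callback on one particular CPU and wait for it to complete.  The
 * callback, its argument and the target "cpu" variable are made-up
 * examples; the 0 and 1 are the nonatomic and wait arguments from the
 * declaration above.
 *
 *	static void read_cpu_nr(void *info)
 *	{
 *		*(__u16 *) info = S390_lowcore.cpu_data.cpu_nr;
 *	}
 *
 *	__u16 nr;
 *	smp_call_function_on(read_cpu_nr, &nr, 0, 1, cpu);
 */
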
#define NO_PROC_ID		0xFF		/* No processor magic marker */

/*
 *	This magic constant controls our willingness to transfer
 *	a process across CPUs. Such a transfer incurs misses on the L1
 *	cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
 *	gut feeling is this will vary by board in value. For a board
 *	with separate L2 cache it probably depends also on the RSS, and
 *	for a board with shared L2 cache it ought to decay fast as other
 *	processes are run.
 */
 
#define PROC_CHANGE_PENALTY	20		/* Schedule penalty */

#define raw_smp_processor_id()	(S390_lowcore.cpu_data.cpu_nr)

extern int smp_get_cpu(cpumask_t cpu_map);
extern void smp_put_cpu(int cpu);
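
/*
 * Usage sketch (illustrative only): reserve a usable CPU from a mask,
 * run something on it, and release it again.  cpu_online_map is just an
 * assumed example mask; the work done in between is omitted.
 *
 *	int cpu;
 *
 *	cpu = smp_get_cpu(cpu_online_map);
 *	(... run work on "cpu" here, e.g. via smp_call_function_on() ...)
 *	smp_put_cpu(cpu);
 */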

static inline __u16 hard_smp_processor_id(void)
{
	__u16 cpu_address;

	/* "store cpu address": physical CPU address of the running CPU */
	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

/*
 * returns 1 if cpu is in stopped/check stopped state or not operational
 * returns 0 otherwise
 */
static inline int
smp_cpu_not_running(int cpu)
{
	__u32 status;

	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_order_code_accepted:
	case sigp_status_stored:
		/* stopped (0x40) or check-stop (0x10) state? */
		if (status & 0x50)
			return 1;
		break;
	case sigp_not_operational:
		return 1;
	default:
		break;
	}
	return 0;
}
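
/*
 * Usage sketch (illustrative only): spin until a CPU has really stopped,
 * e.g. after it has been signalled to go down; the surrounding hotplug
 * or shutdown context is assumed and not shown.
 *
 *	while (!smp_cpu_not_running(cpu))
 *		cpu_relax();
 */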

#define cpu_logical_map(cpu) (cpu)

/*
 * CPU hotplug hooks for the generic hotplug code (roles as commonly
 * implemented, see arch/s390/kernel/smp.c): __cpu_up() starts a secondary
 * CPU, __cpu_disable() takes the calling CPU out of service, __cpu_die()
 * waits on another CPU until the dead CPU has stopped, and cpu_die() is
 * the last code run by an outgoing CPU.
 */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void) __attribute__ ((noreturn));
extern int __cpu_up(unsigned int cpu);

#endif /* __KERNEL__ && CONFIG_SMP && !__ASSEMBLY__ */

#ifndef CONFIG_SMP
static inline int
smp_call_function_on(void (*func) (void *info), void *info,
		     int nonatomic, int wait, int cpu)
{
	func(info);
	return 0;
}

static inline void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
}

#define smp_cpu_not_running(cpu)	1
#define smp_get_cpu(cpu) ({ 0; })
#define smp_put_cpu(cpu) ({ 0; })
#define smp_setup_cpu_possible_map()	do { } while (0)
#endif /* !CONFIG_SMP */

#endif /* __ASM_SMP_H */