#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/init.h>
/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;	/* used by num_booting_cpus() below */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
#ifdef CONFIG_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
DECLARE_PER_CPU(int, cpu_number);
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
extern int safe_smp_processor_id(void);
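/*
 * Illustrative usage sketch, not part of the original header (the caller
 * shown is hypothetical): generic code normally uses smp_processor_id()
 * from <linux/smp.h>, which ends up in raw_smp_processor_id() above,
 * i.e. a read of this CPU's cpu_number per-CPU variable.
 * safe_smp_processor_id() is the slower fallback for crash/NMI paths
 * where the per-CPU setup may not be trustworthy.
 *
 *	int cpu;
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();	// stable while preemption is off
 *	printk(KERN_DEBUG "running on CPU %d\n", cpu);
 *	preempt_enable();
 */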
/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
{
	return cpus_weight(cpu_callout_map);
}
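/*
 * Sketch, not taken from this file: cpu_callout_map tracks the CPUs the
 * boot processor has "called out" to start, while cpu_callin_map tracks
 * those that have checked back in, so bring-up code could compare the
 * two roughly like this (the warning text is made up for illustration):
 *
 *	if (cpus_weight(cpu_callin_map) != num_booting_cpus())
 *		printk(KERN_WARNING "not all called-out CPUs checked in\n");
 */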
#else /* !CONFIG_SMP */
#define safe_smp_processor_id() 0
#endif /* CONFIG_SMP */
#ifdef CONFIG_X86_LOCAL_APIC
static __inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
# ifdef APIC_DEFINITION
extern int hard_smp_processor_id(void);
# else
# include <mach_apicdef.h>
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
}
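/*
 * For reference only (an assumption about the xAPIC register layout, not
 * something this header states): the physical APIC ID sits in the top
 * byte of the memory-mapped APIC_ID register, so the read above is
 * roughly equivalent to the open-coded form
 *
 *	u32 reg = *(u32 *)(APIC_BASE + APIC_ID);
 *	int apicid = (reg >> 24) & 0xff;
 *
 * GET_APIC_ID() hides the exact shift/mask behind mach_apicdef.h.
 */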
# endif /* APIC_DEFINITION */
#else /* CONFIG_X86_LOCAL_APIC */
# ifndef CONFIG_SMP
#  define hard_smp_processor_id() 0
# endif
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_SMP_H */