path: root/include
author		Ingo Molnar <mingo@elte.hu>	2009-01-28 17:12:55 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-28 17:12:55 -0500
commit		6a385db5ce7f1fd2c68ec511e44587b67dab8fca (patch)
tree		9324c8ae6f7be54b9fdbd6b60f759292aa727b1f /include
parent		18e352e4a73465349711a9324767e1b2453383e2 (diff)
parent		4369f1fb7cd4cf777312f43e1cb9aa5504fc4125 (diff)
Merge branch 'core/percpu' into x86/core
Conflicts:
	kernel/irq/handle.c
Diffstat (limited to 'include')
 -rw-r--r--  include/asm-generic/percpu.h       |  52
 -rw-r--r--  include/asm-generic/sections.h     |   2
 -rw-r--r--  include/asm-generic/vmlinux.lds.h  |  48
 -rw-r--r--  include/linux/interrupt.h          |   1
 -rw-r--r--  include/linux/irq.h                |  86
 -rw-r--r--  include/linux/irqnr.h              |   1
 -rw-r--r--  include/linux/magic.h              |   1
 -rw-r--r--  include/linux/percpu.h             |  41
 -rw-r--r--  include/linux/sched.h              |  16
 -rw-r--r--  include/linux/stackprotector.h     |  16
 -rw-r--r--  include/linux/topology.h           |   6
 11 files changed, 240 insertions(+), 30 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b0e63c672ebd..00f45ff081a6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
 	__typeof__(type) per_cpu_var(name)
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long.  percpu_read() evaluates to an lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var().  Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var)					\
+  ({								\
+	typeof(per_cpu_var(var)) __tmp_var__;			\
+	__tmp_var__ = get_cpu_var(var);				\
+	put_cpu_var(var);					\
+	__tmp_var__;						\
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op)			\
+do {								\
+	get_cpu_var(var) op val;				\
+	put_cpu_var(var);					\
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val)	__percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val)	__percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val)	__percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val)	__percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val)	__percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val)	__percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
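
The generic accessors above pair with variables declared via DEFINE_PER_CPU. As a hedged illustration (the per-cpu counter hits and both functions are hypothetical, not part of this commit), a minimal sketch of how a caller composes them:

/* Hypothetical usage sketch; assumes kernel context. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, hits);

static void record_hit(void)
{
	/* Atomic w.r.t. preemption: the generic fallback brackets the
	 * update with get_cpu_var()/put_cpu_var(). */
	percpu_add(hits, 1);
}

static int read_hits(void)
{
	/* Non-lvalue read of this CPU's copy of the counter. */
	return percpu_read(hits);
}

On architectures that override these hooks, both calls can compile down to a single instruction with no preempt_disable()/preempt_enable() pair.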
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 79a7ff925bf8..4ce48e878530 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
 extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c61fab1dd2f8..f3180a85c66a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -430,12 +430,48 @@
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
-#define PERCPU(align)							\
-	. = ALIGN(align);						\
-	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to the output section for the percpu area. If
+ * @vaddr is not blank, it specifies an explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank. Be warned that
+ * the output PHDR is sticky: if @phdr is specified, the next output
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+ * This macro defines three symbols, __per_cpu_load, __per_cpu_start
+ * and __per_cpu_end. The first one is the vaddr of the loaded percpu
+ * init data. __per_cpu_start equals @vaddr and __per_cpu_end is the
+ * end offset.
+ */
+#define PERCPU_VADDR(vaddr, phdr)					\
+	VMLINUX_SYMBOL(__per_cpu_load_abs) = .;				\
+	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load_abs)	\
+				- LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		VMLINUX_SYMBOL(__per_cpu_load) = LOADADDR(.data.percpu) + LOAD_OFFSET;\
+		*(.data.percpu.first)					\
 		*(.data.percpu.page_aligned)				\
 		*(.data.percpu)						\
 		*(.data.percpu.shared_aligned)				\
-	}								\
-	VMLINUX_SYMBOL(__per_cpu_end) = .;
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	} phdr								\
+	. = VMLINUX_SYMBOL(__per_cpu_load_abs) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Aligns to @align and outputs the output section for the percpu
+ * area. This macro doesn't manipulate @vaddr or @phdr; __per_cpu_load
+ * and __per_cpu_start will be identical.
+ */
+#define PERCPU(align)							\
+	. = ALIGN(align);						\
+	PERCPU_VADDR( , )
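
To make the two entry points concrete, a hedged sketch of how an architecture's vmlinux.lds.S might invoke them; a script uses one or the other, since each emits the .data.percpu output section, and the ':percpu' PHDR name is an assumption, not something this diff defines:

	/* Simple case: percpu area at the natural link address. */
	PERCPU(PAGE_SIZE)

	/* Remapped case: percpu symbols based at vaddr 0, emitted into a
	 * dedicated PHDR (assumed declared as 'percpu' in PHDRS). Note
	 * the sticky-PHDR caveat documented above. */
	PERCPU_VADDR(0, :percpu)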
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9127f6b51a39..472f11765f60 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f899b502f186..27a67536511e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -182,11 +182,11 @@ struct irq_desc {
 	unsigned int irqs_unhandled;
 	spinlock_t lock;
 #ifdef CONFIG_SMP
-	cpumask_t affinity;
+	cpumask_var_t affinity;
 	unsigned int cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t pending_mask;
+	cpumask_var_t pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @cpu:	cpu which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumasks if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+							bool boot)
+{
+	int node;
+
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	node = cpu_to_node(cpu);
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+							bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
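
A hedged sketch of a caller, modeled on the sparse-irq descriptor paths these helpers exist for (the function move_irq_desc_masks is illustrative, not part of this commit):

/* Illustrative only: give a new descriptor its masks, then carry the
 * old descriptor's state over during an irq_desc migration. */
static bool move_irq_desc_masks(struct irq_desc *old_desc,
				struct irq_desc *new_desc, int cpu)
{
	/* Post-boot path: node-local GFP_ATOMIC allocations. When the
	 * cpumasks are embedded (!CONFIG_CPUMASK_OFFSTACK), the
	 * allocation calls degenerate to 'return true'. */
	if (!init_alloc_desc_masks(new_desc, cpu, false))
		return false;

	/* Redundant when the cpumasks are embedded in irq_desc. */
	init_copy_desc_masks(old_desc, new_desc);
	return true;
}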
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 86af92e9e84c..887477bc2ab0 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)		\
 	for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 0b4df7eba852..5b4e28bcb788 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -49,4 +49,5 @@
 #define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
 #define INOTIFYFS_SUPER_MAGIC	0x2BAD1DEA
 
+#define STACK_END_MAGIC		0x57AC6E9D
 #endif /* __LINUX_MAGIC_H__ */
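
STACK_END_MAGIC is a sentinel for the deep end of a task's kernel stack; the stack_not_used() helper added to sched.h below walks up from the same end. A hedged sketch of the arming/checking pattern (both functions are illustrative; the real sites live outside this diff):

/* Sketch only; end_of_stack() yields the lowest usable stack word. */
static void arm_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend = end_of_stack(tsk);

	*stackend = STACK_END_MAGIC;	/* overflow sentinel */
}

static bool task_stack_overflowed(struct task_struct *tsk)
{
	/* A clobbered sentinel means the stack grew past its end. */
	return *end_of_stack(tsk) != STACK_END_MAGIC;
}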
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873a..0e24202b5a4e 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -9,34 +9,39 @@
 #include <asm/percpu.h>
 
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name)					\
-	__attribute__((__section__(".data.percpu")))			\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+#define PER_CPU_BASE_SECTION ".data.percpu"
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
+
+#else
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
-	__attribute__((__section__(SHARED_ALIGNED_SECTION)))		\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name		\
-	____cacheline_aligned_in_smp
+#define PER_CPU_BASE_SECTION ".data"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
-	__attribute__((__section__(".data.percpu.page_aligned")))	\
+#define DEFINE_PER_CPU_SECTION(type, name, section)			\
+	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
 	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
-#define DEFINE_PER_CPU(type, name)					\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU(type, name)					\
+	DEFINE_PER_CPU_SECTION(type, name, "")
 
 #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
-	DEFINE_PER_CPU(type, name)
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
-	DEFINE_PER_CPU(type, name)
-#endif
+	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
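
For concreteness, a hedged walk-through of the string pasting: under CONFIG_SMP, DEFINE_PER_CPU_FIRST(struct foo, bar) concatenates the adjacent section literals at compile time, so it is roughly equivalent to:

/* Approximate SMP expansion of DEFINE_PER_CPU_FIRST(struct foo, bar);
 * ".data.percpu" ".first" paste into ".data.percpu.first". */
__attribute__((__section__(".data.percpu.first")))
PER_CPU_ATTRIBUTES __typeof__(struct foo) per_cpu__bar;

On !SMP builds the same definition lands in plain ".data", since the base section is ".data" and the first/shared suffixes are empty.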
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 02e16d207304..681394f3a0cf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1161,10 +1161,9 @@ struct task_struct {
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -2070,6 +2069,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do {	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
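
A hedged usage sketch of stack_not_used(), in the spirit of exit-time stack accounting (function name, threshold, and message are illustrative):

/* Illustrative only: warn when a task exits having come close to
 * exhausting its kernel stack. */
static void report_stack_usage(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	unsigned long unused = stack_not_used(p);

	if (unused < 1024)
		printk(KERN_WARNING "%s came within %lu bytes of stack overflow\n",
		       p->comm, unused);
#endif
}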
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644
index 000000000000..6f3e54c704c0
--- /dev/null
+++ b/include/linux/stackprotector.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
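
Architectures that enable CONFIG_CC_STACKPROTECTOR supply asm/stackprotector.h themselves. A hedged sketch of the shape such an implementation takes (arch details such as mixing in a cycle counter are omitted; this is not this commit's code):

/* Sketch of an arch-side boot_init_stack_canary(). */
static __always_inline void boot_init_stack_canary(void)
{
	unsigned long canary;

	/* Entropy may still be scarce this early in boot. */
	get_random_bytes(&canary, sizeof(canary));

	/* Publish the value where GCC's -fstack-protector prologue
	 * and epilogue instrumentation will look for it. */
	current->stack_canary = canary;
}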
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e632d29f0544..a16b9e06f2e5 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)	cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)	cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)	cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */
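
A hedged example of a consumer of the new fallbacks (the function is illustrative): on architectures that don't override topology_core_cpumask(), it degrades to cpumask_of(cpu), so the loop below visits only @cpu itself:

/* Illustrative only: walk the CPUs sharing a package with @cpu. */
static void visit_core_siblings(int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		printk(KERN_DEBUG "cpu%d shares a package with cpu%d\n",
		       cpu, sibling);
}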