Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h                |    1
-rw-r--r--  include/linux/blktrace_api.h        |    5
-rw-r--r--  include/linux/bootmem.h             |   36
-rw-r--r--  include/linux/coda_psdev.h          |   15
-rw-r--r--  include/linux/decompress/bunzip2.h  |   10
-rw-r--r--  include/linux/decompress/generic.h  |   33
-rw-r--r--  include/linux/decompress/inflate.h  |   13
-rw-r--r--  include/linux/decompress/mm.h       |   87
-rw-r--r--  include/linux/decompress/unlzma.h   |   12
-rw-r--r--  include/linux/elfcore.h             |    9
-rw-r--r--  include/linux/ftrace.h              |  223
-rw-r--r--  include/linux/ftrace_irq.h          |    2
-rw-r--r--  include/linux/hardirq.h             |   73
-rw-r--r--  include/linux/in6.h                 |    2
-rw-r--r--  include/linux/interrupt.h           |    1
-rw-r--r--  include/linux/irq.h                 |   86
-rw-r--r--  include/linux/irqnr.h               |    1
-rw-r--r--  include/linux/kernel.h              |  136
-rw-r--r--  include/linux/kprobes.h             |   22
-rw-r--r--  include/linux/lockdep.h             |   50
-rw-r--r--  include/linux/magic.h               |    1
-rw-r--r--  include/linux/memory.h              |    6
-rw-r--r--  include/linux/mmiotrace.h           |   78
-rw-r--r--  include/linux/module.h              |    5
-rw-r--r--  include/linux/mutex.h               |    5
-rw-r--r--  include/linux/nubus.h               |    2
-rw-r--r--  include/linux/percpu.h              |  149
-rw-r--r--  include/linux/reiserfs_fs.h         |   56
-rw-r--r--  include/linux/ring_buffer.h         |   20
-rw-r--r--  include/linux/sched.h               |   31
-rw-r--r--  include/linux/slab_def.h            |   68
-rw-r--r--  include/linux/slob_def.h            |    9
-rw-r--r--  include/linux/slub_def.h            |   72
-rw-r--r--  include/linux/smp.h                 |    6
-rw-r--r--  include/linux/socket.h              |    6
-rw-r--r--  include/linux/stackprotector.h      |   16
-rw-r--r--  include/linux/string.h              |    7
-rw-r--r--  include/linux/timer.h               |   93
-rw-r--r--  include/linux/topology.h            |    6
-rw-r--r--  include/linux/trace_clock.h         |   19
-rw-r--r--  include/linux/tracepoint.h          |    7
-rw-r--r--  include/linux/types.h               |    6
-rw-r--r--  include/linux/vmalloc.h             |    4
43 files changed, 1131 insertions(+), 358 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6fce2fc2d124..78199151c00b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
 typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
 
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
 int acpi_boot_table_init (void);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
 
 #ifdef __KERNEL__
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
 struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
 
+extern struct attribute_group blk_trace_attr_group;
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 				unsigned long physaddr,
 				unsigned long size,
 				int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-				 unsigned long align,
-				 unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 					  unsigned long size,
 					  unsigned long align,
 					  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
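For orientation, a minimal usage sketch (not part of the patch) of the wrappers kept above, assuming an __init caller; the function name is hypothetical. alloc_bootmem() panics on failure, while the _nopanic variant returns NULL:

static void __init example_early_setup(void)
{
	/* expands to __alloc_bootmem(1024, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) */
	void *table = alloc_bootmem(1024);

	/* optional allocation: returns NULL instead of panicking */
	void *buf = alloc_bootmem_nopanic(4096);

	if (!buf)
		printk(KERN_WARNING "optional early buffer unavailable\n");
	/* ... populate table ... */
}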
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 07ae8f846055..5b5d4731f956 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -6,6 +6,7 @@
 #define CODA_PSDEV_MAJOR 67
 #define MAX_CODADEVS  5	   /* how many do we allow */
 
+#ifdef __KERNEL__
 struct kstatfs;
 
 /* communication pending/processing queues */
@@ -24,7 +25,6 @@ static inline struct venus_comm *coda_vcp(struct super_block *sb)
 	return (struct venus_comm *)((sb)->s_fs_info);
 }
 
-
 /* upcalls */
 int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
 int venus_getattr(struct super_block *sb, struct CodaFid *fid,
@@ -64,6 +64,12 @@ int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
 int venus_fsync(struct super_block *sb, struct CodaFid *fid);
 int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
 
+/*
+ * Statistics
+ */
+
+extern struct venus_comm coda_comms[];
+#endif /* __KERNEL__ */
 
 /* messages between coda filesystem in kernel and Venus */
 struct upc_req {
@@ -82,11 +88,4 @@ struct upc_req {
 #define REQ_WRITE   0x4
 #define REQ_ABORT   0x8
 
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-
 #endif
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 000000000000..115272137a9c
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_BUNZIP2_H
+#define DECOMPRESS_BUNZIP2_H
+
+int bunzip2(unsigned char *inbuf, int len,
+	int(*fill)(void*, unsigned int),
+	int(*flush)(void*, unsigned int),
+	unsigned char *output,
+	int *pos,
+	void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 000000000000..6dfb856327bb
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,33 @@
+#ifndef DECOMPRESS_GENERIC_H
+#define DECOMPRESS_GENERIC_H
+
+/* Minimal chunksize to be read.
+ * Bzip2 prefers at least 4096
+ * Lzma prefers 0x10000 */
+#define COMPR_IOBUF_SIZE 4096
+
+typedef int (*decompress_fn) (unsigned char *inbuf, int len,
+			      int(*fill)(void*, unsigned int),
+			      int(*writebb)(void*, unsigned int),
+			      unsigned char *output,
+			      int *posp,
+			      void(*error)(char *x));
+
+/* inbuf   - input buffer
+ * len     - length of pre-read data in inbuf
+ * fill    - function to fill inbuf if empty
+ * writebb - function to write out the output buffer
+ * posp    - if non-null, input position (number of bytes read) will be
+ *	     returned here
+ *
+ * If len != 0, inbuf is initialized (with as much data), and fill
+ * should not be called.
+ * If len = 0, inbuf is allocated, but empty. Its size is COMPR_IOBUF_SIZE.
+ * fill should be called (repeatedly...) to read data, at most
+ * COMPR_IOBUF_SIZE at a time.
+ */
+
+/* Utility routine to detect the decompression method */
+decompress_fn decompress_method(const unsigned char *inbuf, int len,
+				const char **name);
+
+#endif
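A hedged illustration (not from the patch) of the contract documented above: a fill() callback that feeds the decompressor from an in-memory source. The my_* names are hypothetical:

#include <linux/string.h>

static const unsigned char *my_src;	/* hypothetical input stream */
static int my_src_len, my_src_pos;

static int my_fill(void *buf, unsigned int size)
{
	int n = my_src_len - my_src_pos;

	if (n > (int)size)
		n = size;
	memcpy(buf, my_src + my_src_pos, n);
	my_src_pos += n;
	return n;	/* bytes supplied; 0 signals end of input */
}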
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 000000000000..f9b06ccc3e5c
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,13 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
+/* Other housekeeping constants */
+#define INBUFSIZ 4096
+
+int gunzip(unsigned char *inbuf, int len,
+	int(*fill)(void*, unsigned int),
+	int(*flush)(void*, unsigned int),
+	unsigned char *output,
+	int *pos,
+	void(*error_fn)(char *x));
+#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 000000000000..12ff8c3f1d05
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,87 @@
+/*
+ * linux/compr_mm.h
+ *
+ * Memory management for pre-boot and ramdisk uncompressors
+ *
+ * Authors: Alain Knaff <alain@knaff.lu>
+ *
+ */
+
+#ifndef DECOMPR_MM_H
+#define DECOMPR_MM_H
+
+#ifdef STATIC
+
+/* Code active when included from pre-boot environment: */
+
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+	void *p;
+
+	if (size < 0)
+		error("Malloc error");
+	if (!malloc_ptr)
+		malloc_ptr = free_mem_ptr;
+
+	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */
+
+	p = (void *)malloc_ptr;
+	malloc_ptr += size;
+
+	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+		error("Out of memory");
+
+	malloc_count++;
+	return p;
+}
+
+static void free(void *where)
+{
+	malloc_count--;
+	if (!malloc_count)
+		malloc_ptr = free_mem_ptr;
+}
+
+#define large_malloc(a) malloc(a)
+#define large_free(a) free(a)
+
+#define set_error_fn(x)
+
+#define INIT
+
+#else /* STATIC */
+
+/* Code active when compiled standalone for use when loading ramdisk: */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Use defines rather than static inline in order to avoid spurious
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate) */
+
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+
+#define large_malloc(a) vmalloc(a)
+#define large_free(a) vfree(a)
+
+static void(*error)(char *m);
+#define set_error_fn(x) error = x;
+
+#define INIT __init
+#define STATIC
+
+#include <linux/init.h>
+
+#endif /* STATIC */
+
+#endif /* DECOMPR_MM_H */
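As an aside (not part of the patch): the pre-boot malloc() above is a bump allocator. It only rounds a cursor up to a 4-byte boundary and advances it, and free() resets the arena once the last outstanding allocation is released. A user-space sketch of the same rounding, assuming a deliberately misaligned start:

#include <stdio.h>

int main(void)
{
	unsigned long malloc_ptr = 0x10001;	/* misaligned cursor */

	malloc_ptr = (malloc_ptr + 3) & ~3UL;	/* align up */
	printf("aligned: %#lx\n", malloc_ptr);	/* prints 0x10004 */
	return 0;
}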
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 000000000000..7796538f1bf4
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@
+#ifndef DECOMPRESS_UNLZMA_H
+#define DECOMPRESS_UNLZMA_H
+
+int unlzma(unsigned char *, int,
+	int(*fill)(void*, unsigned int),
+	int(*flush)(void*, unsigned int),
+	unsigned char *output,
+	int *posp,
+	void(*error)(char *x)
+	);
+
+#endif
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 5ca54d77079f..7605c5e9589f 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
 #endif
 }
 
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+	ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+	elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
 static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
 {
 #ifdef ELF_CORE_COPY_TASK_REGS
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 677432b9cb7e..e1583f2639b0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
 
-#include <linux/linkage.h>
-#include <linux/fs.h>
-#include <linux/ktime.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/trace_clock.h>
 #include <linux/kallsyms.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
 #include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/ftrace.h>
 
 #ifdef CONFIG_FUNCTION_TRACER
 
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 		     loff_t *ppos);
 #endif
 
+struct ftrace_func_command {
+	struct list_head	list;
+	char			*name;
+	int			(*func)(char *func, char *cmd,
+					char *params, int enable);
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
-#include <asm/ftrace.h>
+
+int ftrace_arch_code_modify_prepare(void);
+int ftrace_arch_code_modify_post_process(void);
+
+struct seq_file;
+
+struct ftrace_probe_ops {
+	void			(*func)(unsigned long ip,
+					unsigned long parent_ip,
+					void **data);
+	int			(*callback)(unsigned long ip, void **data);
+	void			(*free)(void **data);
+	int			(*print)(struct seq_file *m,
+					 unsigned long ip,
+					 struct ftrace_probe_ops *ops,
+					 void *data);
+};
+
+extern int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+			      void *data);
+extern void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+				void *data);
+extern void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
 
 enum {
 	FTRACE_FL_FREE = (1 << 0),
@@ -119,6 +154,9 @@ struct dyn_ftrace {
 int ftrace_force_update(void);
 void ftrace_set_filter(unsigned char *buf, int len, int reset);
 
+int register_ftrace_command(struct ftrace_func_command *cmd);
+int unregister_ftrace_command(struct ftrace_func_command *cmd);
+
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
 extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +164,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+
+#ifndef FTRACE_ADDR
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +178,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 #endif
 
 /**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
  * @mod: module structure if called by module load initialization
  * @rec: the mcount call site record
  * @addr: the address that the call site should be calling
@@ -181,7 +223,6 @@ extern int ftrace_make_nop(struct module *mod,
  */
 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 
-
 /* May be defined in arch */
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
@@ -198,6 +239,14 @@ extern void ftrace_enable_daemon(void);
 # define ftrace_disable_daemon()		do { } while (0)
 # define ftrace_enable_daemon()			do { } while (0)
 static inline void ftrace_release(void *start, unsigned long size) { }
+static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+	return -EINVAL;
+}
+static inline int unregister_ftrace_command(char *cmd_name)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* totally disable ftrace - can not re-enable after this */
@@ -233,24 +282,25 @@ static inline void __ftrace_enabled_restore(int enabled)
 #endif
 }
 
-#ifdef CONFIG_FRAME_POINTER
-/* TODO: need to fix this for ARM */
+#ifndef HAVE_ARCH_CALLER_ADDR
+# ifdef CONFIG_FRAME_POINTER
 # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
 # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
 # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
 # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
 # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
 # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
 # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
-#else
+# else
 # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
 # define CALLER_ADDR1 0UL
 # define CALLER_ADDR2 0UL
 # define CALLER_ADDR3 0UL
 # define CALLER_ADDR4 0UL
 # define CALLER_ADDR5 0UL
 # define CALLER_ADDR6 0UL
-#endif
+# endif
+#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
 
 #ifdef CONFIG_IRQSOFF_TRACER
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +318,6 @@ static inline void __ftrace_enabled_restore(int enabled)
 # define trace_preempt_off(a0, a1)		do { } while (0)
 #endif
 
-#ifdef CONFIG_TRACING
-extern int ftrace_dump_on_oops;
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
-
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
-/**
- * ftrace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __ftrace_printk is an internal function for ftrace_printk and
- * the @ip is passed in via the ftrace_printk macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving ftrace_printks scattered around in
- * your code.
- */
-# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
-extern int
-__ftrace_printk(unsigned long ip, const char *fmt, ...)
-	__attribute__ ((format (printf, 2, 3)));
-extern void ftrace_dump(void);
-#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
-static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
-
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
-static inline int
-ftrace_printk(const char *fmt, ...)
-{
-	return 0;
-}
-static inline void ftrace_dump(void) { }
-#endif
-
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
 extern void ftrace_init_module(struct module *mod,
@@ -327,36 +329,6 @@ ftrace_init_module(struct module *mod,
 			unsigned long *start, unsigned long *end) { }
 #endif
 
-enum {
-	POWER_NONE = 0,
-	POWER_CSTATE = 1,
-	POWER_PSTATE = 2,
-};
-
-struct power_trace {
-#ifdef CONFIG_POWER_TRACER
-	ktime_t			stamp;
-	ktime_t			end;
-	int			type;
-	int			state;
-#endif
-};
-
-#ifdef CONFIG_POWER_TRACER
-extern void trace_power_start(struct power_trace *it, unsigned int type,
-			      unsigned int state);
-extern void trace_power_mark(struct power_trace *it, unsigned int type,
-			     unsigned int state);
-extern void trace_power_end(struct power_trace *it);
-#else
-static inline void trace_power_start(struct power_trace *it, unsigned int type,
-				     unsigned int state) { }
-static inline void trace_power_mark(struct power_trace *it, unsigned int type,
-				    unsigned int state) { }
-static inline void trace_power_end(struct power_trace *it) { }
-#endif
-
-
 /*
  * Structure that defines an entry function trace.
  */
@@ -380,6 +352,30 @@ struct ftrace_graph_ret {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
+/*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want it to keep being traced by the usual function
  * tracer if the function graph tracer is not configured.
@@ -490,6 +486,21 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
+extern int ftrace_dump_on_oops;
+
 #endif /* CONFIG_TRACING */
 
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
 #endif /* _LINUX_FTRACE_H */
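A hedged sketch (not from the patch) of how the new struct ftrace_func_command hook is meant to be wired up; the command name and handler body are hypothetical:

static int my_cmd_func(char *func, char *cmd, char *params, int enable)
{
	/* invoked when "<func>:my_cmd[:params]" is written to the
	 * function filter file; return 0 on success */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "my_cmd",
	.func	= my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}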
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+#ifdef CONFIG_FTRACE_NMI_ENTER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..faa1cf848bcd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
  *
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
  *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
+#define NMI_BITS	1
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS	12
+#define MAX_HARDIRQ_BITS 10
 
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
 #endif
 
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
 #endif
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
 
 #define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
 #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
 
 #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
 
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()	(preempt_count() & NMI_MASK)
+
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
 # define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
 #define nmi_enter()						\
 	do {							\
 		ftrace_nmi_enter();				\
-		lockdep_off();					\
-		rcu_nmi_enter();				\
-		__irq_enter();					\
+		BUG_ON(in_nmi());				\
+		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		lockdep_off();					\
+		rcu_nmi_enter();				\
+		trace_hardirq_enter();				\
 	} while (0)
 
 #define nmi_exit()						\
 	do {							\
-		__irq_exit();					\
+		trace_hardirq_exit();				\
 		rcu_nmi_exit();					\
 		lockdep_on();					\
-		ftrace_nmi_exit();				\
+		BUG_ON(!in_nmi());				\
+		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		ftrace_nmi_exit();				\
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
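The new layout can be sanity-checked in isolation. A small user-space sketch (assumption: HARDIRQ_BITS takes its default of 10) that reproduces the mask values quoted in the comment above:

#include <stdio.h>

#define __IRQ_MASK(x) ((1UL << (x)) - 1)

int main(void)
{
	unsigned long preempt_mask = __IRQ_MASK(8)  << 0;	/* 0x000000ff */
	unsigned long softirq_mask = __IRQ_MASK(8)  << 8;	/* 0x0000ff00 */
	unsigned long hardirq_mask = __IRQ_MASK(10) << 16;	/* 0x03ff0000 */
	unsigned long nmi_mask     = __IRQ_MASK(1)  << 26;	/* 0x04000000 */

	printf("%#010lx %#010lx %#010lx %#010lx\n",
	       preempt_mask, softirq_mask, hardirq_mask, nmi_mask);
	return 0;
}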
diff --git a/include/linux/in6.h b/include/linux/in6.h
index bc492048c349..718bf21c5754 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -44,11 +44,11 @@ struct in6_addr
  * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
  * in network byte order, not in host byte order as are the IPv4 equivalents
  */
+#ifdef __KERNEL__
 extern const struct in6_addr in6addr_any;
 #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
 extern const struct in6_addr in6addr_loopback;
 #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
-#ifdef __KERNEL__
 extern const struct in6_addr in6addr_linklocal_allnodes;
 #define IN6ADDR_LINKLOCAL_ALLNODES_INIT	\
 		{ { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9127f6b51a39..472f11765f60 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f899b502f186..27a67536511e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -182,11 +182,11 @@ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
+	cpumask_var_t		affinity;
 	unsigned int		cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @cpu:	cpu which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+							bool boot)
+{
+	int node;
+
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	node = cpu_to_node(cpu);
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASKS_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+							bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
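A hedged sketch of how a caller might combine the two helpers above when moving an irq_desc to another cpu/node; the function is hypothetical and error handling is minimal:

static bool my_migrate_desc_masks(struct irq_desc *old_desc,
				  struct irq_desc *new_desc, int cpu)
{
	/* false means a cpumask allocation failed (offstack case) */
	if (!init_alloc_desc_masks(new_desc, cpu, false))
		return false;

	init_copy_desc_masks(old_desc, new_desc);
	return true;
}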
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 86af92e9e84c..887477bc2ab0 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)			\
 	for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7fa371898e3e..7742798c9208 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -242,6 +242,19 @@ extern struct ratelimit_state printk_ratelimit_state;
 extern int printk_ratelimit(void);
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 				   unsigned int interval_msec);
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define printk_once(x...) ({			\
+	static int __print_once = 1;		\
+						\
+	if (__print_once) {			\
+		__print_once = 0;		\
+		printk(x);			\
+	}					\
+})
+
 #else
 static inline int vprintk(const char *s, va_list args)
 	__attribute__ ((format (printf, 1, 0)));
@@ -253,6 +266,10 @@ static inline int printk_ratelimit(void) { return 0; }
 static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
 					  unsigned int interval_msec)	\
 		{ return false; }
+
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_once(x...) printk(x)
+
 #endif
 
 extern int printk_needs_cpu(int cpu);
@@ -368,6 +385,125 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 #endif
 
 /*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_on/tracing_off and tracing_start()/tracing_stop()
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space debugfs/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop/tracing_start has slightly more overhead. It is used
+ * by things like suspend to ram where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
+#ifdef CONFIG_RING_BUFFER
+void tracing_on(void);
+void tracing_off(void);
+/* tracing_off_permanent stops recording with no way to bring it back */
+void tracing_off_permanent(void);
+int tracing_is_on(void);
+#else
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline void tracing_off_permanent(void) { }
+static inline int tracing_is_on(void) { return 0; }
+#endif
+#ifdef CONFIG_TRACING
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+static inline void __attribute__ ((format (printf, 1, 2)))
+____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...)			\
+do {									\
+	if (0)								\
+		____trace_printk_check_format(fmt, ##args);		\
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ * the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code.
+ */
+
+#define trace_printk(fmt, args...)					\
+do {									\
+	static const char *trace_printk_fmt				\
+	__attribute__((section("__trace_printk_fmt")));			\
+									\
+	if (!trace_printk_fmt)						\
+		trace_printk_fmt = fmt;					\
+									\
+	__trace_printk_check_format(fmt, ##args);			\
+	__trace_printk(_THIS_IP_, trace_printk_fmt, ##args);		\
+} while (0)
+
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+
+#define ftrace_vprintk(fmt, vargs)					\
+do {									\
+	static const char *trace_printk_fmt				\
+	__attribute__((section("__trace_printk_fmt")));			\
+									\
+	if (!trace_printk_fmt)						\
+		trace_printk_fmt = fmt;					\
+									\
+	__ftrace_vprintk(_THIS_IP_, trace_printk_fmt, vargs);		\
+} while (0)
+
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(void);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+trace_printk(const char *fmt, ...)
+{
+	return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+	return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif /* CONFIG_TRACING */
+
+/*
  * Display an IP address in readable format.
  */
 
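A hedged usage sketch for the two additions above; the surrounding function is hypothetical. printk_once() fires a single time no matter how often the path runs, while trace_printk() records into the ftrace ring buffer rather than the console:

static void my_hot_path(int cpu)
{
	printk_once(KERN_INFO "my_hot_path: first invocation (cpu %d)\n", cpu);

	/* low overhead; read back via debugfs/tracing/trace */
	trace_printk("cpu=%d entering hot path\n", cpu);
}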
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 32851eef48f0..2ec6cc14a114 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -182,6 +182,14 @@ struct kprobe_blackpoint {
 DECLARE_PER_CPU(struct kprobe *, current_kprobe);
 DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+/*
+ * For #ifdef avoidance:
+ */
+static inline int kprobes_built_in(void)
+{
+	return 1;
+}
+
 #ifdef CONFIG_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				   struct pt_regs *regs);
@@ -271,8 +279,16 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
 
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES: */
 
+static inline int kprobes_built_in(void)
+{
+	return 0;
+}
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	return 0;
+}
 static inline struct kprobe *get_kprobe(void *addr)
 {
 	return NULL;
@@ -329,5 +345,5 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
 static inline void kprobe_flush_task(struct task_struct *tk)
 {
 }
 #endif /* CONFIG_KPROBES */
 #endif /* _LINUX_KPROBES_H */
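A hedged sketch of the "#ifdef avoidance" the new stubs enable: a caller can test kprobes_built_in() in plain C and let dead-code elimination drop the call, instead of wrapping the site in #ifdef CONFIG_KPROBES. The function below is hypothetical, modeled on a fault-path filter:

static int my_notify_page_fault(struct pt_regs *regs, int trapnr)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trapnr))
			ret = 1;	/* fault consumed by a kprobe */
		preempt_enable();
	}
	return ret;
}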
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..5a58ea3e91e9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,43 +20,10 @@ struct lockdep_map;
 #include <linux/stacktrace.h>
 
 /*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
  */
-enum lock_usage_bit
-{
-	LOCK_USED = 0,
-	LOCK_USED_IN_HARDIRQ,
-	LOCK_USED_IN_SOFTIRQ,
-	LOCK_ENABLED_SOFTIRQS,
-	LOCK_ENABLED_HARDIRQS,
-	LOCK_USED_IN_HARDIRQ_READ,
-	LOCK_USED_IN_SOFTIRQ_READ,
-	LOCK_ENABLED_SOFTIRQS_READ,
-	LOCK_ENABLED_HARDIRQS_READ,
-	LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED			(1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
-
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
-
-#define LOCKF_ENABLED_IRQS_READ \
-		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES		(1+3*4)
 
 #define MAX_LOCKDEP_SUBCLASSES		8UL
 
@@ -97,7 +64,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[LOCK_USAGE_STATES];
+	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
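A hedged sketch of how the new reclaim hooks are intended to bracket direct reclaim; the function below is hypothetical. Any allocation made inside the bracket can then be validated against the recorded reclaim context:

static void my_direct_reclaim(gfp_t gfp_mask)
{
	lockdep_set_current_reclaim_state(gfp_mask);
	/* ... shrink slabs/pages; nested allocations are now
	 *     checked via lockdep_trace_alloc() ... */
	lockdep_clear_current_reclaim_state();
}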
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 0b4df7eba852..5b4e28bcb788 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -49,4 +49,5 @@
 #define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
 #define INOTIFYFS_SUPER_MAGIC	0x2BAD1DEA
 
+#define STACK_END_MAGIC		0x57AC6E9D
 #endif /* __LINUX_MAGIC_H__ */
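A hedged sketch of the kind of check STACK_END_MAGIC supports: the value is planted as a canary at the end of a task's stack and verified later. The helper below is hypothetical:

static inline int my_stack_corrupted(unsigned long *end_of_stack)
{
	/* canary overwritten -> the stack overflowed */
	return *end_of_stack != STACK_END_MAGIC;
}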
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 3fdc10806d31..86a6c0f0518d 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -99,4 +99,10 @@ enum mem_add_context { BOOT, HOTPLUG };
 #define hotplug_memory_notifier(fn, pri) do { } while (0)
 #endif
 
+/*
+ * Kernel text modification mutex, used for code patching. Users of this lock
+ * can sleep.
+ */
+extern struct mutex text_mutex;
+
 #endif /* _LINUX_MEMORY_H_ */
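A hedged sketch of the locking discipline the comment above prescribes; the patching routine is hypothetical:

static void my_patch_text(void *addr, const void *opcode, size_t len)
{
	mutex_lock(&text_mutex);	/* may sleep */
	/* ... rewrite kernel text at addr ... */
	mutex_unlock(&text_mutex);
}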
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 139d7c88d9c9..3d1b7bde1283 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -1,5 +1,5 @@
-#ifndef MMIOTRACE_H
-#define MMIOTRACE_H
+#ifndef _LINUX_MMIOTRACE_H
+#define _LINUX_MMIOTRACE_H
 
 #include <linux/types.h>
 #include <linux/list.h>
@@ -13,28 +13,34 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
 				unsigned long condition, struct pt_regs *);
 
 struct kmmio_probe {
-	struct list_head list; /* kmmio internal list */
-	unsigned long addr; /* start location of the probe point */
-	unsigned long len; /* length of the probe region */
-	kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
-	kmmio_post_handler_t post_handler; /* Called after addr is executed */
-	void *private;
+	/* kmmio internal list: */
+	struct list_head	list;
+	/* start location of the probe point: */
+	unsigned long		addr;
+	/* length of the probe region: */
+	unsigned long		len;
+	/* Called before addr is executed: */
+	kmmio_pre_handler_t	pre_handler;
+	/* Called after addr is executed: */
+	kmmio_post_handler_t	post_handler;
+	void			*private;
 };
 
+extern unsigned int kmmio_count;
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+
+#ifdef CONFIG_MMIOTRACE
 /* kmmio is active by some kmmio_probes? */
 static inline int is_kmmio_active(void)
 {
-	extern unsigned int kmmio_count;
 	return kmmio_count;
 }
 
-extern int register_kmmio_probe(struct kmmio_probe *p);
-extern void unregister_kmmio_probe(struct kmmio_probe *p);
-
 /* Called from page fault handler. */
 extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
 
-#ifdef CONFIG_MMIOTRACE
 /* Called from ioremap.c */
 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
 							void __iomem *addr);
@@ -43,7 +49,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr);
 /* For anyone to insert markers. Remember trailing newline. */
 extern int mmiotrace_printk(const char *fmt, ...)
 				__attribute__ ((format (printf, 1, 2)));
-#else
+#else /* !CONFIG_MMIOTRACE: */
+static inline int is_kmmio_active(void)
+{
+	return 0;
+}
+
+static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+{
+	return 0;
+}
+
 static inline void mmiotrace_ioremap(resource_size_t offset,
 			unsigned long size, void __iomem *addr)
 {
@@ -63,28 +79,28 @@ static inline int mmiotrace_printk(const char *fmt, ...)
 #endif /* CONFIG_MMIOTRACE */
 
 enum mm_io_opcode {
-	MMIO_READ = 0x1,     /* struct mmiotrace_rw */
-	MMIO_WRITE = 0x2,    /* struct mmiotrace_rw */
-	MMIO_PROBE = 0x3,    /* struct mmiotrace_map */
-	MMIO_UNPROBE = 0x4,  /* struct mmiotrace_map */
-	MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
+	MMIO_READ	= 0x1,	/* struct mmiotrace_rw */
+	MMIO_WRITE	= 0x2,	/* struct mmiotrace_rw */
+	MMIO_PROBE	= 0x3,	/* struct mmiotrace_map */
+	MMIO_UNPROBE	= 0x4,	/* struct mmiotrace_map */
+	MMIO_UNKNOWN_OP	= 0x5,	/* struct mmiotrace_rw */
 };
 
 struct mmiotrace_rw {
-	resource_size_t phys;	/* PCI address of register */
-	unsigned long value;
-	unsigned long pc;	/* optional program counter */
-	int map_id;
-	unsigned char opcode;	/* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
-	unsigned char width;	/* size of register access in bytes */
+	resource_size_t	phys;	/* PCI address of register */
+	unsigned long	value;
+	unsigned long	pc;	/* optional program counter */
+	int		map_id;
+	unsigned char	opcode;	/* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
+	unsigned char	width;	/* size of register access in bytes */
 };
 
 struct mmiotrace_map {
-	resource_size_t phys;	/* base address in PCI space */
-	unsigned long virt;	/* base virtual address */
-	unsigned long len;	/* mapping size */
-	int map_id;
-	unsigned char opcode;	/* MMIO_PROBE or MMIO_UNPROBE */
+	resource_size_t	phys;	/* base address in PCI space */
+	unsigned long	virt;	/* base virtual address */
+	unsigned long	len;	/* mapping size */
+	int		map_id;
+	unsigned char	opcode;	/* MMIO_PROBE or MMIO_UNPROBE */
 };
 
 /* in kernel/trace/trace_mmiotrace.c */
@@ -94,4 +110,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
94extern void mmio_trace_mapping(struct mmiotrace_map *map); 110extern void mmio_trace_mapping(struct mmiotrace_map *map);
95extern int mmio_trace_printk(const char *fmt, va_list args); 111extern int mmio_trace_printk(const char *fmt, va_list args);
96 112
97#endif /* MMIOTRACE_H */ 113#endif /* _LINUX_MMIOTRACE_H */
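
The reshuffle above makes register_kmmio_probe()/unregister_kmmio_probe() visible unconditionally and gives !CONFIG_MMIOTRACE builds no-op stubs for is_kmmio_active() and kmmio_handler(). A minimal registration sketch, assuming the pre/post handler signatures declared at the top of this header; the probe window address and handler bodies are made up for illustration:

	#include <linux/mmiotrace.h>
	#include <linux/module.h>
	#include <linux/mm.h>

	/* Hypothetical handlers; signatures follow kmmio_pre/post_handler_t. */
	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
			   unsigned long addr)
	{
		/* runs before the faulting MMIO instruction executes */
	}

	static void my_post(struct kmmio_probe *p, unsigned long condition,
			    struct pt_regs *regs)
	{
		/* runs after the instruction has executed */
	}

	static struct kmmio_probe my_probe = {
		.addr		= 0xfe000000UL,	/* made-up probe window */
		.len		= PAGE_SIZE,	/* length of the probe region */
		.pre_handler	= my_pre,
		.post_handler	= my_post,
	};

	static int __init my_init(void)
	{
		return register_kmmio_probe(&my_probe);
	}
	module_init(my_init);
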
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc1..22d9878e868c 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -329,6 +329,11 @@ struct module
329 unsigned int num_tracepoints; 329 unsigned int num_tracepoints;
330#endif 330#endif
331 331
332#ifdef CONFIG_TRACING
333 const char **trace_bprintk_fmt_start;
334 unsigned int num_trace_bprintk_fmt;
335#endif
336
332#ifdef CONFIG_MODULE_UNLOAD 337#ifdef CONFIG_MODULE_UNLOAD
333 /* What modules depend on me? */ 338 /* What modules depend on me? */
334 struct list_head modules_which_use_me; 339 struct list_head modules_which_use_me;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4f8072..3069ec7e0ab8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
50 atomic_t count; 50 atomic_t count;
51 spinlock_t wait_lock; 51 spinlock_t wait_lock;
52 struct list_head wait_list; 52 struct list_head wait_list;
53#ifdef CONFIG_DEBUG_MUTEXES 53#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
54 struct thread_info *owner; 54 struct thread_info *owner;
55#endif
56#ifdef CONFIG_DEBUG_MUTEXES
55 const char *name; 57 const char *name;
56 void *magic; 58 void *magic;
57#endif 59#endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
68 struct list_head list; 70 struct list_head list;
69 struct task_struct *task; 71 struct task_struct *task;
70#ifdef CONFIG_DEBUG_MUTEXES 72#ifdef CONFIG_DEBUG_MUTEXES
71 struct mutex *lock;
72 void *magic; 73 void *magic;
73#endif 74#endif
74}; 75};
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 7382af374731..e137b3c486a7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -237,6 +237,7 @@ struct nubus_dirent
237 int mask; 237 int mask;
238}; 238};
239 239
240#ifdef __KERNEL__
240struct nubus_board { 241struct nubus_board {
241 struct nubus_board* next; 242 struct nubus_board* next;
242 struct nubus_dev* first_dev; 243 struct nubus_dev* first_dev;
@@ -351,6 +352,7 @@ void nubus_get_rsrc_mem(void* dest,
351void nubus_get_rsrc_str(void* dest, 352void nubus_get_rsrc_str(void* dest,
352 const struct nubus_dirent *dirent, 353 const struct nubus_dirent *dirent,
353 int maxlen); 354 int maxlen);
355#endif /* __KERNEL__ */
354 356
355/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */ 357/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
356static inline void *nubus_slot_addr(int slot) 358static inline void *nubus_slot_addr(int slot)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873a..545b068bcb70 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -8,35 +8,46 @@
8 8
9#include <asm/percpu.h> 9#include <asm/percpu.h>
10 10
11#ifndef PER_CPU_BASE_SECTION
12#ifdef CONFIG_SMP
13#define PER_CPU_BASE_SECTION ".data.percpu"
14#else
15#define PER_CPU_BASE_SECTION ".data"
16#endif
17#endif
18
11#ifdef CONFIG_SMP 19#ifdef CONFIG_SMP
12#define DEFINE_PER_CPU(type, name) \
13 __attribute__((__section__(".data.percpu"))) \
14 PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
15 20
16#ifdef MODULE 21#ifdef MODULE
17#define SHARED_ALIGNED_SECTION ".data.percpu" 22#define PER_CPU_SHARED_ALIGNED_SECTION ""
18#else 23#else
19#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned" 24#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
20#endif 25#endif
26#define PER_CPU_FIRST_SECTION ".first"
21 27
22#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ 28#else
23 __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ 29
24 PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ 30#define PER_CPU_SHARED_ALIGNED_SECTION ""
25 ____cacheline_aligned_in_smp 31#define PER_CPU_FIRST_SECTION ""
32
33#endif
26 34
27#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ 35#define DEFINE_PER_CPU_SECTION(type, name, section) \
28 __attribute__((__section__(".data.percpu.page_aligned"))) \ 36 __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
29 PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name 37 PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
30#else 38
31#define DEFINE_PER_CPU(type, name) \ 39#define DEFINE_PER_CPU(type, name) \
32 PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name 40 DEFINE_PER_CPU_SECTION(type, name, "")
41
42#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
43 DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
44 ____cacheline_aligned_in_smp
33 45
34#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ 46#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
35 DEFINE_PER_CPU(type, name) 47 DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
36 48
37#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ 49#define DEFINE_PER_CPU_FIRST(type, name) \
38 DEFINE_PER_CPU(type, name) 50 DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
39#endif
40 51
41#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) 52#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
42#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) 53#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
@@ -65,52 +76,98 @@
65 76
66#ifdef CONFIG_SMP 77#ifdef CONFIG_SMP
67 78
79#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
80
81/* minimum unit size, also is the maximum supported allocation size */
82#define PCPU_MIN_UNIT_SIZE (16UL << PAGE_SHIFT)
83
84/*
 85 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggyback
 86 * on the first chunk when the arch manually allocates and maps it for
 87 * faster access (as part of a large page mapping, for example). Note
 88 * that the dynamic percpu allocator covers both static and dynamic
 89 * areas, so these values are larger than PERCPU_MODULE_RESERVE.
 90 *
 91 * On a typical configuration with modules enabled, the values below
 92 * leave about 8k of free space on the first chunk after boot on both
 93 * x86_32 and x86_64. With module support disabled, the margin is
 94 * much tighter.
95 */
96#ifndef PERCPU_DYNAMIC_RESERVE
97# if BITS_PER_LONG > 32
98# ifdef CONFIG_MODULES
99# define PERCPU_DYNAMIC_RESERVE (6 << PAGE_SHIFT)
100# else
101# define PERCPU_DYNAMIC_RESERVE (4 << PAGE_SHIFT)
102# endif
103# else
104# ifdef CONFIG_MODULES
105# define PERCPU_DYNAMIC_RESERVE (4 << PAGE_SHIFT)
106# else
107# define PERCPU_DYNAMIC_RESERVE (2 << PAGE_SHIFT)
108# endif
109# endif
110#endif /* PERCPU_DYNAMIC_RESERVE */
111
112extern void *pcpu_base_addr;
113
114typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
115typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
116
117extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
118 size_t static_size, size_t unit_size,
119 size_t free_size, void *base_addr,
120 pcpu_populate_pte_fn_t populate_pte_fn);
121
122/*
123 * Use this to get at a CPU's copy of a dynamically allocated per-cpu
124 * object. Non-atomic access to the current CPU's copy should be
125 * combined with get_cpu()/put_cpu().
126 */
127#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
128
129#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
130
68struct percpu_data { 131struct percpu_data {
69 void *ptrs[1]; 132 void *ptrs[1];
70}; 133};
71 134
72#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) 135#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
73/* 136
74 * Use this to get to a cpu's version of the per-cpu object dynamically 137#define per_cpu_ptr(ptr, cpu) \
75 * allocated. Non-atomic access to the current CPU's version should 138({ \
76 * probably be combined with get_cpu()/put_cpu(). 139 struct percpu_data *__p = __percpu_disguise(ptr); \
77 */ 140 (__typeof__(ptr))__p->ptrs[(cpu)]; \
78#define percpu_ptr(ptr, cpu) \
79({ \
80 struct percpu_data *__p = __percpu_disguise(ptr); \
81 (__typeof__(ptr))__p->ptrs[(cpu)]; \
82}) 141})
83 142
84extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); 143#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
85extern void percpu_free(void *__pdata); 144
145extern void *__alloc_percpu(size_t size, size_t align);
146extern void free_percpu(void *__pdata);
86 147
87#else /* CONFIG_SMP */ 148#else /* CONFIG_SMP */
88 149
89#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 150#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
90 151
91static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) 152static inline void *__alloc_percpu(size_t size, size_t align)
92{ 153{
93 return kzalloc(size, gfp); 154 /*
155 * kmalloc() can't easily honor larger alignment, so WARN on it.
156 * Larger alignment should only be needed for module percpu
157 * sections on SMP, which never take this path.
158 */
159 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
160 return kzalloc(size, GFP_KERNEL);
94} 161}
95 162
96static inline void percpu_free(void *__pdata) 163static inline void free_percpu(void *p)
97{ 164{
98 kfree(__pdata); 165 kfree(p);
99} 166}
100 167
101#endif /* CONFIG_SMP */ 168#endif /* CONFIG_SMP */
102 169
103#define percpu_alloc_mask(size, gfp, mask) \ 170#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
104 __percpu_alloc_mask((size), (gfp), &(mask)) 171 __alignof__(type))
105
106#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
107
108/* (legacy) interface for use without CPU hotplug handling */
109
110#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
111 cpu_possible_map)
112#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
113#define free_percpu(ptr) percpu_free((ptr))
114#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
115 172
116#endif /* __LINUX_PERCPU_H */ 173#endif /* __LINUX_PERCPU_H */
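
With the legacy percpu_alloc()/percpu_free() wrappers gone, dynamic per-cpu memory goes through alloc_percpu()/free_percpu(), and per_cpu_ptr() now works uniformly on SMP and UP. A usage sketch with a hypothetical stats structure (my_stats and the helper names are illustrative, not from the patch):

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/errno.h>

	struct my_stats {			/* hypothetical example type */
		unsigned long events;
	};

	static struct my_stats *stats;		/* one copy per possible CPU */

	static int my_stats_init(void)
	{
		stats = alloc_percpu(struct my_stats);	/* zero-initialized */
		if (!stats)
			return -ENOMEM;
		return 0;
	}

	static void my_stats_bump(void)
	{
		/* pin the CPU for non-atomic access to our copy */
		int cpu = get_cpu();

		per_cpu_ptr(stats, cpu)->events++;
		put_cpu();
	}

	static unsigned long my_stats_sum(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->events;
		return sum;
	}
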
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index bc5114d35e99..e356c99f0659 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -28,8 +28,6 @@
28#include <linux/reiserfs_fs_sb.h> 28#include <linux/reiserfs_fs_sb.h>
29#endif 29#endif
30 30
31struct fid;
32
33/* 31/*
34 * include/linux/reiser_fs.h 32 * include/linux/reiser_fs.h
35 * 33 *
@@ -37,6 +35,33 @@ struct fid;
37 * 35 *
38 */ 36 */
39 37
38/* ioctl's command */
39#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
 40/* define the following flags to be the same as in ext2, so that
 41 chattr(1) and lsattr(1) will work with us. */
42#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS
43#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS
44#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION
45#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION
46
47#ifdef __KERNEL__
48/* the 32 bit compat definitions with int argument */
49#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int)
50#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
51#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
52#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
53#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
54
55/* Locking primitives */
 56/* Right now we still fall back to (un)lock_kernel, but eventually
 57 this will evolve into real per-fs locks */
58#define reiserfs_write_lock( sb ) lock_kernel()
59#define reiserfs_write_unlock( sb ) unlock_kernel()
60
61/* xattr stuff */
62#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
63struct fid;
64
40/* in reading the #defines, it may help to understand that they employ 65/* in reading the #defines, it may help to understand that they employ
41 the following abbreviations: 66 the following abbreviations:
42 67
@@ -698,6 +723,7 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key)
698/* object identifier for root dir */ 723/* object identifier for root dir */
699#define REISERFS_ROOT_OBJECTID 2 724#define REISERFS_ROOT_OBJECTID 2
700#define REISERFS_ROOT_PARENT_OBJECTID 1 725#define REISERFS_ROOT_PARENT_OBJECTID 1
726
701extern struct reiserfs_key root_key; 727extern struct reiserfs_key root_key;
702 728
703/* 729/*
@@ -1540,7 +1566,6 @@ struct reiserfs_iget_args {
1540/* FUNCTION DECLARATIONS */ 1566/* FUNCTION DECLARATIONS */
1541/***************************************************************************/ 1567/***************************************************************************/
1542 1568
1543/*#ifdef __KERNEL__*/
1544#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12) 1569#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
1545 1570
1546#define journal_trans_half(blocksize) \ 1571#define journal_trans_half(blocksize) \
@@ -2178,29 +2203,6 @@ long reiserfs_compat_ioctl(struct file *filp,
2178 unsigned int cmd, unsigned long arg); 2203 unsigned int cmd, unsigned long arg);
2179int reiserfs_unpack(struct inode *inode, struct file *filp); 2204int reiserfs_unpack(struct inode *inode, struct file *filp);
2180 2205
2181/* ioctl's command */
2182#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
2183/* define following flags to be the same as in ext2, so that chattr(1),
2184 lsattr(1) will work with us. */
2185#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS
2186#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS
2187#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION
2188#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION
2189
2190/* the 32 bit compat definitions with int argument */
2191#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int)
2192#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
2193#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
2194#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
2195#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
2196
2197/* Locking primitives */
2198/* Right now we are still falling back to (un)lock_kernel, but eventually that
2199 would evolve into real per-fs locks */
2200#define reiserfs_write_lock( sb ) lock_kernel()
2201#define reiserfs_write_unlock( sb ) unlock_kernel()
2202
2203/* xattr stuff */
2204#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
2205 2206
2207#endif /* __KERNEL__ */
2206#endif /* _LINUX_REISER_FS_H */ 2208#endif /* _LINUX_REISER_FS_H */
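
Hoisting the ioctl numbers above the new #ifdef __KERNEL__ keeps them visible to userspace, which is the point of the guard: only the 32-bit compat definitions, the BKL-based locking wrappers, and the xattr helpers are kernel-only now. A hedged userspace sketch against the exported REISERFS_IOC_UNPACK, assuming the header is usable from a userspace build (unpack_file and the error handling are illustrative):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/reiserfs_fs.h>	/* REISERFS_IOC_UNPACK */

	/* Unpack a tail-packed file in place (sketch only). */
	int unpack_file(const char *path)
	{
		int fd = open(path, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, REISERFS_IOC_UNPACK, 1L);
		close(fd);
		return ret;
	}
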
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..b1a0068a5557 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
8struct ring_buffer_iter; 8struct ring_buffer_iter;
9 9
10/* 10/*
11 * Don't reference this struct directly, use functions below. 11 * Don't refer to this struct directly, use functions below.
12 */ 12 */
13struct ring_buffer_event { 13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27; 14 u32 type:2, len:3, time_delta:27;
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
74 74
75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
76 76
77struct ring_buffer_event * 77struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
78ring_buffer_lock_reserve(struct ring_buffer *buffer, 78 unsigned long length);
79 unsigned long length,
80 unsigned long *flags);
81int ring_buffer_unlock_commit(struct ring_buffer *buffer, 79int ring_buffer_unlock_commit(struct ring_buffer *buffer,
82 struct ring_buffer_event *event, 80 struct ring_buffer_event *event);
83 unsigned long flags);
84int ring_buffer_write(struct ring_buffer *buffer, 81int ring_buffer_write(struct ring_buffer *buffer,
85 unsigned long length, void *data); 82 unsigned long length, void *data);
86 83
@@ -124,14 +121,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
124u64 ring_buffer_time_stamp(int cpu); 121u64 ring_buffer_time_stamp(int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 122void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
126 123
127void tracing_on(void); 124size_t ring_buffer_page_len(void *page);
128void tracing_off(void); 125
129void tracing_off_permanent(void);
130 126
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); 127void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); 128void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer, 129int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
134 void **data_page, int cpu, int full); 130 size_t len, int cpu, int full);
135 131
136enum ring_buffer_flags { 132enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0, 133 RB_FL_OVERWRITE = 1 << 0,
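
The flags arguments are gone because the ring buffer now manages interrupt state itself across reserve/commit. A write-path sketch with the new signatures (ring_buffer_event_data() is declared earlier in this header; write_sample is an illustrative name):

	#include <linux/ring_buffer.h>
	#include <linux/errno.h>

	/* Reserve space, fill in one u64 sample, then commit it. */
	static int write_sample(struct ring_buffer *buffer, u64 sample)
	{
		struct ring_buffer_event *event;
		u64 *body;

		event = ring_buffer_lock_reserve(buffer, sizeof(sample));
		if (!event)
			return -EBUSY;

		body = ring_buffer_event_data(event);
		*body = sample;

		return ring_buffer_unlock_commit(buffer, event);
	}
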
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8c216e057c94..5b9424eaa58f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -137,6 +137,8 @@ extern unsigned long nr_uninterruptible(void);
137extern unsigned long nr_active(void); 137extern unsigned long nr_active(void);
138extern unsigned long nr_iowait(void); 138extern unsigned long nr_iowait(void);
139 139
140extern unsigned long get_parent_ip(unsigned long addr);
141
140struct seq_file; 142struct seq_file;
141struct cfs_rq; 143struct cfs_rq;
142struct task_group; 144struct task_group;
@@ -331,7 +333,9 @@ extern signed long schedule_timeout(signed long timeout);
331extern signed long schedule_timeout_interruptible(signed long timeout); 333extern signed long schedule_timeout_interruptible(signed long timeout);
332extern signed long schedule_timeout_killable(signed long timeout); 334extern signed long schedule_timeout_killable(signed long timeout);
333extern signed long schedule_timeout_uninterruptible(signed long timeout); 335extern signed long schedule_timeout_uninterruptible(signed long timeout);
336asmlinkage void __schedule(void);
334asmlinkage void schedule(void); 337asmlinkage void schedule(void);
338extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
335 339
336struct nsproxy; 340struct nsproxy;
337struct user_namespace; 341struct user_namespace;
@@ -1178,10 +1182,9 @@ struct task_struct {
1178 pid_t pid; 1182 pid_t pid;
1179 pid_t tgid; 1183 pid_t tgid;
1180 1184
1181#ifdef CONFIG_CC_STACKPROTECTOR
1182 /* Canary value for the -fstack-protector gcc feature */ 1185 /* Canary value for the -fstack-protector gcc feature */
1183 unsigned long stack_canary; 1186 unsigned long stack_canary;
1184#endif 1187
1185 /* 1188 /*
1186 * pointers to (original) parent process, youngest child, younger sibling, 1189 * pointers to (original) parent process, youngest child, younger sibling,
1187 * older sibling, respectively. (p->father can be replaced with 1190 * older sibling, respectively. (p->father can be replaced with
@@ -1328,6 +1331,7 @@ struct task_struct {
1328 int lockdep_depth; 1331 int lockdep_depth;
1329 unsigned int lockdep_recursion; 1332 unsigned int lockdep_recursion;
1330 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1333 struct held_lock held_locks[MAX_LOCK_DEPTH];
1334 gfp_t lockdep_reclaim_gfp;
1331#endif 1335#endif
1332 1336
1333/* journalling filesystem info */ 1337/* journalling filesystem info */
@@ -1670,6 +1674,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1670 return set_cpus_allowed_ptr(p, &new_mask); 1674 return set_cpus_allowed_ptr(p, &new_mask);
1671} 1675}
1672 1676
1677/*
1678 * Architectures can set this to 1 if they have specified
1679 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1680 * but discover during bootup that sched_clock() is in fact
1681 * reliable after all:
1682 */
1683#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1684extern int sched_clock_stable;
1685#endif
1686
1673extern unsigned long long sched_clock(void); 1687extern unsigned long long sched_clock(void);
1674 1688
1675extern void sched_clock_init(void); 1689extern void sched_clock_init(void);
@@ -2087,6 +2101,19 @@ static inline int object_is_on_stack(void *obj)
2087 2101
2088extern void thread_info_cache_init(void); 2102extern void thread_info_cache_init(void);
2089 2103
2104#ifdef CONFIG_DEBUG_STACK_USAGE
2105static inline unsigned long stack_not_used(struct task_struct *p)
2106{
2107 unsigned long *n = end_of_stack(p);
2108
2109 do { /* Skip over canary */
2110 n++;
2111 } while (!*n);
2112
2113 return (unsigned long)n - (unsigned long)end_of_stack(p);
2114}
2115#endif
2116
2090/* set thread flags in other task's structures 2117/* set thread flags in other task's structures
2091 * - see asm/thread_info.h for TIF_xxxx flags available 2118 * - see asm/thread_info.h for TIF_xxxx flags available
2092 */ 2119 */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..f4523651fa42 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <trace/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
47found: 66found:
48#ifdef CONFIG_ZONE_DMA 67#ifdef CONFIG_ZONE_DMA
49 if (flags & GFP_DMA) 68 if (flags & GFP_DMA)
50 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 69 cachep = malloc_sizes[i].cs_dmacachep;
51 flags); 70 else
52#endif 71#endif
53 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 72 cachep = malloc_sizes[i].cs_cachep;
73
74 ret = kmem_cache_alloc_notrace(cachep, flags);
75
76 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
77 size, slab_buffer_size(cachep), flags);
78
79 return ret;
54 } 80 }
55 return __kmalloc(size, flags); 81 return __kmalloc(size, flags);
56} 82}
@@ -59,8 +85,25 @@ found:
59extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 85extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
60extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 86extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
61 87
62static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 88#ifdef CONFIG_KMEMTRACE
89extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
90 gfp_t flags,
91 int nodeid);
92#else
93static __always_inline void *
94kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
95 gfp_t flags,
96 int nodeid)
97{
98 return kmem_cache_alloc_node(cachep, flags, nodeid);
99}
100#endif
101
102static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63{ 103{
104 struct kmem_cache *cachep;
105 void *ret;
106
64 if (__builtin_constant_p(size)) { 107 if (__builtin_constant_p(size)) {
65 int i = 0; 108 int i = 0;
66 109
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
78found: 121found:
79#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
80 if (flags & GFP_DMA) 123 if (flags & GFP_DMA)
81 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 124 cachep = malloc_sizes[i].cs_dmacachep;
82 flags, node); 125 else
83#endif 126#endif
84 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 127 cachep = malloc_sizes[i].cs_cachep;
85 flags, node); 128
129 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
130
131 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
132 ret, size, slab_buffer_size(cachep),
133 flags, node);
134
135 return ret;
86 } 136 }
87 return __kmalloc_node(size, flags, node); 137 return __kmalloc_node(size, flags, node);
88} 138}
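
Because kmalloc() is now __always_inline, the __builtin_constant_p() test is resolved at compile time: a constant size collapses to a direct malloc_sizes[] cache pick plus kmem_cache_alloc_notrace(), with kmemtrace recording both the requested and the rounded-up size. An illustration (struct foo and the helpers are hypothetical):

	#include <linux/slab.h>

	struct foo {				/* hypothetical example type */
		int a, b;
	};

	static struct foo *make_foo(void)
	{
		/*
		 * Constant size: the inlined path picks the matching
		 * malloc_sizes[] cache and calls kmem_cache_alloc_notrace(),
		 * emitting one kmemtrace_mark_alloc() event.
		 */
		return kmalloc(sizeof(struct foo), GFP_KERNEL);
	}

	static void *make_buf(size_t n)
	{
		/* Non-constant size: falls through to __kmalloc(). */
		return kmalloc(n, GFP_KERNEL);
	}
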
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..9e3a575b2c30 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <trace/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -121,10 +122,23 @@ struct kmem_cache {
121#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) 122#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
122 123
123/* 124/*
125 * Maximum kmalloc object size handled by SLUB. Larger object allocations
126 * are passed through to the page allocator. The page allocator "fastpath"
127 * is relatively slow, so this value must be high enough that
128 * performance-critical objects are still allocated through the SLUB fastpath.
129 *
130 * This should be dropped to PAGE_SIZE / 2 once the page allocator
131 * "fastpath" becomes competitive with the slab allocator fastpaths.
132 */
133#define SLUB_MAX_SIZE (PAGE_SIZE)
134
135#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
136
137/*
124 * We keep the general caches in an array of slab caches that are used for 138 * We keep the general caches in an array of slab caches that are used for
125 * 2^x bytes of allocations. 139 * 2^x bytes of allocations.
126 */ 140 */
127extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1]; 141extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
128 142
129/* 143/*
130 * Sorry that the following has to be that ugly but some versions of GCC 144 * Sorry that the following has to be that ugly but some versions of GCC
@@ -204,15 +218,33 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
204void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 218void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
205void *__kmalloc(size_t size, gfp_t flags); 219void *__kmalloc(size_t size, gfp_t flags);
206 220
221#ifdef CONFIG_KMEMTRACE
222extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
223#else
224static __always_inline void *
225kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
226{
227 return kmem_cache_alloc(s, gfpflags);
228}
229#endif
230
207static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 231static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
208{ 232{
209 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 233 unsigned int order = get_order(size);
234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
235
236 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
237 size, PAGE_SIZE << order, flags);
238
239 return ret;
210} 240}
211 241
212static __always_inline void *kmalloc(size_t size, gfp_t flags) 242static __always_inline void *kmalloc(size_t size, gfp_t flags)
213{ 243{
244 void *ret;
245
214 if (__builtin_constant_p(size)) { 246 if (__builtin_constant_p(size)) {
215 if (size > PAGE_SIZE) 247 if (size > SLUB_MAX_SIZE)
216 return kmalloc_large(size, flags); 248 return kmalloc_large(size, flags);
217 249
218 if (!(flags & SLUB_DMA)) { 250 if (!(flags & SLUB_DMA)) {
@@ -221,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
221 if (!s) 253 if (!s)
222 return ZERO_SIZE_PTR; 254 return ZERO_SIZE_PTR;
223 255
224 return kmem_cache_alloc(s, flags); 256 ret = kmem_cache_alloc_notrace(s, flags);
257
258 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
259 _THIS_IP_, ret,
260 size, s->size, flags);
261
262 return ret;
225 } 263 }
226 } 264 }
227 return __kmalloc(size, flags); 265 return __kmalloc(size, flags);
@@ -231,16 +269,38 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
231void *__kmalloc_node(size_t size, gfp_t flags, int node); 269void *__kmalloc_node(size_t size, gfp_t flags, int node);
232void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 270void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
233 271
272#ifdef CONFIG_KMEMTRACE
273extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
274 gfp_t gfpflags,
275 int node);
276#else
277static __always_inline void *
278kmem_cache_alloc_node_notrace(struct kmem_cache *s,
279 gfp_t gfpflags,
280 int node)
281{
282 return kmem_cache_alloc_node(s, gfpflags, node);
283}
284#endif
285
234static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 286static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
235{ 287{
288 void *ret;
289
236 if (__builtin_constant_p(size) && 290 if (__builtin_constant_p(size) &&
237 size <= PAGE_SIZE && !(flags & SLUB_DMA)) { 291 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
238 struct kmem_cache *s = kmalloc_slab(size); 292 struct kmem_cache *s = kmalloc_slab(size);
239 293
240 if (!s) 294 if (!s)
241 return ZERO_SIZE_PTR; 295 return ZERO_SIZE_PTR;
242 296
243 return kmem_cache_alloc_node(s, flags, node); 297 ret = kmem_cache_alloc_node_notrace(s, flags, node);
298
299 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
300 _THIS_IP_, ret,
301 size, s->size, flags, node);
302
303 return ret;
244 } 304 }
245 return __kmalloc_node(size, flags, node); 305 return __kmalloc_node(size, flags, node);
246} 306}
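
With SLUB_MAX_SIZE set to PAGE_SIZE, a constant-size kmalloc() above a page skips the kmalloc caches entirely and takes kmalloc_large(), which goes to the page allocator and reports PAGE_SIZE << order as the allocated size to kmemtrace. A brief illustration (helper names are made up):

	#include <linux/slab.h>
	#include <linux/mm.h>

	static void *alloc_big(void)
	{
		/*
		 * 2 * PAGE_SIZE > SLUB_MAX_SIZE: the constant-folded kmalloc()
		 * calls kmalloc_large() -> __get_free_pages(__GFP_COMP, order).
		 */
		return kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
	}

	static void *alloc_small(void)
	{
		/* <= SLUB_MAX_SIZE: served from kmalloc_slab()'s cache. */
		return kmalloc(192, GFP_KERNEL);
	}
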
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d67..bbacb7baa446 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
176#define put_cpu() preempt_enable() 176#define put_cpu() preempt_enable()
177#define put_cpu_no_resched() preempt_enable_no_resched() 177#define put_cpu_no_resched() preempt_enable_no_resched()
178 178
179/*
180 * Callback to arch code if there's nosmp or maxcpus=0 on the
181 * boot command line:
182 */
183extern void arch_disable_smp_support(void);
184
179void smp_setup_processor_id(void); 185void smp_setup_processor_id(void);
180 186
181#endif /* __LINUX_SMP_H */ 187#endif /* __LINUX_SMP_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 20fc4bbfca42..afc01909a428 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,10 +24,12 @@ struct __kernel_sockaddr_storage {
24#include <linux/types.h> /* pid_t */ 24#include <linux/types.h> /* pid_t */
25#include <linux/compiler.h> /* __user */ 25#include <linux/compiler.h> /* __user */
26 26
27#ifdef CONFIG_PROC_FS 27#ifdef __KERNEL__
28# ifdef CONFIG_PROC_FS
28struct seq_file; 29struct seq_file;
29extern void socket_seq_show(struct seq_file *seq); 30extern void socket_seq_show(struct seq_file *seq);
30#endif 31# endif
32#endif /* __KERNEL__ */
31 33
32typedef unsigned short sa_family_t; 34typedef unsigned short sa_family_t;
33 35
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644
index 000000000000..6f3e54c704c0
--- /dev/null
+++ b/include/linux/stackprotector.h
@@ -0,0 +1,16 @@
1#ifndef _LINUX_STACKPROTECTOR_H
2#define _LINUX_STACKPROTECTOR_H 1
3
4#include <linux/compiler.h>
5#include <linux/sched.h>
6#include <linux/random.h>
7
8#ifdef CONFIG_CC_STACKPROTECTOR
9# include <asm/stackprotector.h>
10#else
11static inline void boot_init_stack_canary(void)
12{
13}
14#endif
15
16#endif
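
Architectures that select CONFIG_CC_STACKPROTECTOR supply boot_init_stack_canary() from asm/stackprotector.h; everyone else compiles against the empty inline above. A simplified sketch of what an arch version might do (illustrative only; the real x86 implementation also mixes in the TSC and keeps a per-cpu copy in sync):

	#include <linux/random.h>
	#include <linux/sched.h>

	static __always_inline void boot_init_stack_canary(void)
	{
		unsigned long canary;

		/*
		 * Seed the boot task's canary before -fstack-protector
		 * instrumented functions start checking it.
		 */
		get_random_bytes(&canary, sizeof(canary));
		current->stack_canary = canary;
	}
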
diff --git a/include/linux/string.h b/include/linux/string.h
index d18fc198aa2f..27ac31784ad2 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,6 +10,7 @@
10#include <linux/compiler.h> /* for inline */ 10#include <linux/compiler.h> /* for inline */
11#include <linux/types.h> /* for size_t */ 11#include <linux/types.h> /* for size_t */
12#include <linux/stddef.h> /* for NULL */ 12#include <linux/stddef.h> /* for NULL */
13#include <stdarg.h>
13 14
14extern char *strndup_user(const char __user *, long); 15extern char *strndup_user(const char __user *, long);
15 16
@@ -111,6 +112,12 @@ extern void argv_free(char **argv);
111 112
112extern bool sysfs_streq(const char *s1, const char *s2); 113extern bool sysfs_streq(const char *s1, const char *s2);
113 114
115#ifdef CONFIG_BINARY_PRINTF
116int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
117int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
118int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
119#endif
120
114extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 121extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
115 const void *from, size_t available); 122 const void *from, size_t available);
116 123
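
vbin_printf() stores the *arguments* for a format string into a u32 buffer, and bstr_printf() renders them into text later, so the expensive formatting can be deferred off the hot path; the tracer's bprintk support builds on this. A hedged sketch, assuming the binary buffer size is counted in 32-bit words as in the tracing usage:

	#include <linux/string.h>
	#include <linux/kernel.h>

	static void bin_printf_demo(void)
	{
	#ifdef CONFIG_BINARY_PRINTF
		u32 bin[64];		/* argument buffer, in u32 words */
		char out[128];

		/* Fast path: pack the arguments, not the rendered string. */
		bprintf(bin, ARRAY_SIZE(bin), "cpu=%d delta=%lu\n", 3, 42UL);

		/* Slow path, later: render from the same format string. */
		bstr_printf(out, sizeof(out), "cpu=%d delta=%lu\n", bin);
	#endif
	}
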
diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685b861c..51774eb87cc6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
5#include <linux/ktime.h> 5#include <linux/ktime.h>
6#include <linux/stddef.h> 6#include <linux/stddef.h>
7#include <linux/debugobjects.h> 7#include <linux/debugobjects.h>
8#include <linux/stringify.h>
8 9
9struct tvec_base; 10struct tvec_base;
10 11
@@ -21,52 +22,126 @@ struct timer_list {
21 char start_comm[16]; 22 char start_comm[16];
22 int start_pid; 23 int start_pid;
23#endif 24#endif
25#ifdef CONFIG_LOCKDEP
26 struct lockdep_map lockdep_map;
27#endif
24}; 28};
25 29
26extern struct tvec_base boot_tvec_bases; 30extern struct tvec_base boot_tvec_bases;
27 31
32#ifdef CONFIG_LOCKDEP
33/*
34 * NB: because we have to copy the lockdep_map, setting the lockdep_map key
35 * (second argument) here is required, otherwise it could be initialised to
 36 * the copy of the lockdep_map later! We use the pointer to the
 37 * "<file>:<line>" string as the key, and the string itself as the name.
38 */
39#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
40 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
41#else
42#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
43#endif
44
28#define TIMER_INITIALIZER(_function, _expires, _data) { \ 45#define TIMER_INITIALIZER(_function, _expires, _data) { \
29 .entry = { .prev = TIMER_ENTRY_STATIC }, \ 46 .entry = { .prev = TIMER_ENTRY_STATIC }, \
30 .function = (_function), \ 47 .function = (_function), \
31 .expires = (_expires), \ 48 .expires = (_expires), \
32 .data = (_data), \ 49 .data = (_data), \
33 .base = &boot_tvec_bases, \ 50 .base = &boot_tvec_bases, \
51 __TIMER_LOCKDEP_MAP_INITIALIZER( \
52 __FILE__ ":" __stringify(__LINE__)) \
34 } 53 }
35 54
36#define DEFINE_TIMER(_name, _function, _expires, _data) \ 55#define DEFINE_TIMER(_name, _function, _expires, _data) \
37 struct timer_list _name = \ 56 struct timer_list _name = \
38 TIMER_INITIALIZER(_function, _expires, _data) 57 TIMER_INITIALIZER(_function, _expires, _data)
39 58
40void init_timer(struct timer_list *timer); 59void init_timer_key(struct timer_list *timer,
41void init_timer_deferrable(struct timer_list *timer); 60 const char *name,
61 struct lock_class_key *key);
62void init_timer_deferrable_key(struct timer_list *timer,
63 const char *name,
64 struct lock_class_key *key);
65
66#ifdef CONFIG_LOCKDEP
67#define init_timer(timer) \
68 do { \
69 static struct lock_class_key __key; \
70 init_timer_key((timer), #timer, &__key); \
71 } while (0)
72
73#define init_timer_deferrable(timer) \
74 do { \
75 static struct lock_class_key __key; \
76 init_timer_deferrable_key((timer), #timer, &__key); \
77 } while (0)
78
79#define init_timer_on_stack(timer) \
80 do { \
81 static struct lock_class_key __key; \
82 init_timer_on_stack_key((timer), #timer, &__key); \
83 } while (0)
84
85#define setup_timer(timer, fn, data) \
86 do { \
87 static struct lock_class_key __key; \
88 setup_timer_key((timer), #timer, &__key, (fn), (data));\
89 } while (0)
90
91#define setup_timer_on_stack(timer, fn, data) \
92 do { \
93 static struct lock_class_key __key; \
94 setup_timer_on_stack_key((timer), #timer, &__key, \
95 (fn), (data)); \
96 } while (0)
97#else
98#define init_timer(timer)\
99 init_timer_key((timer), NULL, NULL)
100#define init_timer_deferrable(timer)\
101 init_timer_deferrable_key((timer), NULL, NULL)
102#define init_timer_on_stack(timer)\
103 init_timer_on_stack_key((timer), NULL, NULL)
104#define setup_timer(timer, fn, data)\
105 setup_timer_key((timer), NULL, NULL, (fn), (data))
106#define setup_timer_on_stack(timer, fn, data)\
107 setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
108#endif
42 109
43#ifdef CONFIG_DEBUG_OBJECTS_TIMERS 110#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
44extern void init_timer_on_stack(struct timer_list *timer); 111extern void init_timer_on_stack_key(struct timer_list *timer,
112 const char *name,
113 struct lock_class_key *key);
45extern void destroy_timer_on_stack(struct timer_list *timer); 114extern void destroy_timer_on_stack(struct timer_list *timer);
46#else 115#else
47static inline void destroy_timer_on_stack(struct timer_list *timer) { } 116static inline void destroy_timer_on_stack(struct timer_list *timer) { }
48static inline void init_timer_on_stack(struct timer_list *timer) 117static inline void init_timer_on_stack_key(struct timer_list *timer,
118 const char *name,
119 struct lock_class_key *key)
49{ 120{
50 init_timer(timer); 121 init_timer_key(timer, name, key);
51} 122}
52#endif 123#endif
53 124
54static inline void setup_timer(struct timer_list * timer, 125static inline void setup_timer_key(struct timer_list * timer,
126 const char *name,
127 struct lock_class_key *key,
55 void (*function)(unsigned long), 128 void (*function)(unsigned long),
56 unsigned long data) 129 unsigned long data)
57{ 130{
58 timer->function = function; 131 timer->function = function;
59 timer->data = data; 132 timer->data = data;
60 init_timer(timer); 133 init_timer_key(timer, name, key);
61} 134}
62 135
63static inline void setup_timer_on_stack(struct timer_list *timer, 136static inline void setup_timer_on_stack_key(struct timer_list *timer,
137 const char *name,
138 struct lock_class_key *key,
64 void (*function)(unsigned long), 139 void (*function)(unsigned long),
65 unsigned long data) 140 unsigned long data)
66{ 141{
67 timer->function = function; 142 timer->function = function;
68 timer->data = data; 143 timer->data = data;
69 init_timer_on_stack(timer); 144 init_timer_on_stack_key(timer, name, key);
70} 145}
71 146
72/** 147/**
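
Call sites keep using init_timer()/setup_timer(): under CONFIG_LOCKDEP the macros expand to the *_key() variants with one static lock_class_key per call site, so each init site gets its own lock class, and without lockdep they pass NULLs. Usage therefore does not change (my_timer/my_timeout are illustrative):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void my_timeout(unsigned long data)
	{
		/* timer callback; data was supplied to setup_timer() */
	}

	static struct timer_list my_timer;

	static void start_my_timer(void)
	{
		/* expands to setup_timer_key() with a per-site lockdep key */
		setup_timer(&my_timer, my_timeout, 0);
		mod_timer(&my_timer, jiffies + HZ);
	}
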
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e632d29f0544..a16b9e06f2e5 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
193#ifndef topology_core_siblings 193#ifndef topology_core_siblings
194#define topology_core_siblings(cpu) cpumask_of_cpu(cpu) 194#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
195#endif 195#endif
196#ifndef topology_thread_cpumask
197#define topology_thread_cpumask(cpu) cpumask_of(cpu)
198#endif
199#ifndef topology_core_cpumask
200#define topology_core_cpumask(cpu) cpumask_of(cpu)
201#endif
196 202
197#endif /* _LINUX_TOPOLOGY_H */ 203#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000000..7a8130384087
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
1#ifndef _LINUX_TRACE_CLOCK_H
2#define _LINUX_TRACE_CLOCK_H
3
4/*
5 * 3 trace clock variants, with differing scalability/precision
6 * tradeoffs:
7 *
8 * - local: CPU-local trace clock
9 * - medium: scalable global clock with some jitter
10 * - global: globally monotonic, serialized clock
11 */
12#include <linux/compiler.h>
13#include <linux/types.h>
14
15extern u64 notrace trace_clock_local(void);
16extern u64 notrace trace_clock(void);
17extern u64 notrace trace_clock_global(void);
18
19#endif /* _LINUX_TRACE_CLOCK_H */
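
A hedged sketch of the trade-off: trace_clock_local() is the cheapest but is not guaranteed to be coherent across CPUs, while trace_clock_global() pays serialization for cross-CPU monotonicity (time_it is an illustrative helper):

	#include <linux/trace_clock.h>

	/* Time a callback with the cheap CPU-local variant (sketch). */
	static u64 time_it(void (*fn)(void))
	{
		u64 t0 = trace_clock_local();	/* per-CPU; may drift */

		fn();
		return trace_clock_local() - t0;
	}
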
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..152b2f03fb86 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -153,4 +153,11 @@ static inline void tracepoint_synchronize_unregister(void)
153 synchronize_sched(); 153 synchronize_sched();
154} 154}
155 155
156#define PARAMS(args...) args
157#define TRACE_FORMAT(name, proto, args, fmt) \
158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159
160#define TRACE_EVENT_FORMAT(name, proto, args, fmt, struct, tpfmt) \
161 TRACE_FORMAT(name, PARAMS(proto), PARAMS(args), PARAMS(fmt))
162
156#endif 163#endif
diff --git a/include/linux/types.h b/include/linux/types.h
index 712ca53bc348..fca82ed55f49 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -1,6 +1,9 @@
1#ifndef _LINUX_TYPES_H 1#ifndef _LINUX_TYPES_H
2#define _LINUX_TYPES_H 2#define _LINUX_TYPES_H
3 3
4#include <asm/types.h>
5
6#ifndef __ASSEMBLY__
4#ifdef __KERNEL__ 7#ifdef __KERNEL__
5 8
6#define DECLARE_BITMAP(name,bits) \ 9#define DECLARE_BITMAP(name,bits) \
@@ -9,7 +12,6 @@
9#endif 12#endif
10 13
11#include <linux/posix_types.h> 14#include <linux/posix_types.h>
12#include <asm/types.h>
13 15
14#ifndef __KERNEL_STRICT_NAMES 16#ifndef __KERNEL_STRICT_NAMES
15 17
@@ -212,5 +214,5 @@ struct ustat {
212}; 214};
213 215
214#endif /* __KERNEL__ */ 216#endif /* __KERNEL__ */
215 217#endif /* __ASSEMBLY__ */
216#endif /* _LINUX_TYPES_H */ 218#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06a..a43ebec3a7b9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
95 95
96extern int map_vm_area(struct vm_struct *area, pgprot_t prot, 96extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
97 struct page ***pages); 97 struct page ***pages);
98extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
99 pgprot_t prot, struct page **pages);
100extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
98extern void unmap_kernel_range(unsigned long addr, unsigned long size); 101extern void unmap_kernel_range(unsigned long addr, unsigned long size);
99 102
100/* Allocate/destroy a 'vmalloc' VM area. */ 103/* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
110 */ 113 */
111extern rwlock_t vmlist_lock; 114extern rwlock_t vmlist_lock;
112extern struct vm_struct *vmlist; 115extern struct vm_struct *vmlist;
116extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
113 117
114#endif /* _LINUX_VMALLOC_H */ 118#endif /* _LINUX_VMALLOC_H */
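
The _noflush variants install or tear down page-table entries without flushing caches/TLBs, leaving one batched flush to the caller; the dynamic percpu allocator introduced above is the intended user. A hedged sketch of such batching (map_batch is an illustrative helper, not from the patch):

	#include <linux/vmalloc.h>
	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/* Map nr pages at addr and flush once for the whole batch. */
	static int map_batch(unsigned long addr, struct page **pages,
			     unsigned long nr)
	{
		int ret;

		ret = map_kernel_range_noflush(addr, nr << PAGE_SHIFT,
					       PAGE_KERNEL, pages);
		if (ret < 0)
			return ret;

		/* One flush for the whole range instead of one per page. */
		flush_cache_vmap(addr, addr + (nr << PAGE_SHIFT));
		return 0;
	}
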