Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/bootmem.h | 1
-rw-r--r--  include/linux/capability.h | 2
-rw-r--r--  include/linux/compiler-gcc.h | 1
-rw-r--r--  include/linux/compiler-gcc4.h | 22
-rw-r--r--  include/linux/compiler.h | 20
-rw-r--r--  include/linux/dmar.h | 15
-rw-r--r--  include/linux/fscache-cache.h | 40
-rw-r--r--  include/linux/fscache.h | 27
-rw-r--r--  include/linux/ftrace_event.h | 38
-rw-r--r--  include/linux/gfs2_ondisk.h | 6
-rw-r--r--  include/linux/hardirq.h | 24
-rw-r--r--  include/linux/hw_breakpoint.h | 131
-rw-r--r--  include/linux/i2c-pnx.h | 2
-rw-r--r--  include/linux/init_task.h | 4
-rw-r--r--  include/linux/interrupt.h | 6
-rw-r--r--  include/linux/irqflags.h | 2
-rw-r--r--  include/linux/jiffies.h | 1
-rw-r--r--  include/linux/kernel.h | 5
-rw-r--r--  include/linux/kernel_stat.h | 1
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/lsm_audit.h | 18
-rw-r--r--  include/linux/mfd/wm831x/regulator.h | 4
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/of.h | 103
-rw-r--r--  include/linux/of_fdt.h | 86
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/perf_counter.h | 3
-rw-r--r--  include/linux/perf_event.h | 59
-rw-r--r--  include/linux/posix_acl.h | 14
-rw-r--r--  include/linux/preempt.h | 5
-rw-r--r--  include/linux/quota.h | 11
-rw-r--r--  include/linux/ratelimit.h | 33
-rw-r--r--  include/linux/rcupdate.h | 10
-rw-r--r--  include/linux/rcutiny.h | 104
-rw-r--r--  include/linux/rcutree.h | 7
-rw-r--r--  include/linux/sched.h | 45
-rw-r--r--  include/linux/securebits.h | 24
-rw-r--r--  include/linux/security.h | 48
-rw-r--r--  include/linux/slow-work.h | 72
-rw-r--r--  include/linux/smp.h | 11
-rw-r--r--  include/linux/smp_lock.h | 21
-rw-r--r--  include/linux/spinlock.h | 6
-rw-r--r--  include/linux/spinlock_api_smp.h | 75
-rw-r--r--  include/linux/srcu.h | 1
-rw-r--r--  include/linux/swiotlb.h | 12
-rw-r--r--  include/linux/syscalls.h | 77
-rw-r--r--  include/linux/sysctl.h | 44
-rw-r--r--  include/linux/tpm.h | 9
-rw-r--r--  include/linux/tracepoint.h | 6
50 files changed, 989 insertions, 273 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 1feed71551c9..5a5385749e16 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -330,6 +330,7 @@ unifdef-y += scc.h
 unifdef-y += sched.h
 unifdef-y += screen_info.h
 unifdef-y += sdla.h
+unifdef-y += securebits.h
 unifdef-y += selinux_netlink.h
 unifdef-y += sem.h
 unifdef-y += serial_core.h
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index dd97fb8408a8..b10ec49ee2dd 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat,
  unsigned long addr,
  unsigned long size);
 extern void free_bootmem(unsigned long addr, unsigned long size);
+extern void free_bootmem_late(unsigned long addr, unsigned long size);
 
 /*
  * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c8f2a5f70ed5..39e5ff512fbe 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -92,9 +92,7 @@ struct vfs_cap_data {
 #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
 #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
 
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
 extern int file_caps_enabled;
-#endif
 
 typedef struct kernel_cap_struct {
  __u32 cap[_KERNEL_CAPABILITY_U32S];
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a3ed7cb8ca34..73dcf804bc94 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,6 +79,7 @@
 #define noinline __attribute__((noinline))
 #define __attribute_const__ __attribute__((__const__))
 #define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
 
 #define __gcc_header(x) #x
 #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 450fa597c94d..94dea3ffbfa1 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -36,4 +36,26 @@
  the kernel context */
 #define __cold __attribute__((__cold__))
 
+
+#if __GNUC_MINOR__ >= 5
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+#endif
+
+#endif
+
+#if __GNUC_MINOR__ > 0
+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+#if __GNUC_MINOR__ >= 4
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#define __compiletime_error(message) __attribute__((error(message)))
 #endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04fb5135b4e1..5be3dab4a695 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,6 +144,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define barrier() __memory_barrier()
 #endif
 
+/* Unreachable code */
+#ifndef unreachable
+# define unreachable() do { } while (1)
+#endif
+
 #ifndef RELOC_HIDE
 # define RELOC_HIDE(ptr, off) \
  ({ unsigned long __ptr; \
@@ -213,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __maybe_unused /* unimplemented */
 #endif
 
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
 #ifndef noinline
 #define noinline
 #endif
@@ -266,6 +275,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 #endif
 
+/* Compile time object size, -1 for unknown */
+#ifndef __compiletime_object_size
+# define __compiletime_object_size(obj) -1
+#endif
+#ifndef __compiletime_warning
+# define __compiletime_warning(message)
+#endif
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
 /*
  * Prevent the compiler from merging or refetching accesses. The compiler
  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
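Note (not part of the patch): the new unreachable() annotation is meant for paths that can never fall through, typically after an asm block that transfers control elsewhere, as the comment in compiler-gcc4.h says. A minimal sketch, assuming x86 inline assembly and a made-up helper name:

/* example_jump_to() never returns through C control flow; unreachable()
 * (or the do { } while (1) fallback on compilers without the builtin)
 * lets this non-void function compile without "control reaches end of
 * non-void function" warnings. */
static inline int example_jump_to(unsigned long entry)
{
        asm volatile("jmp *%0" : : "r" (entry) : "memory");
        unreachable();
}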
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 4a2b162c256a..5de4c9e5856d 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -208,16 +208,9 @@ struct dmar_atsr_unit {
  u8 include_all:1; /* include all ports */
 };
 
-/* Intel DMAR initialization functions */
 extern int intel_iommu_init(void);
-#else
-static inline int intel_iommu_init(void)
-{
-#ifdef CONFIG_INTR_REMAP
- return dmar_dev_scope_init();
-#else
- return -ENODEV;
-#endif
-}
-#endif /* !CONFIG_DMAR */
+#else /* !CONFIG_DMAR: */
+static inline int intel_iommu_init(void) { return -ENODEV; }
+#endif /* CONFIG_DMAR */
+
 #endif /* __DMAR_H__ */
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 84d3532dd3ea..7be0c6fbe880 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -91,6 +91,8 @@ struct fscache_operation {
 #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
 #define FSCACHE_OP_DEAD 6 /* op is now dead */
+#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
+#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
 
  atomic_t usage;
  unsigned debug_id; /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {
 
  /* operation releaser */
  fscache_operation_release_t release;
+
+#ifdef CONFIG_SLOW_WORK_PROC
+ const char *name; /* operation name */
+ const char *state; /* operation state */
+#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
+#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
+#else
+#define fscache_set_op_name(OP, N) do { } while(0)
+#define fscache_set_op_state(OP, S) do { } while(0)
+#endif
 };
 
 extern atomic_t fscache_op_debug_id;
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
  op->debug_id = atomic_inc_return(&fscache_op_debug_id);
  op->release = release;
  INIT_LIST_HEAD(&op->pend_link);
+ fscache_set_op_state(op, "Init");
 }
 
 /**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
  struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
  struct fscache_cookie *cookie);
 
- /* look up the object for a cookie */
- void (*lookup_object)(struct fscache_object *object);
+ /* look up the object for a cookie
+ * - return -ETIMEDOUT to be requeued
+ */
+ int (*lookup_object)(struct fscache_object *object);
 
  /* finished looking up */
  void (*lookup_complete)(struct fscache_object *object);
@@ -297,12 +312,14 @@ struct fscache_cookie {
  atomic_t usage; /* number of users of this cookie */
  atomic_t n_children; /* number of children of this cookie */
  spinlock_t lock;
+ spinlock_t stores_lock; /* lock on page store tree */
  struct hlist_head backing_objects; /* object(s) backing this file/index */
  const struct fscache_cookie_def *def; /* definition */
  struct fscache_cookie *parent; /* parent of this entry */
  void *netfs_data; /* back pointer to netfs */
  struct radix_tree_root stores; /* pages to be stored on this cookie */
 #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
 
  unsigned long flags;
 #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
@@ -337,6 +354,7 @@ struct fscache_object {
  FSCACHE_OBJECT_RECYCLING, /* retiring object */
  FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
  FSCACHE_OBJECT_DEAD, /* object is now dead */
+ FSCACHE_OBJECT__NSTATES
 } state;
 
  int debug_id; /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
  int n_obj_ops; /* number of object ops outstanding on object */
  int n_in_progress; /* number of ops in progress */
  int n_exclusive; /* number of exclusive ops queued */
+ atomic_t n_reads; /* number of read ops in progress */
  spinlock_t lock; /* state and operations lock */
 
  unsigned long lookup_jif; /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
 #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
 #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
+#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/
 
  unsigned long flags;
 #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
@@ -373,7 +393,11 @@ struct fscache_object {
  struct list_head dependents; /* FIFO of dependent objects */
  struct list_head dep_link; /* link in parent's dependents list */
  struct list_head pending_ops; /* unstarted operations on this object */
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ struct rb_node objlist_link; /* link in global object list */
+#endif
  pgoff_t store_limit; /* current storage limit */
+ loff_t store_limit_l; /* current storage limit */
 };
 
 extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
  (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
  (obj)->state < FSCACHE_OBJECT_DYING)
 
+#define fscache_object_is_dead(obj) \
+ (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
+ (obj)->state >= FSCACHE_OBJECT_DYING)
+
 extern const struct slow_work_ops fscache_object_slow_work_ops;
 
 /**
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
  object->events = object->event_mask = 0;
  object->flags = 0;
  object->store_limit = 0;
+ object->store_limit_l = 0;
  object->cache = cache;
  object->cookie = cookie;
  object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
 extern void fscache_object_lookup_negative(struct fscache_object *object);
 extern void fscache_obtained_object(struct fscache_object *object);
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+extern void fscache_object_destroy(struct fscache_object *object);
+#else
+#define fscache_object_destroy(object) do {} while(0)
+#endif
+
 /**
  * fscache_object_destroyed - Note destruction of an object in a cache
  * @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
 static inline
 void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
 {
+ object->store_limit_l = i_size;
  object->store_limit = i_size >> PAGE_SHIFT;
  if (i_size & ~PAGE_MASK)
   object->store_limit++;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6d8ee466e0a0..595ce49288b7 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
 extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
 extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
 extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
+extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
+ gfp_t);
 
 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
  __fscache_wait_on_page_write(cookie, page);
 }
 
+/**
+ * fscache_maybe_release_page - Consider releasing a page, cancelling a store
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ * @gfp: The gfp flags passed to releasepage()
+ *
+ * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
+ * releasepage() call. A storage request on the page may cancelled if it is
+ * not currently being processed.
+ *
+ * The function returns true if the page no longer has a storage request on it,
+ * and false if a storage request is left in place. If true is returned, the
+ * page will have been passed to fscache_uncache_page(). If false is returned
+ * the page cannot be freed yet.
+ */
+static inline
+bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie) && PageFsCache(page))
+  return __fscache_maybe_release_page(cookie, page, gfp);
+ return false;
+}
+
 #endif /* _LINUX_FSCACHE_H */
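As a usage illustration (not from the patch itself), a netfs releasepage() handler might call the new helper roughly as below; the examplefs names and the cookie accessor are hypothetical:

static int examplefs_releasepage(struct page *page, gfp_t gfp)
{
        struct fscache_cookie *cookie = examplefs_page_cookie(page); /* hypothetical accessor */

        /* If a store to the cache is still queued and cannot be cancelled,
         * the helper returns false and the page must be kept for now. */
        if (!fscache_maybe_release_page(cookie, page, gfp))
                return 0;

        return 1;       /* no outstanding store; the page is releasable */
}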
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4ec5e67e18cf..47bbdf9c38d0 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -117,12 +117,12 @@ struct ftrace_event_call {
  struct dentry *dir;
  struct trace_event *event;
  int enabled;
- int (*regfunc)(void *);
- void (*unregfunc)(void *);
+ int (*regfunc)(struct ftrace_event_call *);
+ void (*unregfunc)(struct ftrace_event_call *);
  int id;
- int (*raw_init)(void);
- int (*show_format)(struct ftrace_event_call *call,
-  struct trace_seq *s);
+ int (*raw_init)(struct ftrace_event_call *);
+ int (*show_format)(struct ftrace_event_call *,
+  struct trace_seq *);
  int (*define_fields)(struct ftrace_event_call *);
  struct list_head fields;
  int filter_active;
@@ -131,20 +131,20 @@ struct ftrace_event_call {
  void *data;
 
  atomic_t profile_count;
- int (*profile_enable)(void);
- void (*profile_disable)(void);
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
 };
 
 #define FTRACE_MAX_PROFILE_SIZE 2048
 
-extern char *trace_profile_buf;
-extern char *trace_profile_buf_nmi;
+extern char *perf_trace_buf;
+extern char *perf_trace_buf_nmi;
 
 #define MAX_FILTER_PRED 32
 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
 
 extern void destroy_preds(struct ftrace_event_call *call);
-extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern int filter_match_preds(struct event_filter *filter, void *rec);
 extern int filter_current_check_discard(struct ring_buffer *buffer,
  struct ftrace_event_call *call,
  void *rec,
@@ -157,11 +157,12 @@ enum {
  FILTER_PTR_STRING,
 };
 
-extern int trace_define_field(struct ftrace_event_call *call,
- const char *type, const char *name,
- int offset, int size, int is_signed,
- int filter_type);
 extern int trace_define_common_fields(struct ftrace_event_call *call);
+extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+ const char *name, int offset, int size,
+ int is_signed, int filter_type);
+extern int trace_add_event_call(struct ftrace_event_call *call);
+extern void trace_remove_event_call(struct ftrace_event_call *call);
 
 #define is_signed_type(type) (((type)(-1)) < 0)
 
@@ -186,4 +187,13 @@ do { \
  __trace_printk(ip, fmt, ##args); \
 } while (0)
 
+#ifdef CONFIG_EVENT_PROFILE
+struct perf_event;
+extern int ftrace_profile_enable(int event_id);
+extern void ftrace_profile_disable(int event_id);
+extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
+ char *filter_str);
+extern void ftrace_profile_free_filter(struct perf_event *event);
+#endif
+
 #endif /* _LINUX_FTRACE_EVENT_H */
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index b80c88dedbbb..81f90a59cda6 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -81,7 +81,11 @@ struct gfs2_meta_header {
  __be32 mh_type;
  __be64 __pad0; /* Was generation number in gfs1 */
  __be32 mh_format;
- __be32 __pad1; /* Was incarnation number in gfs1 */
+ /* This union is to keep userspace happy */
+ union {
+  __be32 mh_jid; /* Was incarnation number in gfs1 */
+  __be32 __pad1;
+ };
 };
 
 /*
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 6d527ee82b2b..d5b387669dab 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,10 +139,34 @@ static inline void account_system_vtime(struct task_struct *tsk)
 #endif
 
 #if defined(CONFIG_NO_HZ)
+#if defined(CONFIG_TINY_RCU)
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+
+static inline void rcu_irq_enter(void)
+{
+ rcu_exit_nohz();
+}
+
+static inline void rcu_irq_exit(void)
+{
+ rcu_enter_nohz();
+}
+
+static inline void rcu_nmi_enter(void)
+{
+}
+
+static inline void rcu_nmi_exit(void)
+{
+}
+
+#else
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
 extern void rcu_nmi_exit(void);
+#endif
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
new file mode 100644
index 000000000000..a03daed08c59
--- /dev/null
+++ b/include/linux/hw_breakpoint.h
@@ -0,0 +1,131 @@
+#ifndef _LINUX_HW_BREAKPOINT_H
+#define _LINUX_HW_BREAKPOINT_H
+
+enum {
+ HW_BREAKPOINT_LEN_1 = 1,
+ HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_8 = 8,
+};
+
+enum {
+ HW_BREAKPOINT_R = 1,
+ HW_BREAKPOINT_W = 2,
+ HW_BREAKPOINT_X = 4,
+};
+
+#ifdef __KERNEL__
+
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+/* As it's for in-kernel or ptrace use, we want it to be pinned */
+#define DEFINE_BREAKPOINT_ATTR(name) \
+struct perf_event_attr name = { \
+ .type = PERF_TYPE_BREAKPOINT, \
+ .size = sizeof(name), \
+ .pinned = 1, \
+};
+
+static inline void hw_breakpoint_init(struct perf_event_attr *attr)
+{
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(*attr);
+ attr->pinned = 1;
+}
+
+static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
+{
+ return bp->attr.bp_addr;
+}
+
+static inline int hw_breakpoint_type(struct perf_event *bp)
+{
+ return bp->attr.bp_type;
+}
+
+static inline int hw_breakpoint_len(struct perf_event *bp)
+{
+ return bp->attr.bp_len;
+}
+
+extern struct perf_event *
+register_user_hw_breakpoint(struct perf_event_attr *attr,
+ perf_callback_t triggered,
+ struct task_struct *tsk);
+
+/* FIXME: only change from the attr, and don't unregister */
+extern struct perf_event *
+modify_user_hw_breakpoint(struct perf_event *bp,
+ struct perf_event_attr *attr,
+ perf_callback_t triggered,
+ struct task_struct *tsk);
+
+/*
+ * Kernel breakpoints are not associated with any particular thread.
+ */
+extern struct perf_event *
+register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
+ perf_callback_t triggered,
+ int cpu);
+
+extern struct perf_event **
+register_wide_hw_breakpoint(struct perf_event_attr *attr,
+ perf_callback_t triggered);
+
+extern int register_perf_hw_breakpoint(struct perf_event *bp);
+extern int __register_perf_hw_breakpoint(struct perf_event *bp);
+extern void unregister_hw_breakpoint(struct perf_event *bp);
+extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events);
+
+extern int reserve_bp_slot(struct perf_event *bp);
+extern void release_bp_slot(struct perf_event *bp);
+
+extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
+{
+ return &bp->hw.info;
+}
+
+#else /* !CONFIG_HAVE_HW_BREAKPOINT */
+
+static inline struct perf_event *
+register_user_hw_breakpoint(struct perf_event_attr *attr,
+ perf_callback_t triggered,
+ struct task_struct *tsk) { return NULL; }
+static inline struct perf_event *
+modify_user_hw_breakpoint(struct perf_event *bp,
+ struct perf_event_attr *attr,
+ perf_callback_t triggered,
+ struct task_struct *tsk) { return NULL; }
+static inline struct perf_event *
+register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
+ perf_callback_t triggered,
+ int cpu) { return NULL; }
+static inline struct perf_event **
+register_wide_hw_breakpoint(struct perf_event_attr *attr,
+ perf_callback_t triggered) { return NULL; }
+static inline int
+register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
+static inline int
+__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
+static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
+static inline void
+unregister_wide_hw_breakpoint(struct perf_event **cpu_events) { }
+static inline int
+reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
+static inline void release_bp_slot(struct perf_event *bp) { }
+
+static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { }
+
+static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_HW_BREAKPOINT_H */
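A sketch of how a kernel-side user could drive the API above (the example_ names are hypothetical, and the error-return convention of the wide registration call is an assumption, not taken from this patch):

static void example_bp_triggered(struct perf_event *bp, void *data)
{
        printk(KERN_INFO "hw_breakpoint: watched variable was written\n");
}

static int example_install_watchpoint(void *kernel_addr)
{
        struct perf_event_attr attr;
        struct perf_event **wp;

        hw_breakpoint_init(&attr);              /* PERF_TYPE_BREAKPOINT, pinned */
        attr.bp_addr = (unsigned long)kernel_addr;
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        wp = register_wide_hw_breakpoint(&attr, example_bp_triggered);
        if (IS_ERR_OR_NULL(wp))
                return -EINVAL;
        /* later: unregister_wide_hw_breakpoint(wp); */
        return 0;
}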
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
index f13255e06406..9eb07bbc6522 100644
--- a/include/linux/i2c-pnx.h
+++ b/include/linux/i2c-pnx.h
@@ -21,7 +21,7 @@ struct i2c_pnx_mif {
  int mode; /* Interface mode */
  struct completion complete; /* I/O completion */
  struct timer_list timer; /* Timeout */
- char * buf; /* Data buffer */
+ u8 * buf; /* Data buffer */
  int len; /* Length of data buffer */
 };
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 21a6f5d9af22..8d10aa7fd4c9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -83,16 +83,12 @@ extern struct group_info init_groups;
 #define INIT_IDS
 #endif
 
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
 /*
  * Because of the reduced scope of CAP_SETPCAP when filesystem
  * capabilities are in effect, it is safe to allow CAP_SETPCAP to
  * be available in the default configuration.
  */
 # define CAP_INIT_BSET CAP_FULL_SET
-#else
-# define CAP_INIT_BSET CAP_INIT_EFF_SET
-#endif
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_PREEMPT(tsk) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 7ca72b74eec7..75f3f00ac1e5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -603,12 +603,6 @@ static inline void init_irq_proc(void)
 }
 #endif
 
-#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
-extern void debug_poll_all_shared_irqs(void);
-#else
-static inline void debug_poll_all_shared_irqs(void) { }
-#endif
-
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
 
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index b02a3f1d46a0..006bf45eae30 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -124,6 +124,6 @@
  typecheck(unsigned long, flags); \
  raw_irqs_disabled_flags(flags); \
 })
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
 #endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1a9cf78bfce5..6811f4bfc6e7 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
+extern unsigned long nsecs_to_jiffies(u64 n);
 
 #define TIMESTAMP_SIZE 30
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4e3184fa054..3fa4c590cf12 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -15,7 +15,6 @@
 #include <linux/bitops.h>
 #include <linux/log2.h>
 #include <linux/typecheck.h>
-#include <linux/ratelimit.h>
 #include <linux/dynamic_debug.h>
 #include <asm/byteorder.h>
 #include <asm/bug.h>
@@ -241,8 +240,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 asmlinkage int printk(const char * fmt, ...)
  __attribute__ ((format (printf, 1, 2))) __cold;
 
-extern struct ratelimit_state printk_ratelimit_state;
-extern int printk_ratelimit(void);
+extern int __printk_ratelimit(const char *func);
+#define printk_ratelimit() __printk_ratelimit(__func__)
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
  unsigned int interval_msec);
 
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 348fa8874b52..c059044bc6dc 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -25,6 +25,7 @@ struct cpu_usage_stat {
  cputime64_t iowait;
  cputime64_t steal;
  cputime64_t guest;
+ cputime64_t guest_nice;
 };
 
 struct kernel_stat {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 3a46b7b7abb2..1b672f74a32f 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -296,6 +296,8 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
 int disable_kprobe(struct kprobe *kp);
 int enable_kprobe(struct kprobe *kp);
 
+void dump_kprobe(struct kprobe *kp);
+
 #else /* !CONFIG_KPROBES: */
 
 static inline int kprobes_built_in(void)
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 190c37854870..f78f83d7663f 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -26,14 +26,15 @@
 
 /* Auxiliary data to use in generating the audit record. */
 struct common_audit_data {
  char type;
 #define LSM_AUDIT_DATA_FS 1
 #define LSM_AUDIT_DATA_NET 2
 #define LSM_AUDIT_DATA_CAP 3
 #define LSM_AUDIT_DATA_IPC 4
 #define LSM_AUDIT_DATA_TASK 5
 #define LSM_AUDIT_DATA_KEY 6
 #define LSM_AUDIT_NO_AUDIT 7
+#define LSM_AUDIT_DATA_KMOD 8
  struct task_struct *tsk;
  union {
   struct {
@@ -66,6 +67,7 @@ struct common_audit_data {
   char *key_desc;
  } key_struct;
 #endif
+ char *kmod_name;
  } u;
  /* this union contains LSM specific data */
  union {
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index f95466343fb2..955d30fc6a27 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1212,7 +1212,7 @@
 #define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */
 #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
 
-#define WM831X_ISINK_MAX_ISEL 56
-extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL];
+#define WM831X_ISINK_MAX_ISEL 55
+extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
 
 #endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 529a0931711d..d7e26e30c8c2 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -358,6 +358,7 @@ static const struct proto_ops name##_ops = { \
 
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
+#include <linux/ratelimit.h>
 extern struct ratelimit_state net_ratelimit_state;
 #endif
 
diff --git a/include/linux/of.h b/include/linux/of.h
index 7be2d1043c16..e7facd8fbce8 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -17,14 +17,117 @@
  */
 #include <linux/types.h>
 #include <linux/bitops.h>
+#include <linux/kref.h>
 #include <linux/mod_devicetable.h>
 
+typedef u32 phandle;
+typedef u32 ihandle;
+
+struct property {
+ char *name;
+ int length;
+ void *value;
+ struct property *next;
+ unsigned long _flags;
+ unsigned int unique_id;
+};
+
+#if defined(CONFIG_SPARC)
+struct of_irq_controller;
+#endif
+
+struct device_node {
+ const char *name;
+ const char *type;
+ phandle node;
+#if !defined(CONFIG_SPARC)
+ phandle linux_phandle;
+#endif
+ char *full_name;
+
+ struct property *properties;
+ struct property *deadprops; /* removed properties */
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ struct device_node *next; /* next device of same type */
+ struct device_node *allnext; /* next in list of all nodes */
+ struct proc_dir_entry *pde; /* this node's proc directory */
+ struct kref kref;
+ unsigned long _flags;
+ void *data;
+#if defined(CONFIG_SPARC)
+ char *path_component_name;
+ unsigned int unique_id;
+ struct of_irq_controller *irq_trans;
+#endif
+};
+
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return test_bit(flag, &n->_flags);
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+ set_bit(flag, &n->_flags);
+}
+
+static inline void
+set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
+{
+ dn->pde = de;
+}
+
+extern struct device_node *of_find_all_nodes(struct device_node *prev);
+
+#if defined(CONFIG_SPARC)
+/* Dummy ref counting routines - to be implemented later */
+static inline struct device_node *of_node_get(struct device_node *node)
+{
+ return node;
+}
+static inline void of_node_put(struct device_node *node)
+{
+}
+
+#else
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+#endif
+
+/*
+ * OF address retreival & translation
+ */
+
+/* Helper to read a big number; size is in cells (not bytes) */
+static inline u64 of_read_number(const u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+  r = (r << 32) | *(cell++);
+ return r;
+}
+
+/* Like of_read_number, but we want an unsigned long result */
+#ifdef CONFIG_PPC32
+static inline unsigned long of_read_ulong(const u32 *cell, int size)
+{
+ return cell[size-1];
+}
+#else
+#define of_read_ulong(cell, size) of_read_number(cell, size)
+#endif
+
 #include <asm/prom.h>
 
 /* flag descriptions */
 #define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
 #define OF_DETACHED 2 /* node has been detached from the device tree */
 
+#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
+#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+
 #define OF_BAD_ADDR ((u64)-1)
 
 extern struct device_node *of_find_node_by_name(struct device_node *from,
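For illustration only (the property layout is an assumption, not something this patch defines): of_read_number() folds consecutive 32-bit cells into one value, e.g. a two-cell base address read from a node's "reg" property via of_get_property(), which is declared elsewhere in this header:

static u64 example_read_base_addr(struct device_node *np)
{
        const u32 *reg = of_get_property(np, "reg", NULL);

        if (!reg)
                return OF_BAD_ADDR;
        return of_read_number(reg, 2);  /* two cells -> one 64-bit number */
}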
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
new file mode 100644
index 000000000000..41d432b13553
--- /dev/null
+++ b/include/linux/of_fdt.h
@@ -0,0 +1,86 @@
+/*
+ * Definitions for working with the Flattened Device Tree data format
+ *
+ * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
+ * benh@kernel.crashing.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_OF_FDT_H
+#define _LINUX_OF_FDT_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+#ifndef __ASSEMBLY__
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full pathes and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names, and is fully page aligned and
+ * self contained in a page, so that it can be kept around by the kernel,
+ * each property name appears only once in this page (cheap compression)
+ *
+ * the mem_rsvmap contains a map of reserved ranges of physical memory,
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0
+ */
+struct boot_param_header {
+ u32 magic; /* magic word OF_DT_HEADER */
+ u32 totalsize; /* total size of DT block */
+ u32 off_dt_struct; /* offset to structure */
+ u32 off_dt_strings; /* offset to strings */
+ u32 off_mem_rsvmap; /* offset to memory reserve map */
+ u32 version; /* format version */
+ u32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ u32 dt_strings_size; /* size of the DT strings block */
+ /* version 17 fields below */
+ u32 dt_struct_size; /* size of the DT structure block */
+};
+
+/* For scanning the flat device-tree at boot time */
+extern int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data);
+extern void __init *of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size);
+extern int __init of_flat_dt_is_compatible(unsigned long node,
+ const char *name);
+extern unsigned long __init of_get_flat_dt_root(void);
+
+/* Other Prototypes */
+extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
+extern int machine_is_compatible(const char *compat);
+extern void print_properties(struct device_node *node);
+extern int prom_n_intr_cells(struct device_node* np);
+extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
+extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_OF_FDT_H */
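A sketch of the early scanning style these prototypes support; the callback name, the node and property chosen, and the destination-buffer handling are illustrative assumptions:

static int __init example_dt_scan_chosen(unsigned long node, const char *uname,
                                         int depth, void *data)
{
        unsigned long len;
        const char *bootargs;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;                       /* 0 = keep walking the tree */

        bootargs = of_get_flat_dt_prop(node, "bootargs", &len);
        if (bootargs != NULL && len > 0)
                strlcpy(data, bootargs, len);   /* caller's buffer assumed >= len */

        return 1;                               /* non-zero stops of_scan_flat_dt() */
}

/* caller, early in boot: of_scan_flat_dt(example_dt_scan_chosen, boot_command_line); */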
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 84cf1f3b7838..daecca3c8300 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1633,6 +1633,8 @@
 #define PCI_DEVICE_ID_O2_6730 0x673a
 #define PCI_DEVICE_ID_O2_6832 0x6832
 #define PCI_DEVICE_ID_O2_6836 0x6836
+#define PCI_DEVICE_ID_O2_6812 0x6872
+#define PCI_DEVICE_ID_O2_6933 0x6933
 
 #define PCI_VENDOR_ID_3DFX 0x121a
 #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 7b7fbf433cff..e3fb25606706 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -106,6 +106,8 @@ enum perf_sw_ids {
  PERF_COUNT_SW_CPU_MIGRATIONS = 4,
  PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
  PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
 
  PERF_COUNT_SW_MAX, /* non-ABI */
 };
@@ -225,6 +227,7 @@ struct perf_counter_attr {
 #define PERF_COUNTER_IOC_RESET _IO ('$', 3)
 #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
 #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)
 
 enum perf_counter_ioc_flags {
  PERF_IOC_FLAG_GROUP = 1U << 0,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9e7012689a84..43adbd7f0010 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,6 +18,10 @@
 #include <linux/ioctl.h>
 #include <asm/byteorder.h>
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
 /*
  * User-space ABI bits:
  */
@@ -31,6 +35,7 @@ enum perf_type_id {
  PERF_TYPE_TRACEPOINT = 2,
  PERF_TYPE_HW_CACHE = 3,
  PERF_TYPE_RAW = 4,
+ PERF_TYPE_BREAKPOINT = 5,
 
  PERF_TYPE_MAX, /* non-ABI */
 };
@@ -102,6 +107,8 @@ enum perf_sw_ids {
  PERF_COUNT_SW_CPU_MIGRATIONS = 4,
  PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
  PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
 
  PERF_COUNT_SW_MAX, /* non-ABI */
 };
@@ -207,6 +214,15 @@ struct perf_event_attr {
  __u32 wakeup_events; /* wakeup every n events */
  __u32 wakeup_watermark; /* bytes before wakeup */
  };
+
+ union {
+  struct { /* Hardware breakpoint info */
+   __u64 bp_addr;
+   __u32 bp_type;
+   __u32 bp_len;
+  };
+ };
+
  __u32 __reserved_2;
 
  __u64 __reserved_3;
@@ -219,8 +235,9 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
 #define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
 #define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
 #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
 
 enum perf_event_ioc_flags {
  PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -475,6 +492,11 @@ struct hw_perf_event {
   s64 remaining;
   struct hrtimer hrtimer;
  };
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ union { /* breakpoint */
+  struct arch_hw_breakpoint info;
+ };
+#endif
  };
  atomic64_t prev_count;
  u64 sample_period;
@@ -543,6 +565,10 @@ struct perf_pending_entry {
  void (*func)(struct perf_pending_entry *);
 };
 
+typedef void (*perf_callback_t)(struct perf_event *, void *);
+
+struct perf_sample_data;
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -585,7 +611,7 @@ struct perf_event {
  u64 tstamp_running;
  u64 tstamp_stopped;
 
- struct perf_event_attr attr;
+ struct perf_event_attr attr;
  struct hw_perf_event hw;
 
  struct perf_event_context *ctx;
@@ -633,7 +659,20 @@ struct perf_event {
 
  struct pid_namespace *ns;
  u64 id;
+
+ void (*overflow_handler)(struct perf_event *event,
+  int nmi, struct perf_sample_data *data,
+  struct pt_regs *regs);
+
+#ifdef CONFIG_EVENT_PROFILE
+ struct event_filter *filter;
 #endif
+
+ perf_callback_t callback;
+
+ perf_callback_t event_callback;
+
+#endif /* CONFIG_PERF_EVENTS */
 };
 
 /**
@@ -706,7 +745,6 @@ struct perf_output_handle {
  int nmi;
  int sample;
  int locked;
- unsigned long flags;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -738,6 +776,14 @@ extern int hw_perf_group_sched_in(struct perf_event *group_leader,
  struct perf_cpu_context *cpuctx,
  struct perf_event_context *ctx, int cpu);
 extern void perf_event_update_userpage(struct perf_event *event);
+extern int perf_event_release_kernel(struct perf_event *event);
+extern struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr,
+ int cpu,
+ pid_t pid,
+ perf_callback_t callback);
+extern u64 perf_event_read_value(struct perf_event *event,
+ u64 *enabled, u64 *running);
 
 struct perf_sample_data {
  u64 type;
@@ -814,6 +860,7 @@ extern int sysctl_perf_event_sample_rate;
 extern void perf_event_init(void);
 extern void perf_tp_event(int event_id, u64 addr, u64 count,
  void *record, int entry_size);
+extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
 #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
@@ -827,6 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle,
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
  const void *buf, unsigned int len);
+extern int perf_swevent_get_recursion_context(void);
+extern void perf_swevent_put_recursion_context(int rctx);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task, int cpu) { }
@@ -848,11 +897,15 @@ static inline int perf_event_task_enable(void) { return -EINVAL; }
 static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
  struct pt_regs *regs, u64 addr) { }
+static inline void
+perf_bp_event(struct perf_event *event, void *data) { }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
 static inline void perf_event_comm(struct task_struct *tsk) { }
 static inline void perf_event_fork(struct task_struct *tsk) { }
 static inline void perf_event_init(void) { }
+static inline int perf_swevent_get_recursion_context(void) { return -1; }
+static inline void perf_swevent_put_recursion_context(int rctx) { }
 
 #endif
 
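A minimal sketch of an in-kernel counter user, based only on the prototypes added above; the example_ names, the chosen software event, and the error handling are assumptions:

static void example_event_callback(struct perf_event *event, void *data)
{
        /* invoked by the perf core on behalf of this kernel-owned event */
}

static struct perf_event *example_event;

static int example_open_counter(int cpu)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_MIGRATIONS,
                .size   = sizeof(attr),
        };

        example_event = perf_event_create_kernel_counter(&attr, cpu, -1,
                                                          example_event_callback);
        if (IS_ERR(example_event))
                return PTR_ERR(example_event);
        return 0;
}

/* reading and tearing down later:
 *      u64 enabled, running;
 *      u64 count = perf_event_read_value(example_event, &enabled, &running);
 *      perf_event_release_kernel(example_event);
 */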
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 065a3652a3ea..67608161df6b 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type)
  if (old != ACL_NOT_CACHED)
   posix_acl_release(old);
 }
+
+static inline void forget_all_cached_acls(struct inode *inode)
+{
+ struct posix_acl *old_access, *old_default;
+ spin_lock(&inode->i_lock);
+ old_access = inode->i_acl;
+ old_default = inode->i_default_acl;
+ inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
+ spin_unlock(&inode->i_lock);
+ if (old_access != ACL_NOT_CACHED)
+  posix_acl_release(old_access);
+ if (old_default != ACL_NOT_CACHED)
+  posix_acl_release(old_default);
+}
 #endif
 
 static inline void cache_no_acl(struct inode *inode)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 72b1a10a59b6..2e681d9555bd 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -105,6 +105,11 @@ struct preempt_notifier;
  * @sched_out: we've just been preempted
  * notifier: struct preempt_notifier for the task being preempted
  * next: the task that's kicking us out
+ *
+ * Please note that sched_in and out are called under different
+ * contexts. sched_out is called with rq lock held and irq disabled
+ * while sched_in is called without rq lock and irq enabled. This
+ * difference is intentional and depended upon by its users.
  */
 struct preempt_ops {
  void (*sched_in)(struct preempt_notifier *notifier, int cpu);
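An illustrative preempt_ops user (hypothetical names, CONFIG_PREEMPT_NOTIFIERS assumed), showing the context difference the new comment documents:

static void example_sched_in(struct preempt_notifier *pn, int cpu)
{
        /* the registering task is being scheduled back in on @cpu;
         * no rq lock is held and interrupts are enabled here */
}

static void example_sched_out(struct preempt_notifier *pn,
                              struct task_struct *next)
{
        /* about to be preempted in favour of @next; runs with the rq lock
         * held and interrupts disabled, so keep it short and non-blocking */
}

static struct preempt_ops example_preempt_ops = {
        .sched_in  = example_sched_in,
        .sched_out = example_sched_out,
};

static struct preempt_notifier example_notifier;

static void example_track_current_task(void)
{
        preempt_notifier_init(&example_notifier, &example_preempt_ops);
        preempt_notifier_register(&example_notifier);   /* attaches to current */
}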
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 78c48895b12a..ce9a9b2e5cd4 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
376 return flags >> _DQUOT_STATE_FLAGS; 376 return flags >> _DQUOT_STATE_FLAGS;
377} 377}
378 378
379#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
380extern void quota_send_warning(short type, unsigned int id, dev_t dev,
381 const char warntype);
382#else
383static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
384 const char warntype)
385{
386 return;
387}
388#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
389
379struct quota_info { 390struct quota_info {
380 unsigned int flags; /* Flags for diskquotas on this device */ 391 unsigned int flags; /* Flags for diskquotas on this device */
381 struct mutex dqio_mutex; /* lock device while I/O in progress */ 392 struct mutex dqio_mutex; /* lock device while I/O in progress */
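
quota_send_warning() gives filesystems that enforce quotas themselves a way to emit the same netlink warnings as the generic quota code; with the netlink interface compiled out it quietly becomes a no-op. A hedged sketch of a call site, assuming the usual QUOTA_NL_* warning types from this header:

	#include <linux/fs.h>
	#include <linux/quota.h>

	/* hedged sketch: warn the owner of @inode that the block hard limit was hit */
	static void example_warn_block_hardlimit(struct inode *inode)
	{
		quota_send_warning(USRQUOTA, inode->i_uid,
				   inode->i_sb->s_dev, QUOTA_NL_BHARDWARN);
	}
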
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 00044b856453..668cf1bef030 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -1,20 +1,31 @@
1#ifndef _LINUX_RATELIMIT_H 1#ifndef _LINUX_RATELIMIT_H
2#define _LINUX_RATELIMIT_H 2#define _LINUX_RATELIMIT_H
3
3#include <linux/param.h> 4#include <linux/param.h>
5#include <linux/spinlock_types.h>
4 6
5#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) 7#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
6#define DEFAULT_RATELIMIT_BURST 10 8#define DEFAULT_RATELIMIT_BURST 10
7 9
8struct ratelimit_state { 10struct ratelimit_state {
9 int interval; 11 spinlock_t lock; /* protect the state */
10 int burst; 12
11 int printed; 13 int interval;
12 int missed; 14 int burst;
13 unsigned long begin; 15 int printed;
16 int missed;
17 unsigned long begin;
14}; 18};
15 19
16#define DEFINE_RATELIMIT_STATE(name, interval, burst) \ 20#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
17 struct ratelimit_state name = {interval, burst,} 21 \
22 struct ratelimit_state name = { \
23 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
24 .interval = interval_init, \
25 .burst = burst_init, \
26 }
27
28extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
29#define __ratelimit(state) ___ratelimit(state, __func__)
18 30
19extern int __ratelimit(struct ratelimit_state *rs); 31#endif /* _LINUX_RATELIMIT_H */
20#endif
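
With the spinlock folded into the state and __ratelimit() now reporting the calling function, a typical user declares a static state and guards its noisy path with it. A minimal sketch; the message and the use of the default interval/burst are arbitrary choices:

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	/* allow bursts of 10 messages per 5*HZ, matching the defaults above */
	static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	static void example_complain(int err)
	{
		if (__ratelimit(&example_rs))
			printk(KERN_WARNING "example: transient error %d\n", err);
	}
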
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3ebd0b7bcb08..24440f4bf476 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,6 @@ struct rcu_head {
52}; 52};
53 53
54/* Exported common interfaces */ 54/* Exported common interfaces */
55#ifdef CONFIG_TREE_PREEMPT_RCU
56extern void synchronize_rcu(void);
57#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
58#define synchronize_rcu synchronize_sched
59#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
60extern void synchronize_rcu_bh(void); 55extern void synchronize_rcu_bh(void);
61extern void synchronize_sched(void); 56extern void synchronize_sched(void);
62extern void rcu_barrier(void); 57extern void rcu_barrier(void);
@@ -67,12 +62,11 @@ extern int sched_expedited_torture_stats(char *page);
67 62
68/* Internal to kernel */ 63/* Internal to kernel */
69extern void rcu_init(void); 64extern void rcu_init(void);
70extern void rcu_scheduler_starting(void);
71extern int rcu_needs_cpu(int cpu);
72extern int rcu_scheduler_active;
73 65
74#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 66#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
75#include <linux/rcutree.h> 67#include <linux/rcutree.h>
68#elif defined(CONFIG_TINY_RCU)
69#include <linux/rcutiny.h>
76#else 70#else
77#error "Unknown RCU implementation specified to kernel configuration" 71#error "Unknown RCU implementation specified to kernel configuration"
78#endif 72#endif
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
new file mode 100644
index 000000000000..c4ba9a78721e
--- /dev/null
+++ b/include/linux/rcutiny.h
@@ -0,0 +1,104 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 */
25#ifndef __LINUX_TINY_H
26#define __LINUX_TINY_H
27
28#include <linux/cache.h>
29
30void rcu_sched_qs(int cpu);
31void rcu_bh_qs(int cpu);
32
33#define __rcu_read_lock() preempt_disable()
34#define __rcu_read_unlock() preempt_enable()
35#define __rcu_read_lock_bh() local_bh_disable()
36#define __rcu_read_unlock_bh() local_bh_enable()
37#define call_rcu_sched call_rcu
38
39#define rcu_init_sched() do { } while (0)
40extern void rcu_check_callbacks(int cpu, int user);
41
42static inline int rcu_needs_cpu(int cpu)
43{
44 return 0;
45}
46
47/*
48 * Return the number of grace periods.
49 */
50static inline long rcu_batches_completed(void)
51{
52 return 0;
53}
54
55/*
56 * Return the number of bottom-half grace periods.
57 */
58static inline long rcu_batches_completed_bh(void)
59{
60 return 0;
61}
62
63extern int rcu_expedited_torture_stats(char *page);
64
65#define synchronize_rcu synchronize_sched
66
67static inline void synchronize_rcu_expedited(void)
68{
69 synchronize_sched();
70}
71
72static inline void synchronize_rcu_bh_expedited(void)
73{
74 synchronize_sched();
75}
76
77struct notifier_block;
78
79#ifdef CONFIG_NO_HZ
80
81extern void rcu_enter_nohz(void);
82extern void rcu_exit_nohz(void);
83
84#else /* #ifdef CONFIG_NO_HZ */
85
86static inline void rcu_enter_nohz(void)
87{
88}
89
90static inline void rcu_exit_nohz(void)
91{
92}
93
94#endif /* #else #ifdef CONFIG_NO_HZ */
95
96static inline void rcu_scheduler_starting(void)
97{
98}
99
100static inline void exit_rcu(void)
101{
102}
103
104#endif /* __LINUX_RCUTINY_H */
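
TINY_RCU keeps the classic RCU contract while collapsing the machinery: readers are just preempt_disable()/preempt_enable() sections and synchronize_rcu() maps to synchronize_sched(). Code written against the generic API therefore runs unchanged on either implementation. A hedged reader/updater sketch with a hypothetical shared pointer, assuming a single updater:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_cfg {
		int threshold;
	};

	static struct example_cfg *example_cfg;	/* hypothetical shared state */

	static int example_read_threshold(void)
	{
		struct example_cfg *cfg;
		int val = 0;

		rcu_read_lock();
		cfg = rcu_dereference(example_cfg);
		if (cfg)
			val = cfg->threshold;
		rcu_read_unlock();
		return val;
	}

	static void example_update_threshold(struct example_cfg *newcfg)
	{
		struct example_cfg *old = example_cfg;

		rcu_assign_pointer(example_cfg, newcfg);
		synchronize_rcu();	/* wait for pre-existing readers */
		kfree(old);
	}
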
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 9642c6bcb399..c93eee5911b0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,15 +34,15 @@ struct notifier_block;
34 34
35extern void rcu_sched_qs(int cpu); 35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu); 36extern void rcu_bh_qs(int cpu);
37extern int rcu_cpu_notify(struct notifier_block *self,
38 unsigned long action, void *hcpu);
39extern int rcu_needs_cpu(int cpu); 37extern int rcu_needs_cpu(int cpu);
38extern void rcu_scheduler_starting(void);
40extern int rcu_expedited_torture_stats(char *page); 39extern int rcu_expedited_torture_stats(char *page);
41 40
42#ifdef CONFIG_TREE_PREEMPT_RCU 41#ifdef CONFIG_TREE_PREEMPT_RCU
43 42
44extern void __rcu_read_lock(void); 43extern void __rcu_read_lock(void);
45extern void __rcu_read_unlock(void); 44extern void __rcu_read_unlock(void);
45extern void synchronize_rcu(void);
46extern void exit_rcu(void); 46extern void exit_rcu(void);
47 47
48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -57,7 +57,7 @@ static inline void __rcu_read_unlock(void)
57 preempt_enable(); 57 preempt_enable();
58} 58}
59 59
60#define __synchronize_sched() synchronize_rcu() 60#define synchronize_rcu synchronize_sched
61 61
62static inline void exit_rcu(void) 62static inline void exit_rcu(void)
63{ 63{
@@ -83,7 +83,6 @@ static inline void synchronize_rcu_bh_expedited(void)
83 synchronize_sched_expedited(); 83 synchronize_sched_expedited();
84} 84}
85 85
86extern void __rcu_init(void);
87extern void rcu_check_callbacks(int cpu, int user); 86extern void rcu_check_callbacks(int cpu, int user);
88 87
89extern long rcu_batches_completed(void); 88extern long rcu_batches_completed(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..89115ec7d43f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -145,7 +145,6 @@ extern unsigned long this_cpu_load(void);
145 145
146 146
147extern void calc_global_load(void); 147extern void calc_global_load(void);
148extern u64 cpu_nr_migrations(int cpu);
149 148
150extern unsigned long get_parent_ip(unsigned long addr); 149extern unsigned long get_parent_ip(unsigned long addr);
151 150
@@ -171,8 +170,6 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
171} 170}
172#endif 171#endif
173 172
174extern unsigned long long time_sync_thresh;
175
176/* 173/*
177 * Task state bitmask. NOTE! These bits are also 174 * Task state bitmask. NOTE! These bits are also
178 * encoded in fs/proc/array.c: get_task_state(). 175 * encoded in fs/proc/array.c: get_task_state().
@@ -349,7 +346,6 @@ extern signed long schedule_timeout(signed long timeout);
349extern signed long schedule_timeout_interruptible(signed long timeout); 346extern signed long schedule_timeout_interruptible(signed long timeout);
350extern signed long schedule_timeout_killable(signed long timeout); 347extern signed long schedule_timeout_killable(signed long timeout);
351extern signed long schedule_timeout_uninterruptible(signed long timeout); 348extern signed long schedule_timeout_uninterruptible(signed long timeout);
352asmlinkage void __schedule(void);
353asmlinkage void schedule(void); 349asmlinkage void schedule(void);
354extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); 350extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
355 351
@@ -628,6 +624,9 @@ struct signal_struct {
628 cputime_t utime, stime, cutime, cstime; 624 cputime_t utime, stime, cutime, cstime;
629 cputime_t gtime; 625 cputime_t gtime;
630 cputime_t cgtime; 626 cputime_t cgtime;
627#ifndef CONFIG_VIRT_CPU_ACCOUNTING
628 cputime_t prev_utime, prev_stime;
629#endif
631 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 630 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
632 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 631 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
633 unsigned long inblock, oublock, cinblock, coublock; 632 unsigned long inblock, oublock, cinblock, coublock;
@@ -1013,9 +1012,13 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1013 return to_cpumask(sd->span); 1012 return to_cpumask(sd->span);
1014} 1013}
1015 1014
1016extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, 1015extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1017 struct sched_domain_attr *dattr_new); 1016 struct sched_domain_attr *dattr_new);
1018 1017
1018/* Allocate an array of sched domains, for partition_sched_domains(). */
1019cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1020void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1021
1019/* Test a flag in parent sched domain */ 1022/* Test a flag in parent sched domain */
1020static inline int test_sd_parent(struct sched_domain *sd, int flag) 1023static inline int test_sd_parent(struct sched_domain *sd, int flag)
1021{ 1024{
@@ -1033,7 +1036,7 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1033struct sched_domain_attr; 1036struct sched_domain_attr;
1034 1037
1035static inline void 1038static inline void
1036partition_sched_domains(int ndoms_new, struct cpumask *doms_new, 1039partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1037 struct sched_domain_attr *dattr_new) 1040 struct sched_domain_attr *dattr_new)
1038{ 1041{
1039} 1042}
@@ -1331,7 +1334,9 @@ struct task_struct {
1331 1334
1332 cputime_t utime, stime, utimescaled, stimescaled; 1335 cputime_t utime, stime, utimescaled, stimescaled;
1333 cputime_t gtime; 1336 cputime_t gtime;
1337#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1334 cputime_t prev_utime, prev_stime; 1338 cputime_t prev_utime, prev_stime;
1339#endif
1335 unsigned long nvcsw, nivcsw; /* context switch counts */ 1340 unsigned long nvcsw, nivcsw; /* context switch counts */
1336 struct timespec start_time; /* monotonic time */ 1341 struct timespec start_time; /* monotonic time */
1337 struct timespec real_start_time; /* boot based time */ 1342 struct timespec real_start_time; /* boot based time */
@@ -1421,17 +1426,17 @@ struct task_struct {
1421#endif 1426#endif
1422#ifdef CONFIG_TRACE_IRQFLAGS 1427#ifdef CONFIG_TRACE_IRQFLAGS
1423 unsigned int irq_events; 1428 unsigned int irq_events;
1424 int hardirqs_enabled;
1425 unsigned long hardirq_enable_ip; 1429 unsigned long hardirq_enable_ip;
1426 unsigned int hardirq_enable_event;
1427 unsigned long hardirq_disable_ip; 1430 unsigned long hardirq_disable_ip;
1431 unsigned int hardirq_enable_event;
1428 unsigned int hardirq_disable_event; 1432 unsigned int hardirq_disable_event;
1429 int softirqs_enabled; 1433 int hardirqs_enabled;
1434 int hardirq_context;
1430 unsigned long softirq_disable_ip; 1435 unsigned long softirq_disable_ip;
1431 unsigned int softirq_disable_event;
1432 unsigned long softirq_enable_ip; 1436 unsigned long softirq_enable_ip;
1437 unsigned int softirq_disable_event;
1433 unsigned int softirq_enable_event; 1438 unsigned int softirq_enable_event;
1434 int hardirq_context; 1439 int softirqs_enabled;
1435 int softirq_context; 1440 int softirq_context;
1436#endif 1441#endif
1437#ifdef CONFIG_LOCKDEP 1442#ifdef CONFIG_LOCKDEP
@@ -1720,9 +1725,8 @@ static inline void put_task_struct(struct task_struct *t)
1720 __put_task_struct(t); 1725 __put_task_struct(t);
1721} 1726}
1722 1727
1723extern cputime_t task_utime(struct task_struct *p); 1728extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1724extern cputime_t task_stime(struct task_struct *p); 1729extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1725extern cputime_t task_gtime(struct task_struct *p);
1726 1730
1727/* 1731/*
1728 * Per process flags 1732 * Per process flags
@@ -2086,11 +2090,18 @@ static inline int is_si_special(const struct siginfo *info)
2086 return info <= SEND_SIG_FORCED; 2090 return info <= SEND_SIG_FORCED;
2087} 2091}
2088 2092
2089/* True if we are on the alternate signal stack. */ 2093/*
2090 2094 * True if we are on the alternate signal stack.
2095 */
2091static inline int on_sig_stack(unsigned long sp) 2096static inline int on_sig_stack(unsigned long sp)
2092{ 2097{
2093 return (sp - current->sas_ss_sp < current->sas_ss_size); 2098#ifdef CONFIG_STACK_GROWSUP
2099 return sp >= current->sas_ss_sp &&
2100 sp - current->sas_ss_sp < current->sas_ss_size;
2101#else
2102 return sp > current->sas_ss_sp &&
2103 sp - current->sas_ss_sp <= current->sas_ss_size;
2104#endif
2094} 2105}
2095 2106
2096static inline int sas_ss_flags(unsigned long sp) 2107static inline int sas_ss_flags(unsigned long sp)
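
The accounting change above replaces the separate task_utime()/task_stime()/task_gtime() accessors with task_times() and thread_group_times(), which hand back both values in one call. A hedged sketch of the conversion a caller would make; the reporting helper itself is hypothetical:

	#include <linux/kernel.h>
	#include <linux/sched.h>

	static void example_report_cpu_time(struct task_struct *p)
	{
		cputime_t ut, st;

		/* previously: ut = task_utime(p); st = task_stime(p); */
		task_times(p, &ut, &st);

		printk(KERN_INFO "%s[%d]: utime=%lu stime=%lu\n",
		       p->comm, task_pid_nr(p),
		       (unsigned long)cputime_to_clock_t(ut),
		       (unsigned long)cputime_to_clock_t(st));
	}
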
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index d2c5ed845bcc..33406174cbe8 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -1,6 +1,15 @@
1#ifndef _LINUX_SECUREBITS_H 1#ifndef _LINUX_SECUREBITS_H
2#define _LINUX_SECUREBITS_H 1 2#define _LINUX_SECUREBITS_H 1
3 3
4/* Each securesetting is implemented using two bits. One bit specifies
5 whether the setting is on or off. The other bit specify whether the
6 setting is locked or not. A setting which is locked cannot be
7 changed from user-level. */
8#define issecure_mask(X) (1 << (X))
9#ifdef __KERNEL__
10#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
11#endif
12
4#define SECUREBITS_DEFAULT 0x00000000 13#define SECUREBITS_DEFAULT 0x00000000
5 14
6/* When set UID 0 has no special privileges. When unset, we support 15/* When set UID 0 has no special privileges. When unset, we support
@@ -12,6 +21,9 @@
12#define SECURE_NOROOT 0 21#define SECURE_NOROOT 0
13#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ 22#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
14 23
24#define SECBIT_NOROOT (issecure_mask(SECURE_NOROOT))
25#define SECBIT_NOROOT_LOCKED (issecure_mask(SECURE_NOROOT_LOCKED))
26
15/* When set, setuid to/from uid 0 does not trigger capability-"fixup". 27/* When set, setuid to/from uid 0 does not trigger capability-"fixup".
16 When unset, to provide compatiblility with old programs relying on 28 When unset, to provide compatiblility with old programs relying on
17 set*uid to gain/lose privilege, transitions to/from uid 0 cause 29 set*uid to gain/lose privilege, transitions to/from uid 0 cause
@@ -19,6 +31,10 @@
19#define SECURE_NO_SETUID_FIXUP 2 31#define SECURE_NO_SETUID_FIXUP 2
20#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ 32#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
21 33
34#define SECBIT_NO_SETUID_FIXUP (issecure_mask(SECURE_NO_SETUID_FIXUP))
35#define SECBIT_NO_SETUID_FIXUP_LOCKED \
36 (issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED))
37
22/* When set, a process can retain its capabilities even after 38/* When set, a process can retain its capabilities even after
23 transitioning to a non-root user (the set-uid fixup suppressed by 39 transitioning to a non-root user (the set-uid fixup suppressed by
24 bit 2). Bit-4 is cleared when a process calls exec(); setting both 40 bit 2). Bit-4 is cleared when a process calls exec(); setting both
@@ -27,12 +43,8 @@
27#define SECURE_KEEP_CAPS 4 43#define SECURE_KEEP_CAPS 4
28#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ 44#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
29 45
30/* Each securesetting is implemented using two bits. One bit specifies 46#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
31 whether the setting is on or off. The other bit specify whether the 47#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
32 setting is locked or not. A setting which is locked cannot be
33 changed from user-level. */
34#define issecure_mask(X) (1 << (X))
35#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
36 48
37#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ 49#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
38 issecure_mask(SECURE_NO_SETUID_FIXUP) | \ 50 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
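
Moving issecure_mask() and the new SECBIT_* masks ahead of the bit definitions (with issecure() itself kept under __KERNEL__) lets userspace name the bits it passes to prctl(PR_SET_SECUREBITS) instead of open-coding shifts. A hedged userspace sketch that locks keep-caps on; it assumes kernel headers new enough to carry these defines and a caller holding CAP_SETPCAP:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/securebits.h>

	int main(void)
	{
		unsigned long bits = SECBIT_KEEP_CAPS | SECBIT_KEEP_CAPS_LOCKED;

		if (prctl(PR_SET_SECUREBITS, bits, 0, 0, 0) < 0) {
			perror("PR_SET_SECUREBITS");
			return 1;
		}
		printf("securebits now 0x%lx\n",
		       (unsigned long)prctl(PR_GET_SECUREBITS, 0, 0, 0, 0));
		return 0;
	}
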
diff --git a/include/linux/security.h b/include/linux/security.h
index 239e40d0450b..466cbadbd1ef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -447,6 +447,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
447 * @new_dir contains the path structure for parent of the new link. 447 * @new_dir contains the path structure for parent of the new link.
448 * @new_dentry contains the dentry structure of the new link. 448 * @new_dentry contains the dentry structure of the new link.
449 * Return 0 if permission is granted. 449 * Return 0 if permission is granted.
450 * @path_chmod:
451 * Check for permission to change DAC's permission of a file or directory.
452 * @dentry contains the dentry structure.
453 * @mnt contains the vfsmnt structure.
454 * @mode contains DAC's mode.
455 * Return 0 if permission is granted.
456 * @path_chown:
457 * Check for permission to change owner/group of a file or directory.
458 * @path contains the path structure.
459 * @uid contains new owner's ID.
460 * @gid contains new group's ID.
461 * Return 0 if permission is granted.
462 * @path_chroot:
463 * Check for permission to change root directory.
464 * @path contains the path structure.
465 * Return 0 if permission is granted.
450 * @inode_readlink: 466 * @inode_readlink:
451 * Check the permission to read the symbolic link. 467 * Check the permission to read the symbolic link.
452 * @dentry contains the dentry structure for the file link. 468 * @dentry contains the dentry structure for the file link.
@@ -690,6 +706,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
690 * @kernel_module_request: 706 * @kernel_module_request:
691 * Ability to trigger the kernel to automatically upcall to userspace for 707 * Ability to trigger the kernel to automatically upcall to userspace for
692 * userspace to load a kernel module with the given name. 708 * userspace to load a kernel module with the given name.
709 * @kmod_name name of the module requested by the kernel
693 * Return 0 if successful. 710 * Return 0 if successful.
694 * @task_setuid: 711 * @task_setuid:
695 * Check permission before setting one or more of the user identity 712 * Check permission before setting one or more of the user identity
@@ -1488,6 +1505,10 @@ struct security_operations {
1488 struct dentry *new_dentry); 1505 struct dentry *new_dentry);
1489 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, 1506 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
1490 struct path *new_dir, struct dentry *new_dentry); 1507 struct path *new_dir, struct dentry *new_dentry);
1508 int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt,
1509 mode_t mode);
1510 int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
1511 int (*path_chroot) (struct path *path);
1491#endif 1512#endif
1492 1513
1493 int (*inode_alloc_security) (struct inode *inode); 1514 int (*inode_alloc_security) (struct inode *inode);
@@ -1557,7 +1578,7 @@ struct security_operations {
1557 void (*cred_transfer)(struct cred *new, const struct cred *old); 1578 void (*cred_transfer)(struct cred *new, const struct cred *old);
1558 int (*kernel_act_as)(struct cred *new, u32 secid); 1579 int (*kernel_act_as)(struct cred *new, u32 secid);
1559 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1580 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1560 int (*kernel_module_request)(void); 1581 int (*kernel_module_request)(char *kmod_name);
1561 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); 1582 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
1562 int (*task_fix_setuid) (struct cred *new, const struct cred *old, 1583 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1563 int flags); 1584 int flags);
@@ -1822,7 +1843,7 @@ void security_commit_creds(struct cred *new, const struct cred *old);
1822void security_transfer_creds(struct cred *new, const struct cred *old); 1843void security_transfer_creds(struct cred *new, const struct cred *old);
1823int security_kernel_act_as(struct cred *new, u32 secid); 1844int security_kernel_act_as(struct cred *new, u32 secid);
1824int security_kernel_create_files_as(struct cred *new, struct inode *inode); 1845int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1825int security_kernel_module_request(void); 1846int security_kernel_module_request(char *kmod_name);
1826int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); 1847int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
1827int security_task_fix_setuid(struct cred *new, const struct cred *old, 1848int security_task_fix_setuid(struct cred *new, const struct cred *old,
1828 int flags); 1849 int flags);
@@ -2387,7 +2408,7 @@ static inline int security_kernel_create_files_as(struct cred *cred,
2387 return 0; 2408 return 0;
2388} 2409}
2389 2410
2390static inline int security_kernel_module_request(void) 2411static inline int security_kernel_module_request(char *kmod_name)
2391{ 2412{
2392 return 0; 2413 return 0;
2393} 2414}
@@ -2952,6 +2973,10 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
2952 struct dentry *new_dentry); 2973 struct dentry *new_dentry);
2953int security_path_rename(struct path *old_dir, struct dentry *old_dentry, 2974int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
2954 struct path *new_dir, struct dentry *new_dentry); 2975 struct path *new_dir, struct dentry *new_dentry);
2976int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
2977 mode_t mode);
2978int security_path_chown(struct path *path, uid_t uid, gid_t gid);
2979int security_path_chroot(struct path *path);
2955#else /* CONFIG_SECURITY_PATH */ 2980#else /* CONFIG_SECURITY_PATH */
2956static inline int security_path_unlink(struct path *dir, struct dentry *dentry) 2981static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
2957{ 2982{
@@ -3001,6 +3026,23 @@ static inline int security_path_rename(struct path *old_dir,
3001{ 3026{
3002 return 0; 3027 return 0;
3003} 3028}
3029
3030static inline int security_path_chmod(struct dentry *dentry,
3031 struct vfsmount *mnt,
3032 mode_t mode)
3033{
3034 return 0;
3035}
3036
3037static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid)
3038{
3039 return 0;
3040}
3041
3042static inline int security_path_chroot(struct path *path)
3043{
3044 return 0;
3045}
3004#endif /* CONFIG_SECURITY_PATH */ 3046#endif /* CONFIG_SECURITY_PATH */
3005 3047
3006#ifdef CONFIG_KEYS 3048#ifdef CONFIG_KEYS
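
For an LSM, the new hooks slot into security_operations alongside the existing path_* entries (they depend on CONFIG_SECURITY_PATH), and kernel_module_request now receives the name of the module being requested. A hedged sketch of a module filling them in; the policy checks are placeholders and the module itself is hypothetical:

	#include <linux/security.h>

	static int example_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
				      mode_t mode)
	{
		/* a real module would consult its policy; allow everything here */
		return 0;
	}

	static int example_path_chown(struct path *path, uid_t uid, gid_t gid)
	{
		return 0;
	}

	static int example_path_chroot(struct path *path)
	{
		return 0;
	}

	static int example_kernel_module_request(char *kmod_name)
	{
		/* the requested module name is now visible to the decision */
		return 0;
	}

	/* path_* members require CONFIG_SECURITY_PATH */
	static struct security_operations example_security_ops = {
		.path_chmod		= example_path_chmod,
		.path_chown		= example_path_chown,
		.path_chroot		= example_path_chroot,
		.kernel_module_request	= example_kernel_module_request,
	};
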
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index b65c8881f07a..13337bf6c3f5 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -17,13 +17,20 @@
17#ifdef CONFIG_SLOW_WORK 17#ifdef CONFIG_SLOW_WORK
18 18
19#include <linux/sysctl.h> 19#include <linux/sysctl.h>
20#include <linux/timer.h>
20 21
21struct slow_work; 22struct slow_work;
23#ifdef CONFIG_SLOW_WORK_DEBUG
24struct seq_file;
25#endif
22 26
23/* 27/*
24 * The operations used to support slow work items 28 * The operations used to support slow work items
25 */ 29 */
26struct slow_work_ops { 30struct slow_work_ops {
31 /* owner */
32 struct module *owner;
33
27 /* get a ref on a work item 34 /* get a ref on a work item
28 * - return 0 if successful, -ve if not 35 * - return 0 if successful, -ve if not
29 */ 36 */
@@ -34,6 +41,11 @@ struct slow_work_ops {
34 41
35 /* execute a work item */ 42 /* execute a work item */
36 void (*execute)(struct slow_work *work); 43 void (*execute)(struct slow_work *work);
44
45#ifdef CONFIG_SLOW_WORK_DEBUG
46 /* describe a work item for debugfs */
47 void (*desc)(struct slow_work *work, struct seq_file *m);
48#endif
37}; 49};
38 50
39/* 51/*
@@ -42,13 +54,24 @@ struct slow_work_ops {
42 * queued 54 * queued
43 */ 55 */
44struct slow_work { 56struct slow_work {
57 struct module *owner; /* the owning module */
45 unsigned long flags; 58 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ 59#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */ 60#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ 61#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ 62#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
63#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
64#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
50 const struct slow_work_ops *ops; /* operations table for this item */ 65 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */ 66 struct list_head link; /* link in queue */
67#ifdef CONFIG_SLOW_WORK_DEBUG
68 struct timespec mark; /* jiffies at which queued or exec begun */
69#endif
70};
71
72struct delayed_slow_work {
73 struct slow_work work;
74 struct timer_list timer;
52}; 75};
53 76
54/** 77/**
@@ -67,6 +90,20 @@ static inline void slow_work_init(struct slow_work *work,
67} 90}
68 91
69/** 92/**
93 * delayed_slow_work_init - Initialise a delayed slow work item
94 * @dwork: The delayed work item to initialise
95 * @ops: The operations to use to handle the slow work item
96 *
97 * Initialise a delayed slow work item.
98 */
99static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
100 const struct slow_work_ops *ops)
101{
102 init_timer(&dwork->timer);
103 slow_work_init(&dwork->work, ops);
104}
105
106/**
70 * vslow_work_init - Initialise a very slow work item 107 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise 108 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item 109 * @ops: The operations to use to handle the slow work item
@@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work,
83 INIT_LIST_HEAD(&work->link); 120 INIT_LIST_HEAD(&work->link);
84} 121}
85 122
123/**
124 * slow_work_is_queued - Determine if a slow work item is on the work queue
125 * @work: The work item to test
126 *
127 * Determine if the specified slow-work item is on the work queue. This
128 * returns true if it is actually on the queue.
129 *
130 * If the item is executing and has been marked for requeue when execution
131 * finishes, then false will be returned.
132 *
133 * Anyone wishing to wait for completion of execution can wait on the
134 * SLOW_WORK_EXECUTING bit.
135 */
136static inline bool slow_work_is_queued(struct slow_work *work)
137{
138 unsigned long flags = work->flags;
139 return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
140}
141
86extern int slow_work_enqueue(struct slow_work *work); 142extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void); 143extern void slow_work_cancel(struct slow_work *work);
88extern void slow_work_unregister_user(void); 144extern int slow_work_register_user(struct module *owner);
145extern void slow_work_unregister_user(struct module *owner);
146
147extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
148 unsigned long delay);
149
150static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
151{
152 slow_work_cancel(&dwork->work);
153}
154
155extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
156 signed long *_timeout);
89 157
90#ifdef CONFIG_SYSCTL 158#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[]; 159extern ctl_table slow_work_sysctls[];
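
Putting the pieces together, a user of the extended API passes its module to slow_work_register_user(), sets .owner in the ops, and can queue or cancel a delayed item. A hedged module-shaped sketch; the work body is a placeholder and the 5*HZ delay is arbitrary:

	#include <linux/module.h>
	#include <linux/slow-work.h>

	static int example_get_ref(struct slow_work *work)
	{
		return 0;		/* object is static: nothing extra to pin */
	}

	static void example_put_ref(struct slow_work *work)
	{
	}

	static void example_execute(struct slow_work *work)
	{
		/* long-running, sleepable work goes here */
	}

	static const struct slow_work_ops example_slow_ops = {
		.owner	 = THIS_MODULE,
		.get_ref = example_get_ref,
		.put_ref = example_put_ref,
		.execute = example_execute,
	};

	static struct delayed_slow_work example_dwork;

	static int __init example_init(void)
	{
		int ret = slow_work_register_user(THIS_MODULE);

		if (ret < 0)
			return ret;

		delayed_slow_work_init(&example_dwork, &example_slow_ops);
		return delayed_slow_work_enqueue(&example_dwork, 5 * HZ);
	}

	static void __exit example_exit(void)
	{
		delayed_slow_work_cancel(&example_dwork);
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(example_init);
	module_exit(example_exit);
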
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 39c64bae776d..7a0570e6a596 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -76,6 +76,9 @@ void smp_call_function_many(const struct cpumask *mask,
76void __smp_call_function_single(int cpuid, struct call_single_data *data, 76void __smp_call_function_single(int cpuid, struct call_single_data *data,
77 int wait); 77 int wait);
78 78
79int smp_call_function_any(const struct cpumask *mask,
80 void (*func)(void *info), void *info, int wait);
81
79/* 82/*
80 * Generic and arch helpers 83 * Generic and arch helpers
81 */ 84 */
@@ -137,9 +140,15 @@ static inline void smp_send_reschedule(int cpu) { }
137#define smp_prepare_boot_cpu() do {} while (0) 140#define smp_prepare_boot_cpu() do {} while (0)
138#define smp_call_function_many(mask, func, info, wait) \ 141#define smp_call_function_many(mask, func, info, wait) \
139 (up_smp_call_function(func, info)) 142 (up_smp_call_function(func, info))
140static inline void init_call_single_data(void) 143static inline void init_call_single_data(void) { }
144
145static inline int
146smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
147 void *info, int wait)
141{ 148{
149 return smp_call_function_single(0, func, info, wait);
142} 150}
151
143#endif /* !SMP */ 152#endif /* !SMP */
144 153
145/* 154/*
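
smp_call_function_any() runs a function on some CPU taken from the given mask, preferring the calling CPU when it qualifies, which suits "read a value from any CPU in this group" users; on UP it collapses to a local call on CPU 0 as the stub above shows. A hedged sketch; the per-CPU read is only a placeholder:

	#include <linux/smp.h>

	struct example_probe {
		unsigned int value;
	};

	static void example_read_value(void *info)
	{
		struct example_probe *p = info;

		p->value = smp_processor_id();	/* placeholder per-CPU read */
	}

	/* run example_read_value() on whichever CPU in @mask is cheapest to reach */
	static unsigned int example_query(const struct cpumask *mask)
	{
		struct example_probe p = { 0 };

		smp_call_function_any(mask, example_read_value, &p, 1 /* wait */);
		return p.value;
	}
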
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 813be59bf345..2ea1dd1ba21c 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
24 return 0; 24 return 0;
25} 25}
26 26
27extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); 27extern void __lockfunc
28extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); 28_lock_kernel(const char *func, const char *file, int line)
29__acquires(kernel_lock);
30
31extern void __lockfunc
32_unlock_kernel(const char *func, const char *file, int line)
33__releases(kernel_lock);
34
35#define lock_kernel() do { \
36 _lock_kernel(__func__, __FILE__, __LINE__); \
37} while (0)
38
39#define unlock_kernel() do { \
40 _unlock_kernel(__func__, __FILE__, __LINE__); \
41} while (0)
29 42
30/* 43/*
31 * Various legacy drivers don't really need the BKL in a specific 44 * Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)
41 54
42#else 55#else
43 56
44#define lock_kernel() do { } while(0) 57#define lock_kernel()
45#define unlock_kernel() do { } while(0) 58#define unlock_kernel()
46#define release_kernel_lock(task) do { } while(0) 59#define release_kernel_lock(task) do { } while(0)
47#define cycle_kernel_lock() do { } while(0) 60#define cycle_kernel_lock() do { } while(0)
48#define reacquire_kernel_lock(task) 0 61#define reacquire_kernel_lock(task) 0
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f0ca7a7a1757..71dccfeb0d88 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -79,8 +79,6 @@
79 */ 79 */
80#include <linux/spinlock_types.h> 80#include <linux/spinlock_types.h>
81 81
82extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
83
84/* 82/*
85 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
86 */ 84 */
@@ -102,7 +100,7 @@ do { \
102 100
103#else 101#else
104# define spin_lock_init(lock) \ 102# define spin_lock_init(lock) \
105 do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) 103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
106#endif 104#endif
107 105
108#ifdef CONFIG_DEBUG_SPINLOCK 106#ifdef CONFIG_DEBUG_SPINLOCK
@@ -116,7 +114,7 @@ do { \
116} while (0) 114} while (0)
117#else 115#else
118# define rwlock_init(lock) \ 116# define rwlock_init(lock) \
119 do { *(lock) = RW_LOCK_UNLOCKED; } while (0) 117 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
120#endif 118#endif
121 119
122#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 7a7e18fc2415..8264a7f459bc 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock); 61 __releases(lock);
62 62
63/* 63#ifdef CONFIG_INLINE_SPIN_LOCK
64 * We inline the unlock functions in the nondebug case:
65 */
66#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
67#define __always_inline__spin_unlock
68#define __always_inline__read_unlock
69#define __always_inline__write_unlock
70#define __always_inline__spin_unlock_irq
71#define __always_inline__read_unlock_irq
72#define __always_inline__write_unlock_irq
73#endif
74
75#ifndef CONFIG_DEBUG_SPINLOCK
76#ifndef CONFIG_GENERIC_LOCKBREAK
77
78#ifdef __always_inline__spin_lock
79#define _spin_lock(lock) __spin_lock(lock) 64#define _spin_lock(lock) __spin_lock(lock)
80#endif 65#endif
81 66
82#ifdef __always_inline__read_lock 67#ifdef CONFIG_INLINE_READ_LOCK
83#define _read_lock(lock) __read_lock(lock) 68#define _read_lock(lock) __read_lock(lock)
84#endif 69#endif
85 70
86#ifdef __always_inline__write_lock 71#ifdef CONFIG_INLINE_WRITE_LOCK
87#define _write_lock(lock) __write_lock(lock) 72#define _write_lock(lock) __write_lock(lock)
88#endif 73#endif
89 74
90#ifdef __always_inline__spin_lock_bh 75#ifdef CONFIG_INLINE_SPIN_LOCK_BH
91#define _spin_lock_bh(lock) __spin_lock_bh(lock) 76#define _spin_lock_bh(lock) __spin_lock_bh(lock)
92#endif 77#endif
93 78
94#ifdef __always_inline__read_lock_bh 79#ifdef CONFIG_INLINE_READ_LOCK_BH
95#define _read_lock_bh(lock) __read_lock_bh(lock) 80#define _read_lock_bh(lock) __read_lock_bh(lock)
96#endif 81#endif
97 82
98#ifdef __always_inline__write_lock_bh 83#ifdef CONFIG_INLINE_WRITE_LOCK_BH
99#define _write_lock_bh(lock) __write_lock_bh(lock) 84#define _write_lock_bh(lock) __write_lock_bh(lock)
100#endif 85#endif
101 86
102#ifdef __always_inline__spin_lock_irq 87#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
103#define _spin_lock_irq(lock) __spin_lock_irq(lock) 88#define _spin_lock_irq(lock) __spin_lock_irq(lock)
104#endif 89#endif
105 90
106#ifdef __always_inline__read_lock_irq 91#ifdef CONFIG_INLINE_READ_LOCK_IRQ
107#define _read_lock_irq(lock) __read_lock_irq(lock) 92#define _read_lock_irq(lock) __read_lock_irq(lock)
108#endif 93#endif
109 94
110#ifdef __always_inline__write_lock_irq 95#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
111#define _write_lock_irq(lock) __write_lock_irq(lock) 96#define _write_lock_irq(lock) __write_lock_irq(lock)
112#endif 97#endif
113 98
114#ifdef __always_inline__spin_lock_irqsave 99#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
115#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) 100#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
116#endif 101#endif
117 102
118#ifdef __always_inline__read_lock_irqsave 103#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
119#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) 104#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
120#endif 105#endif
121 106
122#ifdef __always_inline__write_lock_irqsave 107#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
123#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) 108#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
124#endif 109#endif
125 110
126#endif /* !CONFIG_GENERIC_LOCKBREAK */ 111#ifdef CONFIG_INLINE_SPIN_TRYLOCK
127
128#ifdef __always_inline__spin_trylock
129#define _spin_trylock(lock) __spin_trylock(lock) 112#define _spin_trylock(lock) __spin_trylock(lock)
130#endif 113#endif
131 114
132#ifdef __always_inline__read_trylock 115#ifdef CONFIG_INLINE_READ_TRYLOCK
133#define _read_trylock(lock) __read_trylock(lock) 116#define _read_trylock(lock) __read_trylock(lock)
134#endif 117#endif
135 118
136#ifdef __always_inline__write_trylock 119#ifdef CONFIG_INLINE_WRITE_TRYLOCK
137#define _write_trylock(lock) __write_trylock(lock) 120#define _write_trylock(lock) __write_trylock(lock)
138#endif 121#endif
139 122
140#ifdef __always_inline__spin_trylock_bh 123#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
141#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) 124#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
142#endif 125#endif
143 126
144#ifdef __always_inline__spin_unlock 127#ifdef CONFIG_INLINE_SPIN_UNLOCK
145#define _spin_unlock(lock) __spin_unlock(lock) 128#define _spin_unlock(lock) __spin_unlock(lock)
146#endif 129#endif
147 130
148#ifdef __always_inline__read_unlock 131#ifdef CONFIG_INLINE_READ_UNLOCK
149#define _read_unlock(lock) __read_unlock(lock) 132#define _read_unlock(lock) __read_unlock(lock)
150#endif 133#endif
151 134
152#ifdef __always_inline__write_unlock 135#ifdef CONFIG_INLINE_WRITE_UNLOCK
153#define _write_unlock(lock) __write_unlock(lock) 136#define _write_unlock(lock) __write_unlock(lock)
154#endif 137#endif
155 138
156#ifdef __always_inline__spin_unlock_bh 139#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
157#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) 140#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
158#endif 141#endif
159 142
160#ifdef __always_inline__read_unlock_bh 143#ifdef CONFIG_INLINE_READ_UNLOCK_BH
161#define _read_unlock_bh(lock) __read_unlock_bh(lock) 144#define _read_unlock_bh(lock) __read_unlock_bh(lock)
162#endif 145#endif
163 146
164#ifdef __always_inline__write_unlock_bh 147#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
165#define _write_unlock_bh(lock) __write_unlock_bh(lock) 148#define _write_unlock_bh(lock) __write_unlock_bh(lock)
166#endif 149#endif
167 150
168#ifdef __always_inline__spin_unlock_irq 151#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
169#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) 152#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
170#endif 153#endif
171 154
172#ifdef __always_inline__read_unlock_irq 155#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
173#define _read_unlock_irq(lock) __read_unlock_irq(lock) 156#define _read_unlock_irq(lock) __read_unlock_irq(lock)
174#endif 157#endif
175 158
176#ifdef __always_inline__write_unlock_irq 159#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
177#define _write_unlock_irq(lock) __write_unlock_irq(lock) 160#define _write_unlock_irq(lock) __write_unlock_irq(lock)
178#endif 161#endif
179 162
180#ifdef __always_inline__spin_unlock_irqrestore 163#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
181#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) 164#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
182#endif 165#endif
183 166
184#ifdef __always_inline__read_unlock_irqrestore 167#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
185#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) 168#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
186#endif 169#endif
187 170
188#ifdef __always_inline__write_unlock_irqrestore 171#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
189#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) 172#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
190#endif 173#endif
191 174
192#endif /* CONFIG_DEBUG_SPINLOCK */
193
194static inline int __spin_trylock(spinlock_t *lock) 175static inline int __spin_trylock(spinlock_t *lock)
195{ 176{
196 preempt_disable(); 177 preempt_disable();
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index aca0eee53930..4765d97dcafb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -48,6 +48,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); 48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
50void synchronize_srcu(struct srcu_struct *sp); 50void synchronize_srcu(struct srcu_struct *sp);
51void synchronize_srcu_expedited(struct srcu_struct *sp);
51long srcu_batches_completed(struct srcu_struct *sp); 52long srcu_batches_completed(struct srcu_struct *sp);
52 53
53#endif 54#endif
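
synchronize_srcu_expedited() keeps the semantics of synchronize_srcu() but trades CPU for latency, so it suits update paths that are rare yet latency-sensitive. A hedged sketch of an SRCU-protected pointer whose updater uses the expedited variant; the data type is hypothetical and a single updater plus a prior init_srcu_struct() call are assumed:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct example_map {
		int entries;
	};

	static struct srcu_struct example_srcu;	/* init_srcu_struct() at setup */
	static struct example_map *example_map;

	static int example_read_entries(void)
	{
		struct example_map *map;
		int idx, n = 0;

		idx = srcu_read_lock(&example_srcu);
		map = rcu_dereference(example_map);
		if (map)
			n = map->entries;
		srcu_read_unlock(&example_srcu, idx);
		return n;
	}

	static void example_replace_map(struct example_map *new_map)
	{
		struct example_map *old = example_map;

		rcu_assign_pointer(example_map, new_map);
		synchronize_srcu_expedited(&example_srcu);	/* readers of @old are done */
		kfree(old);
	}
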
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 73b1f1cec423..febedcf67c7e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,6 +7,8 @@ struct device;
7struct dma_attrs; 7struct dma_attrs;
8struct scatterlist; 8struct scatterlist;
9 9
10extern int swiotlb_force;
11
10/* 12/*
11 * Maximum allowable number of contiguous slabs to map, 13 * Maximum allowable number of contiguous slabs to map,
12 * must be a power of 2. What is the appropriate value ? 14 * must be a power of 2. What is the appropriate value ?
@@ -20,8 +22,7 @@ struct scatterlist;
20 */ 22 */
21#define IO_TLB_SHIFT 11 23#define IO_TLB_SHIFT 11
22 24
23extern void 25extern void swiotlb_init(int verbose);
24swiotlb_init(void);
25 26
26extern void 27extern void
27*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 28*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -88,4 +89,11 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
88extern int 89extern int
89swiotlb_dma_supported(struct device *hwdev, u64 mask); 90swiotlb_dma_supported(struct device *hwdev, u64 mask);
90 91
92#ifdef CONFIG_SWIOTLB
93extern void __init swiotlb_free(void);
94#else
95static inline void swiotlb_free(void) { }
96#endif
97
98extern void swiotlb_print_info(void);
91#endif /* __LINUX_SWIOTLB_H */ 99#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a990ace1a838..e79e2f3ccc51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -99,37 +99,16 @@ struct perf_event_attr;
99#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 99#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
100 100
101#ifdef CONFIG_EVENT_PROFILE 101#ifdef CONFIG_EVENT_PROFILE
102#define TRACE_SYS_ENTER_PROFILE(sname) \
103static int prof_sysenter_enable_##sname(void) \
104{ \
105 return reg_prof_syscall_enter("sys"#sname); \
106} \
107 \
108static void prof_sysenter_disable_##sname(void) \
109{ \
110 unreg_prof_syscall_enter("sys"#sname); \
111}
112
113#define TRACE_SYS_EXIT_PROFILE(sname) \
114static int prof_sysexit_enable_##sname(void) \
115{ \
116 return reg_prof_syscall_exit("sys"#sname); \
117} \
118 \
119static void prof_sysexit_disable_##sname(void) \
120{ \
121 unreg_prof_syscall_exit("sys"#sname); \
122}
123 102
124#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ 103#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
125 .profile_count = ATOMIC_INIT(-1), \ 104 .profile_count = ATOMIC_INIT(-1), \
126 .profile_enable = prof_sysenter_enable_##sname, \ 105 .profile_enable = prof_sysenter_enable, \
127 .profile_disable = prof_sysenter_disable_##sname, 106 .profile_disable = prof_sysenter_disable,
128 107
129#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ 108#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
130 .profile_count = ATOMIC_INIT(-1), \ 109 .profile_count = ATOMIC_INIT(-1), \
131 .profile_enable = prof_sysexit_enable_##sname, \ 110 .profile_enable = prof_sysexit_enable, \
132 .profile_disable = prof_sysexit_disable_##sname, 111 .profile_disable = prof_sysexit_disable,
133#else 112#else
134#define TRACE_SYS_ENTER_PROFILE(sname) 113#define TRACE_SYS_ENTER_PROFILE(sname)
135#define TRACE_SYS_ENTER_PROFILE_INIT(sname) 114#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
@@ -153,74 +132,46 @@ static void prof_sysexit_disable_##sname(void) \
153#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) 132#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
154 133
155#define SYSCALL_TRACE_ENTER_EVENT(sname) \ 134#define SYSCALL_TRACE_ENTER_EVENT(sname) \
135 static const struct syscall_metadata __syscall_meta_##sname; \
156 static struct ftrace_event_call event_enter_##sname; \ 136 static struct ftrace_event_call event_enter_##sname; \
157 struct trace_event enter_syscall_print_##sname = { \ 137 static struct trace_event enter_syscall_print_##sname = { \
158 .trace = print_syscall_enter, \ 138 .trace = print_syscall_enter, \
159 }; \ 139 }; \
160 static int init_enter_##sname(void) \
161 { \
162 int num, id; \
163 num = syscall_name_to_nr("sys"#sname); \
164 if (num < 0) \
165 return -ENOSYS; \
166 id = register_ftrace_event(&enter_syscall_print_##sname);\
167 if (!id) \
168 return -ENODEV; \
169 event_enter_##sname.id = id; \
170 set_syscall_enter_id(num, id); \
171 INIT_LIST_HEAD(&event_enter_##sname.fields); \
172 return 0; \
173 } \
174 TRACE_SYS_ENTER_PROFILE(sname); \
175 static struct ftrace_event_call __used \ 140 static struct ftrace_event_call __used \
176 __attribute__((__aligned__(4))) \ 141 __attribute__((__aligned__(4))) \
177 __attribute__((section("_ftrace_events"))) \ 142 __attribute__((section("_ftrace_events"))) \
178 event_enter_##sname = { \ 143 event_enter_##sname = { \
179 .name = "sys_enter"#sname, \ 144 .name = "sys_enter"#sname, \
180 .system = "syscalls", \ 145 .system = "syscalls", \
181 .event = &event_syscall_enter, \ 146 .event = &enter_syscall_print_##sname, \
182 .raw_init = init_enter_##sname, \ 147 .raw_init = init_syscall_trace, \
183 .show_format = syscall_enter_format, \ 148 .show_format = syscall_enter_format, \
184 .define_fields = syscall_enter_define_fields, \ 149 .define_fields = syscall_enter_define_fields, \
185 .regfunc = reg_event_syscall_enter, \ 150 .regfunc = reg_event_syscall_enter, \
186 .unregfunc = unreg_event_syscall_enter, \ 151 .unregfunc = unreg_event_syscall_enter, \
187 .data = "sys"#sname, \ 152 .data = (void *)&__syscall_meta_##sname,\
188 TRACE_SYS_ENTER_PROFILE_INIT(sname) \ 153 TRACE_SYS_ENTER_PROFILE_INIT(sname) \
189 } 154 }
190 155
191#define SYSCALL_TRACE_EXIT_EVENT(sname) \ 156#define SYSCALL_TRACE_EXIT_EVENT(sname) \
157 static const struct syscall_metadata __syscall_meta_##sname; \
192 static struct ftrace_event_call event_exit_##sname; \ 158 static struct ftrace_event_call event_exit_##sname; \
193 struct trace_event exit_syscall_print_##sname = { \ 159 static struct trace_event exit_syscall_print_##sname = { \
194 .trace = print_syscall_exit, \ 160 .trace = print_syscall_exit, \
195 }; \ 161 }; \
196 static int init_exit_##sname(void) \
197 { \
198 int num, id; \
199 num = syscall_name_to_nr("sys"#sname); \
200 if (num < 0) \
201 return -ENOSYS; \
202 id = register_ftrace_event(&exit_syscall_print_##sname);\
203 if (!id) \
204 return -ENODEV; \
205 event_exit_##sname.id = id; \
206 set_syscall_exit_id(num, id); \
207 INIT_LIST_HEAD(&event_exit_##sname.fields); \
208 return 0; \
209 } \
210 TRACE_SYS_EXIT_PROFILE(sname); \
211 static struct ftrace_event_call __used \ 162 static struct ftrace_event_call __used \
212 __attribute__((__aligned__(4))) \ 163 __attribute__((__aligned__(4))) \
213 __attribute__((section("_ftrace_events"))) \ 164 __attribute__((section("_ftrace_events"))) \
214 event_exit_##sname = { \ 165 event_exit_##sname = { \
215 .name = "sys_exit"#sname, \ 166 .name = "sys_exit"#sname, \
216 .system = "syscalls", \ 167 .system = "syscalls", \
217 .event = &event_syscall_exit, \ 168 .event = &exit_syscall_print_##sname, \
218 .raw_init = init_exit_##sname, \ 169 .raw_init = init_syscall_trace, \
219 .show_format = syscall_exit_format, \ 170 .show_format = syscall_exit_format, \
220 .define_fields = syscall_exit_define_fields, \ 171 .define_fields = syscall_exit_define_fields, \
221 .regfunc = reg_event_syscall_exit, \ 172 .regfunc = reg_event_syscall_exit, \
222 .unregfunc = unreg_event_syscall_exit, \ 173 .unregfunc = unreg_event_syscall_exit, \
223 .data = "sys"#sname, \ 174 .data = (void *)&__syscall_meta_##sname,\
224 TRACE_SYS_EXIT_PROFILE_INIT(sname) \ 175 TRACE_SYS_EXIT_PROFILE_INIT(sname) \
225 } 176 }
226 177
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 1e4743ee6831..c83a86a22381 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -15,9 +15,6 @@
15 ** The kernel will then return -ENOTDIR to any application using 15 ** The kernel will then return -ENOTDIR to any application using
16 ** the old binary interface. 16 ** the old binary interface.
17 ** 17 **
18 ** For new interfaces unless you really need a binary number
19 ** please use CTL_UNNUMBERED.
20 **
21 **************************************************************** 18 ****************************************************************
22 **************************************************************** 19 ****************************************************************
23 */ 20 */
@@ -50,12 +47,6 @@ struct __sysctl_args {
50 47
51/* Top-level names: */ 48/* Top-level names: */
52 49
53/* For internal pattern-matching use only: */
54#ifdef __KERNEL__
55#define CTL_NONE 0
56#define CTL_UNNUMBERED CTL_NONE /* sysctl without a binary number */
57#endif
58
59enum 50enum
60{ 51{
61 CTL_KERN=1, /* General kernel info and control */ 52 CTL_KERN=1, /* General kernel info and control */
@@ -972,10 +963,6 @@ extern int sysctl_perm(struct ctl_table_root *root,
972 963
973typedef struct ctl_table ctl_table; 964typedef struct ctl_table ctl_table;
974 965
975typedef int ctl_handler (struct ctl_table *table,
976 void __user *oldval, size_t __user *oldlenp,
977 void __user *newval, size_t newlen);
978
979typedef int proc_handler (struct ctl_table *ctl, int write, 966typedef int proc_handler (struct ctl_table *ctl, int write,
980 void __user *buffer, size_t *lenp, loff_t *ppos); 967 void __user *buffer, size_t *lenp, loff_t *ppos);
981 968
@@ -996,21 +983,10 @@ extern int proc_doulongvec_minmax(struct ctl_table *, int,
996extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, 983extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
997 void __user *, size_t *, loff_t *); 984 void __user *, size_t *, loff_t *);
998 985
999extern int do_sysctl (int __user *name, int nlen,
1000 void __user *oldval, size_t __user *oldlenp,
1001 void __user *newval, size_t newlen);
1002
1003extern ctl_handler sysctl_data;
1004extern ctl_handler sysctl_string;
1005extern ctl_handler sysctl_intvec;
1006extern ctl_handler sysctl_jiffies;
1007extern ctl_handler sysctl_ms_jiffies;
1008
1009
1010/* 986/*
1011 * Register a set of sysctl names by calling register_sysctl_table 987 * Register a set of sysctl names by calling register_sysctl_table
1012 * with an initialised array of struct ctl_table's. An entry with zero 988 * with an initialised array of struct ctl_table's. An entry with
1013 * ctl_name and NULL procname terminates the table. table->de will be 989 * NULL procname terminates the table. table->de will be
1014 * set up by the registration and need not be initialised in advance. 990 * set up by the registration and need not be initialised in advance.
1015 * 991 *
1016 * sysctl names can be mirrored automatically under /proc/sys. The 992 * sysctl names can be mirrored automatically under /proc/sys. The
@@ -1023,24 +999,11 @@ extern ctl_handler sysctl_ms_jiffies;
1023 * under /proc; non-leaf nodes will be represented by directories. A 999 * under /proc; non-leaf nodes will be represented by directories. A
1024 * null procname disables /proc mirroring at this node. 1000 * null procname disables /proc mirroring at this node.
1025 * 1001 *
1026 * sysctl entries with a zero ctl_name will not be available through
1027 * the binary sysctl interface.
1028 *
1029 * sysctl(2) can automatically manage read and write requests through 1002 * sysctl(2) can automatically manage read and write requests through
1030 * the sysctl table. The data and maxlen fields of the ctl_table 1003 * the sysctl table. The data and maxlen fields of the ctl_table
1031 * struct enable minimal validation of the values being written to be 1004 * struct enable minimal validation of the values being written to be
1032 * performed, and the mode field allows minimal authentication. 1005 * performed, and the mode field allows minimal authentication.
1033 * 1006 *
1034 * More sophisticated management can be enabled by the provision of a
1035 * strategy routine with the table entry. This will be called before
1036 * any automatic read or write of the data is performed.
1037 *
1038 * The strategy routine may return:
1039 * <0: Error occurred (error is passed to user process)
1040 * 0: OK - proceed with automatic read or write.
1041 * >0: OK - read or write has been done by the strategy routine, so
1042 * return immediately.
1043 *
1044 * There must be a proc_handler routine for any terminal nodes 1007 * There must be a proc_handler routine for any terminal nodes
1045 * mirrored under /proc/sys (non-terminals are handled by a built-in 1008 * mirrored under /proc/sys (non-terminals are handled by a built-in
1046 * directory handler). Several default handlers are available to 1009 * directory handler). Several default handlers are available to
@@ -1050,7 +1013,6 @@ extern ctl_handler sysctl_ms_jiffies;
1050/* A sysctl table is an array of struct ctl_table: */ 1013/* A sysctl table is an array of struct ctl_table: */
1051struct ctl_table 1014struct ctl_table
1052{ 1015{
1053 int ctl_name; /* Binary ID */
1054 const char *procname; /* Text ID for /proc/sys, or zero */ 1016 const char *procname; /* Text ID for /proc/sys, or zero */
1055 void *data; 1017 void *data;
1056 int maxlen; 1018 int maxlen;
@@ -1058,7 +1020,6 @@ struct ctl_table
1058 struct ctl_table *child; 1020 struct ctl_table *child;
1059 struct ctl_table *parent; /* Automatically set */ 1021 struct ctl_table *parent; /* Automatically set */
1060 proc_handler *proc_handler; /* Callback for text formatting */ 1022 proc_handler *proc_handler; /* Callback for text formatting */
1061 ctl_handler *strategy; /* Callback function for all r/w */
1062 void *extra1; 1023 void *extra1;
1063 void *extra2; 1024 void *extra2;
1064}; 1025};
@@ -1092,7 +1053,6 @@ struct ctl_table_header
1092/* struct ctl_path describes where in the hierarchy a table is added */ 1053/* struct ctl_path describes where in the hierarchy a table is added */
1093struct ctl_path { 1054struct ctl_path {
1094 const char *procname; 1055 const char *procname;
1095 int ctl_name;
1096}; 1056};
1097 1057
1098void register_sysctl_root(struct ctl_table_root *root); 1058void register_sysctl_root(struct ctl_table_root *root);
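
With the binary ctl_name and the strategy callback gone, a table entry is identified purely by procname and served by its proc_handler, and the terminating entry is just an empty one. A hedged sketch of registering a single integer knob under /proc/sys/kernel; the names are made up:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/sysctl.h>

	static int example_value;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }			/* NULL procname terminates the table */
	};

	static struct ctl_path example_path[] = {
		{ .procname = "kernel" },
		{ }
	};

	static struct ctl_table_header *example_header;

	static int __init example_sysctl_init(void)
	{
		example_header = register_sysctl_paths(example_path, example_table);
		return example_header ? 0 : -ENOMEM;
	}
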
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 3338b3f5c21a..ac5d1c1285d9 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -27,9 +27,16 @@
27 */ 27 */
28#define TPM_ANY_NUM 0xFFFF 28#define TPM_ANY_NUM 0xFFFF
29 29
30#if defined(CONFIG_TCG_TPM) 30#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
31 31
32extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); 32extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
33extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); 33extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
34#else
35static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
36 return -ENODEV;
37}
38static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
39 return -ENODEV;
40}
34#endif 41#endif
35#endif 42#endif
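
Accepting CONFIG_TCG_TPM=m and providing -ENODEV stubs lets callers extend a PCR unconditionally and treat a missing TPM as a soft failure. A hedged sketch; the PCR index is arbitrary and the digest is assumed to be a 20-byte SHA-1:

	#include <linux/errno.h>
	#include <linux/tpm.h>
	#include <linux/types.h>

	static int example_measure(const u8 digest[20])
	{
		int rc;

		/* extend PCR 10 on whichever TPM is present */
		rc = tpm_pcr_extend(TPM_ANY_NUM, 10, digest);
		if (rc == -ENODEV)
			return 0;	/* no TPM (or driver built out): skip silently */
		return rc;
	}
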
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 2aac8a83e89b..f59604ed0ec6 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -280,6 +280,12 @@ static inline void tracepoint_synchronize_unregister(void)
280 * TRACE_EVENT_FN to perform any (un)registration work. 280 * TRACE_EVENT_FN to perform any (un)registration work.
281 */ 281 */
282 282
283#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
284#define DEFINE_EVENT(template, name, proto, args) \
285 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
286#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
287 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
288
283#define TRACE_EVENT(name, proto, args, struct, assign, print) \ 289#define TRACE_EVENT(name, proto, args, struct, assign, print) \
284 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 290 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
285#define TRACE_EVENT_FN(name, proto, args, struct, \ 291#define TRACE_EVENT_FN(name, proto, args, struct, \