Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/cputime.h      |    3
-rw-r--r--  include/asm-generic/fcntl.h        |    4
-rw-r--r--  include/asm-generic/futex.h        |    7
-rw-r--r--  include/asm-generic/sections.h     |    1
-rw-r--r--  include/asm-generic/unistd.h       |    6
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |    6
-rw-r--r--  include/linux/cgroup.h             |    4
-rw-r--r--  include/linux/cgroup_subsys.h      |    4
-rw-r--r--  include/linux/debugobjects.h       |    5
-rw-r--r--  include/linux/device.h             |    7
-rw-r--r--  include/linux/exportfs.h           |    9
-rw-r--r--  include/linux/fcntl.h              |    1
-rw-r--r--  include/linux/file.h               |    2
-rw-r--r--  include/linux/fs.h                 |   19
-rw-r--r--  include/linux/ftrace.h             |    2
-rw-r--r--  include/linux/ftrace_event.h       |    2
-rw-r--r--  include/linux/hrtimer.h            |   24
-rw-r--r--  include/linux/i2c.h                |    2
-rw-r--r--  include/linux/interrupt.h          |   85
-rw-r--r--  include/linux/irq.h                |  368
-rw-r--r--  include/linux/irqdesc.h            |   78
-rw-r--r--  include/linux/jiffies.h            |    1
-rw-r--r--  include/linux/kthread.h            |    2
-rw-r--r--  include/linux/mm.h                 |    2
-rw-r--r--  include/linux/namei.h              |    7
-rw-r--r--  include/linux/of.h                 |   16
-rw-r--r--  include/linux/of_pci.h             |    9
-rw-r--r--  include/linux/perf_event.h         |   50
-rw-r--r--  include/linux/plist.h              |   47
-rw-r--r--  include/linux/posix-clock.h        |  150
-rw-r--r--  include/linux/posix-timers.h       |   44
-rw-r--r--  include/linux/ring_buffer.h        |    2
-rw-r--r--  include/linux/rtc.h                |    5
-rw-r--r--  include/linux/rwlock_types.h       |    8
-rw-r--r--  include/linux/rwsem-spinlock.h     |   31
-rw-r--r--  include/linux/rwsem.h              |   53
-rw-r--r--  include/linux/sched.h              |   22
-rw-r--r--  include/linux/security.h           |    9
-rw-r--r--  include/linux/spinlock_types.h     |    8
-rw-r--r--  include/linux/syscalls.h           |   20
-rw-r--r--  include/linux/thread_info.h        |    3
-rw-r--r--  include/linux/time.h               |   14
-rw-r--r--  include/linux/timex.h              |    3
-rw-r--r--  include/trace/events/mce.h         |    8
-rw-r--r--  include/trace/events/module.h      |    5
-rw-r--r--  include/trace/events/skb.h         |    4
-rw-r--r--  include/xen/events.h               |    8
-rw-r--r--  include/xen/interface/io/blkif.h   |   37
-rw-r--r--  include/xen/interface/xen.h        |    4
-rw-r--r--  include/xen/xen-ops.h              |    6
50 files changed, 930 insertions(+), 287 deletions(-)
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 2bcc5c7c22a6..61e03dd7939e 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -30,6 +30,9 @@ typedef u64 cputime64_t;
 #define cputime64_to_jiffies64(__ct) (__ct)
 #define jiffies64_to_cputime64(__jif) (__jif)
 #define cputime_to_cputime64(__ct) ((u64) __ct)
+#define cputime64_gt(__a, __b) ((__a) > (__b))
+
+#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
 
 
 /*
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 0fc16e3f0bfc..84793c7025e2 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -80,6 +80,10 @@
 #define O_SYNC (__O_SYNC|O_DSYNC)
 #endif
 
+#ifndef O_PATH
+#define O_PATH 010000000
+#endif
+
 #ifndef O_NDELAY
 #define O_NDELAY O_NONBLOCK
 #endif
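
A user-space usage sketch (not part of the diff above): O_PATH yields a descriptor that identifies a filesystem object without granting read or write access; it is mainly useful as an anchor for the *at() calls. This assumes the C library exposes the flag, otherwise define it as 010000000 as above.

#define _GNU_SOURCE             /* O_PATH is a GNU/Linux extension */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Open a location-only descriptor; no read/write permission needed. */
	int dirfd = open("/etc", O_PATH | O_DIRECTORY);
	if (dirfd < 0) {
		perror("open");
		return 1;
	}
	/* The descriptor cannot be read, but it can anchor openat() and friends. */
	int fd = openat(dirfd, "hostname", O_RDONLY);
	if (fd >= 0)
		close(fd);
	close(dirfd);
	return 0;
}
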
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 3c2344f48136..01f227e14254 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,7 +6,7 @@
 #include <asm/errno.h>
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
 	return -ENOSYS;
 }
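
For context (not part of the diff): the field extraction in futex_atomic_op_inuser() above assumes the packing produced by the FUTEX_OP() macro in linux/futex.h. A sketch of that layout, using a hypothetical helper name:

/* Layout assumed by the decode above:
 *   bits 31..28: op (bit 31 doubles as the FUTEX_OP_OPARG_SHIFT flag)
 *   bits 27..24: cmp
 *   bits 23..12: oparg
 *   bits 11..0 : cmparg
 */
unsigned int futex_op_encode(int op, int cmp, int oparg, int cmparg)
{
	return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}
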
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index b3bfabc258f3..c1a1216e29ce 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -11,6 +11,7 @@ extern char _sinittext[], _einittext[];
 extern char _end[];
 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __entry_text_start[], __entry_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
 
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index b969770196c2..57af0338d270 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -646,9 +646,13 @@ __SYSCALL(__NR_prlimit64, sys_prlimit64)
 __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
 #define __NR_fanotify_mark 263
 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_name_to_handle_at 264
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 265
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
 
 #undef __NR_syscalls
-#define __NR_syscalls 264
+#define __NR_syscalls 266
 
 /*
  * All syscalls below here should go away really,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fe77e3395b40..906c3ceca9a2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -424,6 +424,12 @@
 		*(.kprobes.text)					\
 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+#define ENTRY_TEXT							\
+		ALIGN_FUNCTION();					\
+		VMLINUX_SYMBOL(__entry_text_start) = .;			\
+		*(.entry.text)						\
+		VMLINUX_SYMBOL(__entry_text_end) = .;
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define IRQENTRY_TEXT							\
 	ALIGN_FUNCTION();						\
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ce104e33cd22..e654fa239916 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -474,7 +474,8 @@ struct cgroup_subsys {
 			struct cgroup *old_cgrp, struct task_struct *tsk,
 			bool threadgroup);
 	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
-	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
+	void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			struct cgroup *old_cgrp, struct task_struct *task);
 	int (*populate)(struct cgroup_subsys *ss,
 			struct cgroup *cgrp);
 	void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
@@ -626,6 +627,7 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
 /* Get id and depth of css */
 unsigned short css_id(struct cgroup_subsys_state *css);
 unsigned short css_depth(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
 
 #else /* !CONFIG_CGROUPS */
 
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ccefff02b6cb..cdbfcb8780ec 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -65,4 +65,8 @@ SUBSYS(net_cls)
 SUBSYS(blkio)
 #endif
 
+#ifdef CONFIG_CGROUP_PERF
+SUBSYS(perf)
+#endif
+
 /* */
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 597692f1fc8d..65970b811e22 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -34,7 +34,10 @@ struct debug_obj {
 
 /**
  * struct debug_obj_descr - object type specific debug description structure
+ *
  * @name: name of the object typee
+ * @debug_hint: function returning address, which have associated
+ * kernel symbol, to allow identify the object
  * @fixup_init: fixup function, which is called when the init check
  * fails
  * @fixup_activate: fixup function, which is called when the activate check
@@ -46,7 +49,7 @@ struct debug_obj {
  */
 struct debug_obj_descr {
 	const char *name;
-
+	void *(*debug_hint) (void *addr);
 	int (*fixup_init) (void *addr, enum debug_obj_state state);
 	int (*fixup_activate) (void *addr, enum debug_obj_state state);
 	int (*fixup_destroy) (void *addr, enum debug_obj_state state);
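
Illustrative only: a debug_hint callback is expected to return an address that resolves to a kernel symbol, so debugobjects can print something identifiable instead of a heap pointer. A sketch for a hypothetical object type:

#include <linux/debugobjects.h>

struct my_obj {
	void (*callback)(struct my_obj *obj);
};

/* Return the callback pointer; it resolves to a kernel symbol. */
static void *my_obj_debug_hint(void *addr)
{
	return ((struct my_obj *)addr)->callback;
}

static struct debug_obj_descr my_obj_debug_descr = {
	.name		= "my_obj",
	.debug_hint	= my_obj_debug_hint,
};
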
diff --git a/include/linux/device.h b/include/linux/device.h
index 1bf5cf0b4513..ca5d25225aab 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -128,9 +128,7 @@ struct device_driver {
 
 	bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
 
-#if defined(CONFIG_OF)
 	const struct of_device_id *of_match_table;
-#endif
 
 	int (*probe) (struct device *dev);
 	int (*remove) (struct device *dev);
@@ -441,9 +439,8 @@ struct device {
 					   override */
 	/* arch specific additions */
 	struct dev_archdata archdata;
-#ifdef CONFIG_OF
-	struct device_node *of_node;
-#endif
+
+	struct device_node *of_node; /* associated device tree node */
 
 	dev_t devt; /* dev_t, creates the sysfs "dev" */
 
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 28028988c862..33a42f24b275 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -8,6 +8,9 @@ struct inode;
 struct super_block;
 struct vfsmount;
 
+/* limit the handle size to NFSv4 handle size now */
+#define MAX_HANDLE_SZ 128
+
 /*
  * The fileid_type identifies how the file within the filesystem is encoded.
  * In theory this is freely set and parsed by the filesystem, but we try to
@@ -121,8 +124,10 @@ struct fid {
  * set, the encode_fh() should store sufficient information so that a good
  * attempt can be made to find not only the file but also it's place in the
  * filesystem. This typically means storing a reference to de->d_parent in
- * the filehandle fragment. encode_fh() should return the number of bytes
- * stored or a negative error code such as %-ENOSPC
+ * the filehandle fragment. encode_fh() should return the fileid_type on
+ * success and on error returns 255 (if the space needed to encode fh is
+ * greater than @max_len*4 bytes). On error @max_len contains the minimum
+ * size(in 4 byte unit) needed to encode the file handle.
 *
 * fh_to_dentry:
 * @fh_to_dentry is given a &struct super_block (@sb) and a file handle
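
A filesystem-side sketch (not from the commit) of an encode_fh() that follows the return convention documented above; the handle layout, MY_FID_TYPE and the locking shortcut for the parent inode are hypothetical simplifications:

#define MY_FID_TYPE 0x42	/* hypothetical fileid_type value */

static int my_encode_fh(struct dentry *de, __u32 *fh, int *max_len,
			int connectable)
{
	int len = 3;	/* 4-byte words needed for this hypothetical handle */

	if (*max_len < len) {
		/* Too small: report the required size and return 255. */
		*max_len = len;
		return 255;
	}
	fh[0] = de->d_inode->i_ino;
	fh[1] = de->d_inode->i_generation;
	/* Real code must pin/lock the parent; parent_ino() is a shortcut. */
	fh[2] = connectable ? parent_ino(de) : 0;
	*max_len = len;
	return MY_FID_TYPE;
}
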
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index a562fa5fb4e3..f550f894ba15 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -46,6 +46,7 @@
 			   unlinking file.  */
 #define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
 #define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
+#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/file.h b/include/linux/file.h
index e85baebf6279..21a79958541c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -29,6 +29,8 @@ static inline void fput_light(struct file *file, int fput_needed)
 
 extern struct file *fget(unsigned int fd);
 extern struct file *fget_light(unsigned int fd, int *fput_needed);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern void put_filp(struct file *);
 extern int alloc_fd(unsigned start, unsigned flags);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e38b50a4b9d2..13df14e2c42e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -102,6 +102,9 @@ struct inodes_stat_t {
 /* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
 #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
 
+/* File is opened with O_PATH; almost nothing can be done with it */
+#define FMODE_PATH ((__force fmode_t)0x4000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
 
@@ -978,6 +981,13 @@ struct file {
 #endif
 };
 
+struct file_handle {
+	__u32 handle_bytes;
+	int handle_type;
+	/* file identifier */
+	unsigned char f_handle[0];
+};
+
 #define get_file(x) atomic_long_inc(&(x)->f_count)
 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
 #define file_count(x) atomic_long_read(&(x)->f_count)
@@ -1401,6 +1411,7 @@ struct super_block {
 	wait_queue_head_t s_wait_unfrozen;
 
 	char s_id[32]; /* Informational name */
+	u8 s_uuid[16]; /* UUID */
 
 	void *s_fs_info; /* Filesystem private info */
 	fmode_t s_mode;
@@ -1874,6 +1885,8 @@ extern void drop_collected_mounts(struct vfsmount *);
 extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
 			  struct vfsmount *);
 extern int vfs_statfs(struct path *, struct kstatfs *);
+extern int user_statfs(const char __user *, struct kstatfs *);
+extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
@@ -1990,6 +2003,8 @@ extern int do_fallocate(struct file *file, int mode, loff_t offset,
 extern long do_sys_open(int dfd, const char __user *filename, int flags,
 			int mode);
 extern struct file *filp_open(const char *, int, int);
+extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+				   const char *, int);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
 				 const struct cred *);
 extern int filp_close(struct file *, fl_owner_t id);
@@ -2205,10 +2220,6 @@ extern struct file *create_read_pipe(struct file *f, int flags);
 extern struct file *create_write_pipe(int flags);
 extern void free_write_pipe(struct file *);
 
-extern struct file *do_filp_open(int dfd, const char *pathname,
-		int open_flag, int mode, int acc_mode);
-extern int may_open(struct path *, int, int);
-
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern struct file * open_exec(const char *);
 
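
A user-space sketch (not from the commit) pairing struct file_handle with the new name_to_handle_at()/open_by_handle_at() syscalls, assuming a C library that provides wrappers for them; HANDLE_SZ mirrors the kernel-side MAX_HANDLE_SZ from the exportfs.h hunk:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define HANDLE_SZ 128

int main(void)
{
	struct file_handle *fhp;
	int mount_id, mount_fd, fd;

	fhp = malloc(sizeof(*fhp) + HANDLE_SZ);
	if (!fhp)
		return 1;
	fhp->handle_bytes = HANDLE_SZ;

	/* The kernel fills in handle_type, handle_bytes and f_handle[]. */
	if (name_to_handle_at(AT_FDCWD, "/etc/hostname", fhp, &mount_id, 0)) {
		perror("name_to_handle_at");
		return 1;
	}

	/* Reopen later by handle; any descriptor on the same mount serves as
	 * the mount reference (needs CAP_DAC_READ_SEARCH). */
	mount_fd = open("/etc", O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mount_fd, fhp, O_RDONLY);
	if (fd < 0)
		perror("open_by_handle_at");
	return 0;
}
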
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dcd6a7c3a435..ca29e03c1fac 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
 extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 
 static inline int task_curr_ret_stack(struct task_struct *t)
 {
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
 
 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 					trace_func_graph_ent_t entryfunc)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 47e3997f7b5c..22b32af1b5ec 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -37,7 +37,6 @@ struct trace_entry {
 	unsigned char flags;
 	unsigned char preempt_count;
 	int pid;
-	int lock_depth;
 };
 
 #define FTRACE_MAX_EVENT						\
@@ -208,7 +207,6 @@ struct ftrace_event_call {
 
 #define PERF_MAX_TRACE_SIZE 2048
 
-#define MAX_FILTER_PRED 32
 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
 
 extern void destroy_preds(struct ftrace_event_call *call);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index f376ddc64c4d..62f500c724f9 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -54,11 +54,13 @@ enum hrtimer_restart {
  * 0x00 inactive
  * 0x01 enqueued into rbtree
  * 0x02 callback function running
+ * 0x04 timer is migrated to another cpu
  *
  * Special cases:
  * 0x03 callback function running and enqueued
  * (was requeued on another CPU)
- * 0x09 timer was migrated on CPU hotunplug
+ * 0x05 timer was migrated on CPU hotunplug
+ *
  * The "callback function running and enqueued" status is only possible on
  * SMP. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
@@ -67,8 +69,11 @@ enum hrtimer_restart {
  * as otherwise the timer could be removed before the softirq code finishes the
  * the handling of the timer.
  *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
- * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
+ * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
+ * also affects HRTIMER_STATE_MIGRATE where the preservation is not
+ * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
+ * enqueued on the new cpu.
  *
  * All state transitions are protected by cpu_base->lock.
  */
@@ -148,7 +153,12 @@ struct hrtimer_clock_base {
 #endif
 };
 
-#define HRTIMER_MAX_CLOCK_BASES 2
+enum hrtimer_base_type {
+	HRTIMER_BASE_REALTIME,
+	HRTIMER_BASE_MONOTONIC,
+	HRTIMER_BASE_BOOTTIME,
+	HRTIMER_MAX_CLOCK_BASES,
+};
 
 /*
  * struct hrtimer_cpu_base - the per cpu clock bases
@@ -308,6 +318,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 
 extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
+extern ktime_t ktime_get_boottime(void);
 
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@@ -370,8 +381,9 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 extern ktime_t hrtimer_get_next_event(void);
 
 /*
- * A timer is active, when it is enqueued into the rbtree or the callback
- * function is running.
+ * A timer is active, when it is enqueued into the rbtree or the
+ * callback function is running or it's in the state of being migrated
+ * to another cpu.
  */
 static inline int hrtimer_active(const struct hrtimer *timer)
 {
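
A kernel-side sketch (not from the commit) of arming an hrtimer on the new boottime base, assuming CLOCK_BOOTTIME is wired up to HRTIMER_BASE_BOOTTIME as introduced above:

#include <linux/hrtimer.h>

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_start(void)
{
	hrtimer_init(&my_timer, CLOCK_BOOTTIME, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	/* Fires 500 ms from now, counting time spent in suspend as well. */
	hrtimer_start(&my_timer, ktime_set(0, 500000000), HRTIMER_MODE_REL);
}
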
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 903576df88dc..06a8d9c7de98 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -258,9 +258,7 @@ struct i2c_board_info {
 	unsigned short addr;
 	void *platform_data;
 	struct dev_archdata *archdata;
-#ifdef CONFIG_OF
 	struct device_node *of_node;
-#endif
 	int irq;
 };
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..59b72ca1c5d1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -55,7 +57,8 @@
  * Used by threaded interrupts which need to keep the
  * irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
  */
 #define IRQF_DISABLED 0x00000020
 #define IRQF_SAMPLE_RANDOM 0x00000040
@@ -67,22 +70,10 @@
 #define IRQF_IRQPOLL 0x00001000
 #define IRQF_ONESHOT 0x00002000
 #define IRQF_NO_SUSPEND 0x00004000
+#define IRQF_FORCE_RESUME 0x00008000
+#define IRQF_NO_THREAD 0x00010000
 
-#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED - handler thread died
- * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY - irq thread is requested to adjust affinity
- */
-enum {
-	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
-	IRQTF_WARNED,
-	IRQTF_AFFINITY,
-};
+#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
 /*
  * These values can be returned by request_any_context_irq() and
@@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_fn: interupt handler function for threaded interrupts
  * @thread: thread pointer for threaded interrupts
  * @thread_flags: flags related to @thread
+ * @thread_mask: bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -120,6 +112,7 @@ struct irqaction {
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
+	unsigned long thread_mask;
 	const char *name;
 	struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
@@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq: Interrupt to which notification applies
+ * @kref: Reference count, for internal use
+ * @work: Work item, for internal use
+ * @notify: Function to be called on change. This will be
+ * called in process context.
+ * @release: Function to be called on release. This will be
+ * called in process context. Once registered, the
+ * structure must only be freed when this function is
+ * called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-                                        const struct cpumask *m)
+					const struct cpumask *m)
 {
 	return -EINVAL;
 }
@@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
 }
 
 /* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_wake(unsigned int irq, unsigned int on)
+{
+	return irq_set_irq_wake(irq, on);
+}
+#endif
 
 static inline int enable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 1);
+	return irq_set_irq_wake(irq, 1);
 }
 
 static inline int disable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 0);
+	return irq_set_irq_wake(irq, 0);
 }
 
 #else /* !CONFIG_GENERIC_HARDIRQS */
@@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads (0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
@@ -426,6 +463,13 @@ extern void raise_softirq(unsigned int nr);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+	return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu. If this cannot be done, the
  * work will be queued to the local cpu.
  */
@@ -645,6 +689,7 @@ static inline void init_irq_proc(void)
 
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
 
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
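
A driver-side sketch (not from the commit) of registering for affinity-change notifications with the interface added above; the driver struct and callback bodies are hypothetical:

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* Re-steer per-CPU resources (e.g. an RX queue) to follow @mask. */
}

static void my_affinity_release(struct kref *ref)
{
	/* Last reference dropped; safe to free the containing structure. */
}

static struct irq_affinity_notify my_notify = {
	.notify  = my_affinity_notify,
	.release = my_affinity_release,
};

static int my_driver_setup(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &my_notify);
}
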
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 80fcb53057bc..1d3577f30d45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -29,61 +29,104 @@
 #include <asm/irq_regs.h>
 
 struct irq_desc;
+struct irq_data;
 typedef void (*irq_flow_handler_t)(unsigned int irq,
 					    struct irq_desc *desc);
-
+typedef void (*irq_preflow_handler_t)(struct irq_data *data);
 
 /*
  * IRQ line status.
  *
- * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
+ * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
+ *
+ * IRQ_TYPE_NONE - default, unspecified type
+ * IRQ_TYPE_EDGE_RISING - rising edge triggered
+ * IRQ_TYPE_EDGE_FALLING - falling edge triggered
+ * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
+ * IRQ_TYPE_LEVEL_HIGH - high level triggered
+ * IRQ_TYPE_LEVEL_LOW - low level triggered
+ * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
+ * IRQ_TYPE_SENSE_MASK - Mask for all the above bits
+ * IRQ_TYPE_PROBE - Special flag for probing in progress
+ *
+ * Bits which can be modified via irq_set/clear/modify_status_flags()
+ * IRQ_LEVEL - Interrupt is level type. Will be also
+ * updated in the code when the above trigger
+ * bits are modified via set_irq_type()
+ * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect
+ * it from affinity setting
+ * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing
+ * IRQ_NOREQUEST - Interrupt cannot be requested via
+ * request_irq()
+ * IRQ_NOAUTOEN - Interrupt is not automatically enabled in
+ * request/setup_irq()
+ * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
+ * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
+ * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ *
+ * Deprecated bits. They are kept updated as long as
+ * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits
+ * are internal state of the core code and if you really need to acces
+ * them then talk to the genirq maintainer instead of hacking
+ * something weird.
  *
- * IRQ types
  */
-#define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */
-#define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */
-#define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */
-#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
-#define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */
-#define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */
-#define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */
-#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */
-
-/* Internal flags */
-#define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */
-#define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! */
-#define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */
-#define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */
-#define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL 0x00004000 /* IRQ level triggered */
-#define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */
-#define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */
-#define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */
-#define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */
-#define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */
-#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
-#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
-#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
-#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
-#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
-#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
-#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
-#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
+enum {
+	IRQ_TYPE_NONE = 0x00000000,
+	IRQ_TYPE_EDGE_RISING = 0x00000001,
+	IRQ_TYPE_EDGE_FALLING = 0x00000002,
+	IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
+	IRQ_TYPE_LEVEL_HIGH = 0x00000004,
+	IRQ_TYPE_LEVEL_LOW = 0x00000008,
+	IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
+	IRQ_TYPE_SENSE_MASK = 0x0000000f,
+
+	IRQ_TYPE_PROBE = 0x00000010,
+
+	IRQ_LEVEL = (1 << 8),
+	IRQ_PER_CPU = (1 << 9),
+	IRQ_NOPROBE = (1 << 10),
+	IRQ_NOREQUEST = (1 << 11),
+	IRQ_NOAUTOEN = (1 << 12),
+	IRQ_NO_BALANCING = (1 << 13),
+	IRQ_MOVE_PCNTXT = (1 << 14),
+	IRQ_NESTED_THREAD = (1 << 15),
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+	IRQ_INPROGRESS = (1 << 16),
+	IRQ_REPLAY = (1 << 17),
+	IRQ_WAITING = (1 << 18),
+	IRQ_DISABLED = (1 << 19),
+	IRQ_PENDING = (1 << 20),
+	IRQ_MASKED = (1 << 21),
+	IRQ_MOVE_PENDING = (1 << 22),
+	IRQ_AFFINITY_SET = (1 << 23),
+	IRQ_WAKEUP = (1 << 24),
+#endif
+};
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-	 IRQ_PER_CPU)
+	 IRQ_PER_CPU | IRQ_NESTED_THREAD)
 
-#ifdef CONFIG_IRQ_PER_CPU
-# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
-# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-#else
-# define CHECK_IRQ_PER_CPU(var) 0
-# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
-#endif
+#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status)
+{
+	return status & IRQ_PER_CPU;
+}
+
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
+ */
+enum {
+	IRQ_SET_MASK_OK = 0,
+	IRQ_SET_MASK_OK_NOCOPY,
+};
 
 struct msi_desc;
 
@@ -91,6 +134,8 @@ struct msi_desc;
  * struct irq_data - per irq and irq chip data passed down to chip functions
  * @irq: interrupt number
  * @node: node index useful for balancing
+ * @state_use_accessor: status information for irq chip functions.
+ * Use accessor functions to deal with it
  * @chip: low level interrupt hardware access
  * @handler_data: per-IRQ data for the irq_chip methods
 * @chip_data: platform-specific per-chip private data for the chip
@@ -105,6 +150,7 @@ struct msi_desc;
 struct irq_data {
 	unsigned int irq;
 	unsigned int node;
+	unsigned int state_use_accessors;
 	struct irq_chip *chip;
 	void *handler_data;
 	void *chip_data;
@@ -114,6 +160,80 @@ struct irq_data {
 #endif
 };
 
+/*
+ * Bit masks for irq_data.state
+ *
+ * IRQD_TRIGGER_MASK - Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
+ * IRQD_NO_BALANCING - Balancing disabled for this IRQ
+ * IRQD_PER_CPU - Interrupt is per cpu
+ * IRQD_AFFINITY_SET - Interrupt affinity was set
+ * IRQD_LEVEL - Interrupt is level triggered
+ * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
+ * from suspend
+ * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process
+ * context
+ */
+enum {
+	IRQD_TRIGGER_MASK = 0xf,
+	IRQD_SETAFFINITY_PENDING = (1 << 8),
+	IRQD_NO_BALANCING = (1 << 10),
+	IRQD_PER_CPU = (1 << 11),
+	IRQD_AFFINITY_SET = (1 << 12),
+	IRQD_LEVEL = (1 << 13),
+	IRQD_WAKEUP_STATE = (1 << 14),
+	IRQD_MOVE_PCNTXT = (1 << 15),
+};
+
+static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+}
+
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
+static inline u32 irqd_get_trigger_type(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_TRIGGER_MASK;
+}
+
+/*
+ * Must only be called inside irq_chip.irq_set_type() functions.
+ */
+static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
+{
+	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
+	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+}
+
+static inline bool irqd_is_level_type(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_LEVEL;
+}
+
+static inline bool irqd_is_wakeup_set(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_WAKEUP_STATE;
+}
+
+static inline bool irqd_can_move_in_process_context(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
@@ -150,6 +270,7 @@ struct irq_data {
  * @irq_set_wake: enable/disable power-management wake-on of an IRQ
  * @irq_bus_lock: function to lock access to slow bus (i2c) chips
  * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
+ * @flags: chip specific flags
  *
 * @release: release function solely used by UML
 */
@@ -196,12 +317,27 @@ struct irq_chip {
 	void (*irq_bus_lock)(struct irq_data *data);
 	void (*irq_bus_sync_unlock)(struct irq_data *data);
 
+	unsigned long flags;
+
 	/* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	void (*release)(unsigned int irq, void *dev_id);
 #endif
 };
 
+/*
+ * irq_chip specific flags
+ *
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
+ */
+enum {
+	IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+	IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+	IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
+};
+
 /* This include will go away once we isolated irq_desc usage to core code */
 #include <linux/irqdesc.h>
 
@@ -218,7 +354,7 @@ struct irq_chip {
 # define ARCH_IRQ_INIT_FLAGS 0
 #endif
 
-#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
+#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
 
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
@@ -229,9 +365,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
+void irq_move_irq(struct irq_data *data);
+void irq_move_masked_irq(struct irq_data *data);
 #else
 static inline void move_native_irq(int irq) { }
 static inline void move_masked_irq(int irq) { }
+static inline void irq_move_irq(struct irq_data *data) { }
+static inline void irq_move_masked_irq(struct irq_data *data) { }
 #endif
 
 extern int no_irq_affinity;
@@ -267,23 +407,23 @@ extern struct irq_chip no_irq_chip;
 extern struct irq_chip dummy_irq_chip;
 
 extern void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-			 irq_flow_handler_t handle);
-extern void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 			      irq_flow_handler_t handle, const char *name);
 
+static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+					    irq_flow_handler_t handle)
+{
+	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
+}
+
 extern void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
 
-/*
- * Set a highlevel flow handler for a given IRQ:
- */
 static inline void
-set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-	__set_irq_handler(irq, handle, 0, NULL);
+	__irq_set_handler(irq, handle, 0, NULL);
 }
 
 /*
@@ -292,14 +432,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
  * IRQ_NOREQUEST and IRQ_NOPROBE)
  */
 static inline void
-set_irq_chained_handler(unsigned int irq,
-			irq_flow_handler_t handle)
+irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-	__set_irq_handler(irq, handle, 1, NULL);
+	__irq_set_handler(irq, handle, 1, NULL);
 }
 
-extern void set_irq_nested_thread(unsigned int irq, int nest);
-
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
 
 static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -312,16 +449,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
 	irq_modify_status(irq, clr, 0);
 }
 
-static inline void set_irq_noprobe(unsigned int irq)
+static inline void irq_set_noprobe(unsigned int irq)
 {
 	irq_modify_status(irq, 0, IRQ_NOPROBE);
 }
 
-static inline void set_irq_probe(unsigned int irq)
+static inline void irq_set_probe(unsigned int irq)
 {
 	irq_modify_status(irq, IRQ_NOPROBE, 0);
 }
 
+static inline void irq_set_nested_thread(unsigned int irq, bool nest)
+{
+	if (nest)
+		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
+	else
+		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
@@ -338,14 +483,14 @@ static inline void dynamic_irq_init(unsigned int irq)
 }
 
 /* Set/get chip/data for an IRQ: */
-extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
-extern int set_irq_data(unsigned int irq, void *data);
-extern int set_irq_chip_data(unsigned int irq, void *data);
-extern int set_irq_type(unsigned int irq, unsigned int type);
-extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_handler_data(unsigned int irq, void *data);
+extern int irq_set_chip_data(unsigned int irq, void *data);
+extern int irq_set_irq_type(unsigned int irq, unsigned int type);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
 extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-static inline struct irq_chip *get_irq_chip(unsigned int irq)
+static inline struct irq_chip *irq_get_chip(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->chip : NULL;
@@ -356,7 +501,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
 	return d->chip;
 }
 
-static inline void *get_irq_chip_data(unsigned int irq)
+static inline void *irq_get_chip_data(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->chip_data : NULL;
@@ -367,18 +512,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
 	return d->chip_data;
 }
 
-static inline void *get_irq_data(unsigned int irq)
+static inline void *irq_get_handler_data(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->handler_data : NULL;
 }
 
-static inline void *irq_data_get_irq_data(struct irq_data *d)
+static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
 {
 	return d->handler_data;
 }
 
-static inline struct msi_desc *get_irq_msi(unsigned int irq)
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->msi_desc : NULL;
@@ -389,6 +534,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
 	return d->msi_desc;
 }
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+{
+	return irq_set_chip(irq, chip);
+}
+static inline int set_irq_data(unsigned int irq, void *data)
+{
+	return irq_set_handler_data(irq, data);
+}
+static inline int set_irq_chip_data(unsigned int irq, void *data)
+{
+	return irq_set_chip_data(irq, data);
+}
+static inline int set_irq_type(unsigned int irq, unsigned int type)
+{
+	return irq_set_irq_type(irq, type);
+}
+static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+	return irq_set_msi_desc(irq, entry);
+}
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
+{
+	return irq_get_chip(irq);
+}
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+	return irq_get_chip_data(irq);
+}
+static inline void *get_irq_data(unsigned int irq)
+{
+	return irq_get_handler_data(irq);
+}
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+	return irq_data_get_irq_handler_data(d);
+}
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
+{
+	return irq_get_msi_desc(irq);
+}
+static inline void set_irq_noprobe(unsigned int irq)
+{
+	irq_set_noprobe(irq);
+}
+static inline void set_irq_probe(unsigned int irq)
+{
+	irq_set_probe(irq);
+}
+static inline void set_irq_nested_thread(unsigned int irq, int nest)
+{
+	irq_set_nested_thread(irq, nest);
+}
+static inline void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+			      irq_flow_handler_t handle, const char *name)
+{
+	irq_set_chip_and_handler_name(irq, chip, handle, name);
+}
+static inline void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+			 irq_flow_handler_t handle)
+{
+	irq_set_chip_and_handler(irq, chip, handle);
+}
+static inline void
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+		  const char *name)
+{
+	__irq_set_handler(irq, handle, is_chained, name);
+}
+static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+	irq_set_handler(irq, handle);
+}
+static inline void
+set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+	irq_set_chained_handler(irq, handle);
+}
+#endif
+
 int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
 void irq_free_descs(unsigned int irq, unsigned int cnt);
 int irq_reserve_irqs(unsigned int from, unsigned int cnt);
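
A short sketch (not from the commit) of wiring up a per-irq handler with the renamed helpers above rather than the deprecated set_irq_* compat wrappers; my_irq_chip is hypothetical and handle_level_irq is the generic level flow handler:

static void my_gpio_irq_setup(unsigned int irq, void *chip_data)
{
	irq_set_chip_and_handler_name(irq, &my_irq_chip,
				      handle_level_irq, "level");
	irq_set_chip_data(irq, chip_data);
	irq_set_noprobe(irq);
}
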
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c1a95b7b58de..00218371518b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@
  * For now it's included from <linux/irq.h>
  */
 
+struct irq_affinity_notify;
 struct proc_dir_entry;
 struct timer_rand_state;
 /**
@@ -18,13 +19,16 @@ struct timer_rand_state;
  * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
  * @action: the irq action chain
  * @status: status information
+ * @core_internal_state__do_not_mess_with_it: core internal status information
  * @depth: disable-depth, for nested irq_disable() calls
  * @wake_depth: enable depth, for multiple set_irq_wake() callers
  * @irq_count: stats field to detect stalled irqs
  * @last_unhandled: aging timer for unhandled count
  * @irqs_unhandled: stats field for spurious unhandled interrupts
  * @lock: locking for SMP
+ * @affinity_notify: context for notification of affinity changes
  * @pending_mask: pending rebalanced interrupts
+ * @threads_oneshot: bitfield to handle shared oneshot threads
  * @threads_active: number of irqaction threads currently running
  * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
  * @dir: /proc/irq/ procfs entry
@@ -45,6 +49,7 @@ struct irq_desc {
 	struct {
 		unsigned int irq;
 		unsigned int node;
+		unsigned int pad_do_not_even_think_about_it;
 		struct irq_chip *chip;
 		void *handler_data;
 		void *chip_data;
@@ -59,9 +64,16 @@ struct irq_desc {
 	struct timer_rand_state *timer_rand_state;
 	unsigned int __percpu *kstat_irqs;
 	irq_flow_handler_t handle_irq;
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+	irq_preflow_handler_t preflow_handler;
+#endif
 	struct irqaction *action; /* IRQ action list */
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+	unsigned int status_use_accessors;
+#else
 	unsigned int status; /* IRQ status */
-
+#endif
+	unsigned int core_internal_state__do_not_mess_with_it;
 	unsigned int depth; /* nested irq disables */
 	unsigned int wake_depth; /* nested wake enables */
 	unsigned int irq_count; /* For detecting broken IRQs */
@@ -70,10 +82,12 @@ struct irq_desc {
 	raw_spinlock_t lock;
 #ifdef CONFIG_SMP
 	const struct cpumask *affinity_hint;
+	struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t pending_mask;
 #endif
 #endif
+	unsigned long threads_oneshot;
 	atomic_t threads_active;
 	wait_queue_head_t wait_for_threads;
 #ifdef CONFIG_PROC_FS
@@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
-#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
-#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
-#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
+static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
+{
+	return &desc->irq_data;
+}
+
+static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
+{
+	return desc->irq_data.chip;
+}
+
+static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
+{
+	return desc->irq_data.chip_data;
125}
126
127static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
128{
129 return desc->irq_data.handler_data;
130}
131
132static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
133{
134 return desc->irq_data.msi_desc;
135}
136
137#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
138static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
139{
140 return irq_desc_get_chip(desc);
141}
142static inline void *get_irq_desc_data(struct irq_desc *desc)
143{
144 return irq_desc_get_handler_data(desc);
145}
146
147static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
148{
149 return irq_desc_get_chip_data(desc);
150}
151
152static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
153{
154 return irq_desc_get_msi_desc(desc);
155}
156#endif
102 157
103/* 158/*
104 * Architectures call this to let the generic IRQ layer 159 * Architectures call this to let the generic IRQ layer
@@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq)
123 return desc->action != NULL; 178 return desc->action != NULL;
124} 179}
125 180
181#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
126static inline int irq_balancing_disabled(unsigned int irq) 182static inline int irq_balancing_disabled(unsigned int irq)
127{ 183{
128 struct irq_desc *desc; 184 struct irq_desc *desc;
@@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
130 desc = irq_to_desc(irq); 186 desc = irq_to_desc(irq);
131 return desc->status & IRQ_NO_BALANCING_MASK; 187 return desc->status & IRQ_NO_BALANCING_MASK;
132} 188}
189#endif
133 190
134/* caller has locked the irq_desc and both params are valid */ 191/* caller has locked the irq_desc and both params are valid */
135static inline void __set_irq_handler_unlocked(int irq, 192static inline void __set_irq_handler_unlocked(int irq,
@@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq,
140 desc = irq_to_desc(irq); 197 desc = irq_to_desc(irq);
141 desc->handle_irq = handler; 198 desc->handle_irq = handler;
142} 199}
200
201#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
202static inline void
203__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
204{
205 struct irq_desc *desc;
206
207 desc = irq_to_desc(irq);
208 desc->preflow_handler = handler;
209}
210#endif
143#endif 211#endif
144 212
145#endif 213#endif
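The get_irq_desc_* macros above become typed inlines named irq_desc_get_*(). A minimal sketch of a flow handler using the new accessors (hypothetical code, not part of this diff; example_handle_irq and the priv pointer are invented):

#include <linux/irq.h>

/*
 * Illustrative only: a flow handler reading chip and per-irq data
 * through the new irq_desc_get_*() accessors instead of the old
 * get_irq_desc_* macros.
 */
static void example_handle_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        void *priv = irq_desc_get_handler_data(desc);

        if (chip->irq_ack)
                chip->irq_ack(irq_desc_get_irq_data(desc));

        /* ... dispatch the event using priv ... */
        (void)priv;
}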
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 6811f4bfc6e7..922aa313c9f9 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
307extern unsigned long clock_t_to_jiffies(unsigned long x); 307extern unsigned long clock_t_to_jiffies(unsigned long x);
308extern u64 jiffies_64_to_clock_t(u64 x); 308extern u64 jiffies_64_to_clock_t(u64 x);
309extern u64 nsec_to_clock_t(u64 x); 309extern u64 nsec_to_clock_t(u64 x);
310extern u64 nsecs_to_jiffies64(u64 n);
310extern unsigned long nsecs_to_jiffies(u64 n); 311extern unsigned long nsecs_to_jiffies(u64 n);
311 312
312#define TIMESTAMP_SIZE 30 313#define TIMESTAMP_SIZE 30
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index ce0775aa64c3..7ff16f7d3ed4 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -64,7 +64,7 @@ struct kthread_work {
64}; 64};
65 65
66#define KTHREAD_WORKER_INIT(worker) { \ 66#define KTHREAD_WORKER_INIT(worker) { \
67 .lock = SPIN_LOCK_UNLOCKED, \ 67 .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
68 .work_list = LIST_HEAD_INIT((worker).work_list), \ 68 .work_list = LIST_HEAD_INIT((worker).work_list), \
69 } 69 }
70 70
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f6385fc17ad4..679300c050f5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1309,8 +1309,6 @@ int add_from_early_node_map(struct range *range, int az,
1309 int nr_range, int nid); 1309 int nr_range, int nid);
1310u64 __init find_memory_core_early(int nid, u64 size, u64 align, 1310u64 __init find_memory_core_early(int nid, u64 size, u64 align,
1311 u64 goal, u64 limit); 1311 u64 goal, u64 limit);
1312void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
1313 u64 goal, u64 limit);
1314typedef int (*work_fn_t)(unsigned long, unsigned long, void *); 1312typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
1315extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); 1313extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
1316extern void sparse_memory_present_with_active_regions(int nid); 1314extern void sparse_memory_present_with_active_regions(int nid);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index f276d4fa01fc..9c8603872c36 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -19,7 +19,6 @@ struct nameidata {
19 struct path path; 19 struct path path;
20 struct qstr last; 20 struct qstr last;
21 struct path root; 21 struct path root;
22 struct file *file;
23 struct inode *inode; /* path.dentry.d_inode */ 22 struct inode *inode; /* path.dentry.d_inode */
24 unsigned int flags; 23 unsigned int flags;
25 unsigned seq; 24 unsigned seq;
@@ -63,6 +62,10 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
63#define LOOKUP_EXCL 0x0400 62#define LOOKUP_EXCL 0x0400
64#define LOOKUP_RENAME_TARGET 0x0800 63#define LOOKUP_RENAME_TARGET 0x0800
65 64
65#define LOOKUP_JUMPED 0x1000
66#define LOOKUP_ROOT 0x2000
67#define LOOKUP_EMPTY 0x4000
68
66extern int user_path_at(int, const char __user *, unsigned, struct path *); 69extern int user_path_at(int, const char __user *, unsigned, struct path *);
67 70
68#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path) 71#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
@@ -72,7 +75,7 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *);
72 75
73extern int kern_path(const char *, unsigned, struct path *); 76extern int kern_path(const char *, unsigned, struct path *);
74 77
75extern int path_lookup(const char *, unsigned, struct nameidata *); 78extern int kern_path_parent(const char *, struct nameidata *);
76extern int vfs_path_lookup(struct dentry *, struct vfsmount *, 79extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
77 const char *, unsigned int, struct nameidata *); 80 const char *, unsigned int, struct nameidata *);
78 81
diff --git a/include/linux/of.h b/include/linux/of.h
index cad7cf0ab278..266db1d0baa9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -23,8 +23,6 @@
23 23
24#include <asm/byteorder.h> 24#include <asm/byteorder.h>
25 25
26#ifdef CONFIG_OF
27
28typedef u32 phandle; 26typedef u32 phandle;
29typedef u32 ihandle; 27typedef u32 ihandle;
30 28
@@ -65,11 +63,18 @@ struct device_node {
65#endif 63#endif
66}; 64};
67 65
66#ifdef CONFIG_OF
67
68/* Pointer for first entry in chain of all nodes. */ 68/* Pointer for first entry in chain of all nodes. */
69extern struct device_node *allnodes; 69extern struct device_node *allnodes;
70extern struct device_node *of_chosen; 70extern struct device_node *of_chosen;
71extern rwlock_t devtree_lock; 71extern rwlock_t devtree_lock;
72 72
73static inline bool of_have_populated_dt(void)
74{
75 return allnodes != NULL;
76}
77
73static inline bool of_node_is_root(const struct device_node *node) 78static inline bool of_node_is_root(const struct device_node *node)
74{ 79{
75 return node && (node->parent == NULL); 80 return node && (node->parent == NULL);
@@ -222,5 +227,12 @@ extern void of_attach_node(struct device_node *);
222extern void of_detach_node(struct device_node *); 227extern void of_detach_node(struct device_node *);
223#endif 228#endif
224 229
230#else
231
232static inline bool of_have_populated_dt(void)
233{
234 return false;
235}
236
225#endif /* CONFIG_OF */ 237#endif /* CONFIG_OF */
226#endif /* _LINUX_OF_H */ 238#endif /* _LINUX_OF_H */
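Because of_have_populated_dt() now has a !CONFIG_OF stub returning false, callers can test for a populated device tree without #ifdef guards. A minimal sketch (hypothetical platform code, not part of this diff; example_board_init is an invented name):

#include <linux/of.h>

/* Illustrative only: DT-dependent init that compiles with or without CONFIG_OF. */
static int __init example_board_init(void)
{
        if (!of_have_populated_dt())
                return 0;       /* no device tree: nothing to do / legacy path */

        /* ... device-tree driven registration would go here ... */
        return 0;
}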
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
new file mode 100644
index 000000000000..85a27b650d76
--- /dev/null
+++ b/include/linux/of_pci.h
@@ -0,0 +1,9 @@
1#ifndef __OF_PCI_H
2#define __OF_PCI_H
3
4#include <linux/pci.h>
5
6struct pci_dev;
7struct of_irq;
8int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
9#endif
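A minimal sketch of how the new helper might be called from host-bridge code (hypothetical, not part of this diff; example_map_pci_irq is an invented name, and the subsequent mapping to a Linux irq number is only described in the comment):

#include <linux/of_pci.h>
#include <linux/of_irq.h>

/*
 * Illustrative only: resolve the device-tree interrupt for a PCI device.
 * The returned of_irq (controller node, specifier, size) would then be
 * mapped to a Linux irq number by the architecture's irq-mapping code.
 */
static int example_map_pci_irq(struct pci_dev *pdev)
{
        struct of_irq oirq;

        if (of_irq_map_pci(pdev, &oirq))
                return -ENODEV;         /* no DT interrupt information */

        /* oirq.controller, oirq.size and oirq.specifier[] describe the IRQ */
        return 0;
}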
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index dda5b0a3ff60..614615b8d42b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -225,8 +225,14 @@ struct perf_event_attr {
225 }; 225 };
226 226
227 __u32 bp_type; 227 __u32 bp_type;
228 __u64 bp_addr; 228 union {
229 __u64 bp_len; 229 __u64 bp_addr;
230 __u64 config1; /* extension of config */
231 };
232 union {
233 __u64 bp_len;
234 __u64 config2; /* extension of config1 */
235 };
230}; 236};
231 237
232/* 238/*
@@ -464,6 +470,7 @@ enum perf_callchain_context {
464 470
465#define PERF_FLAG_FD_NO_GROUP (1U << 0) 471#define PERF_FLAG_FD_NO_GROUP (1U << 0)
466#define PERF_FLAG_FD_OUTPUT (1U << 1) 472#define PERF_FLAG_FD_OUTPUT (1U << 1)
473#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
467 474
468#ifdef __KERNEL__ 475#ifdef __KERNEL__
469/* 476/*
@@ -471,6 +478,7 @@ enum perf_callchain_context {
471 */ 478 */
472 479
473#ifdef CONFIG_PERF_EVENTS 480#ifdef CONFIG_PERF_EVENTS
481# include <linux/cgroup.h>
474# include <asm/perf_event.h> 482# include <asm/perf_event.h>
475# include <asm/local64.h> 483# include <asm/local64.h>
476#endif 484#endif
@@ -539,6 +547,9 @@ struct hw_perf_event {
539 unsigned long event_base; 547 unsigned long event_base;
540 int idx; 548 int idx;
541 int last_cpu; 549 int last_cpu;
550 unsigned int extra_reg;
551 u64 extra_config;
552 int extra_alloc;
542 }; 553 };
543 struct { /* software */ 554 struct { /* software */
544 struct hrtimer hrtimer; 555 struct hrtimer hrtimer;
@@ -716,6 +727,22 @@ struct swevent_hlist {
716#define PERF_ATTACH_GROUP 0x02 727#define PERF_ATTACH_GROUP 0x02
717#define PERF_ATTACH_TASK 0x04 728#define PERF_ATTACH_TASK 0x04
718 729
730#ifdef CONFIG_CGROUP_PERF
731/*
732 * perf_cgroup_info keeps track of time_enabled for a cgroup.
733 * This is a per-cpu dynamically allocated data structure.
734 */
735struct perf_cgroup_info {
736 u64 time;
737 u64 timestamp;
738};
739
740struct perf_cgroup {
741 struct cgroup_subsys_state css;
742 struct perf_cgroup_info *info; /* timing info, one per cpu */
743};
744#endif
745
719/** 746/**
720 * struct perf_event - performance event kernel representation: 747 * struct perf_event - performance event kernel representation:
721 */ 748 */
@@ -832,6 +859,11 @@ struct perf_event {
832 struct event_filter *filter; 859 struct event_filter *filter;
833#endif 860#endif
834 861
862#ifdef CONFIG_CGROUP_PERF
863 struct perf_cgroup *cgrp; /* cgroup event is attach to */
864 int cgrp_defer_enabled;
865#endif
866
835#endif /* CONFIG_PERF_EVENTS */ 867#endif /* CONFIG_PERF_EVENTS */
836}; 868};
837 869
@@ -886,6 +918,7 @@ struct perf_event_context {
886 u64 generation; 918 u64 generation;
887 int pin_count; 919 int pin_count;
888 struct rcu_head rcu_head; 920 struct rcu_head rcu_head;
921 int nr_cgroups; /* cgroup events present */
889}; 922};
890 923
891/* 924/*
@@ -905,6 +938,9 @@ struct perf_cpu_context {
905 struct list_head rotation_list; 938 struct list_head rotation_list;
906 int jiffies_interval; 939 int jiffies_interval;
907 struct pmu *active_pmu; 940 struct pmu *active_pmu;
941#ifdef CONFIG_CGROUP_PERF
942 struct perf_cgroup *cgrp;
943#endif
908}; 944};
909 945
910struct perf_output_handle { 946struct perf_output_handle {
@@ -1040,11 +1076,11 @@ have_event:
1040 __perf_sw_event(event_id, nr, nmi, regs, addr); 1076 __perf_sw_event(event_id, nr, nmi, regs, addr);
1041} 1077}
1042 1078
1043extern atomic_t perf_task_events; 1079extern atomic_t perf_sched_events;
1044 1080
1045static inline void perf_event_task_sched_in(struct task_struct *task) 1081static inline void perf_event_task_sched_in(struct task_struct *task)
1046{ 1082{
1047 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task)); 1083 COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
1048} 1084}
1049 1085
1050static inline 1086static inline
@@ -1052,7 +1088,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex
1052{ 1088{
1053 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); 1089 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1054 1090
1055 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next)); 1091 COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
1056} 1092}
1057 1093
1058extern void perf_event_mmap(struct vm_area_struct *vma); 1094extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1083,6 +1119,10 @@ extern int sysctl_perf_event_paranoid;
1083extern int sysctl_perf_event_mlock; 1119extern int sysctl_perf_event_mlock;
1084extern int sysctl_perf_event_sample_rate; 1120extern int sysctl_perf_event_sample_rate;
1085 1121
1122extern int perf_proc_update_handler(struct ctl_table *table, int write,
1123 void __user *buffer, size_t *lenp,
1124 loff_t *ppos);
1125
1086static inline bool perf_paranoid_tracepoint_raw(void) 1126static inline bool perf_paranoid_tracepoint_raw(void)
1087{ 1127{
1088 return sysctl_perf_event_paranoid > -1; 1128 return sysctl_perf_event_paranoid > -1;
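With the PERF_FLAG_PID_CGROUP flag added above, the pid argument of perf_event_open() carries a file descriptor of a perf_event cgroup directory and the event must be opened in per-cpu mode. A hedged userspace-style sketch (not part of this diff; open_cgroup_cycles and the counter choice are invented):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Illustrative only: count CPU cycles for one cgroup on one CPU. */
static int open_cgroup_cycles(const char *cgroup_dir, int cpu)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .size   = sizeof(attr),
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        int cgrp_fd = open(cgroup_dir, O_RDONLY);

        if (cgrp_fd < 0)
                return -1;

        /* "pid" is the cgroup fd, cpu must be >= 0, group_fd unused here. */
        return syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
                       -1, PERF_FLAG_PID_CGROUP);
}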
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 7254eda078e5..c9b9f322c8d8 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -31,15 +31,17 @@
31 * 31 *
32 * Simple ASCII art explanation: 32 * Simple ASCII art explanation:
33 * 33 *
34 * |HEAD | 34 * pl:prio_list (only for plist_node)
35 * | | 35 * nl:node_list
36 * |prio_list.prev|<------------------------------------| 36 * HEAD| NODE(S)
37 * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-| 37 * |
38 * |10 | |10| |21| |21| |21| |40| (prio) 38 * ||------------------------------------|
39 * | | | | | | | | | | | | 39 * ||->|pl|<->|pl|<--------------->|pl|<-|
40 * | | | | | | | | | | | | 40 * | |10| |21| |21| |21| |40| (prio)
41 * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| 41 * | | | | | | | | | | |
42 * |node_list.prev|<------------------------------------| 42 * | | | | | | | | | | |
43 * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
44 * |-------------------------------------------|
43 * 45 *
44 * The nodes on the prio_list list are sorted by priority to simplify 46 * The nodes on the prio_list list are sorted by priority to simplify
45 * the insertion of new nodes. There are no nodes with duplicate 47 * the insertion of new nodes. There are no nodes with duplicate
@@ -78,7 +80,6 @@
78#include <linux/spinlock_types.h> 80#include <linux/spinlock_types.h>
79 81
80struct plist_head { 82struct plist_head {
81 struct list_head prio_list;
82 struct list_head node_list; 83 struct list_head node_list;
83#ifdef CONFIG_DEBUG_PI_LIST 84#ifdef CONFIG_DEBUG_PI_LIST
84 raw_spinlock_t *rawlock; 85 raw_spinlock_t *rawlock;
@@ -88,7 +89,8 @@ struct plist_head {
88 89
89struct plist_node { 90struct plist_node {
90 int prio; 91 int prio;
91 struct plist_head plist; 92 struct list_head prio_list;
93 struct list_head node_list;
92}; 94};
93 95
94#ifdef CONFIG_DEBUG_PI_LIST 96#ifdef CONFIG_DEBUG_PI_LIST
@@ -100,7 +102,6 @@ struct plist_node {
100#endif 102#endif
101 103
102#define _PLIST_HEAD_INIT(head) \ 104#define _PLIST_HEAD_INIT(head) \
103 .prio_list = LIST_HEAD_INIT((head).prio_list), \
104 .node_list = LIST_HEAD_INIT((head).node_list) 105 .node_list = LIST_HEAD_INIT((head).node_list)
105 106
106/** 107/**
@@ -133,7 +134,8 @@ struct plist_node {
133#define PLIST_NODE_INIT(node, __prio) \ 134#define PLIST_NODE_INIT(node, __prio) \
134{ \ 135{ \
135 .prio = (__prio), \ 136 .prio = (__prio), \
136 .plist = { _PLIST_HEAD_INIT((node).plist) }, \ 137 .prio_list = LIST_HEAD_INIT((node).prio_list), \
138 .node_list = LIST_HEAD_INIT((node).node_list), \
137} 139}
138 140
139/** 141/**
@@ -144,7 +146,6 @@ struct plist_node {
144static inline void 146static inline void
145plist_head_init(struct plist_head *head, spinlock_t *lock) 147plist_head_init(struct plist_head *head, spinlock_t *lock)
146{ 148{
147 INIT_LIST_HEAD(&head->prio_list);
148 INIT_LIST_HEAD(&head->node_list); 149 INIT_LIST_HEAD(&head->node_list);
149#ifdef CONFIG_DEBUG_PI_LIST 150#ifdef CONFIG_DEBUG_PI_LIST
150 head->spinlock = lock; 151 head->spinlock = lock;
@@ -160,7 +161,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
160static inline void 161static inline void
161plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) 162plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
162{ 163{
163 INIT_LIST_HEAD(&head->prio_list);
164 INIT_LIST_HEAD(&head->node_list); 164 INIT_LIST_HEAD(&head->node_list);
165#ifdef CONFIG_DEBUG_PI_LIST 165#ifdef CONFIG_DEBUG_PI_LIST
166 head->rawlock = lock; 166 head->rawlock = lock;
@@ -176,7 +176,8 @@ plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
176static inline void plist_node_init(struct plist_node *node, int prio) 176static inline void plist_node_init(struct plist_node *node, int prio)
177{ 177{
178 node->prio = prio; 178 node->prio = prio;
179 plist_head_init(&node->plist, NULL); 179 INIT_LIST_HEAD(&node->prio_list);
180 INIT_LIST_HEAD(&node->node_list);
180} 181}
181 182
182extern void plist_add(struct plist_node *node, struct plist_head *head); 183extern void plist_add(struct plist_node *node, struct plist_head *head);
@@ -188,7 +189,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
188 * @head: the head for your list 189 * @head: the head for your list
189 */ 190 */
190#define plist_for_each(pos, head) \ 191#define plist_for_each(pos, head) \
191 list_for_each_entry(pos, &(head)->node_list, plist.node_list) 192 list_for_each_entry(pos, &(head)->node_list, node_list)
192 193
193/** 194/**
194 * plist_for_each_safe - iterate safely over a plist of given type 195 * plist_for_each_safe - iterate safely over a plist of given type
@@ -199,7 +200,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
199 * Iterate over a plist of given type, safe against removal of list entry. 200 * Iterate over a plist of given type, safe against removal of list entry.
200 */ 201 */
201#define plist_for_each_safe(pos, n, head) \ 202#define plist_for_each_safe(pos, n, head) \
202 list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) 203 list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
203 204
204/** 205/**
205 * plist_for_each_entry - iterate over list of given type 206 * plist_for_each_entry - iterate over list of given type
@@ -208,7 +209,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
208 * @mem: the name of the list_struct within the struct 209 * @mem: the name of the list_struct within the struct
209 */ 210 */
210#define plist_for_each_entry(pos, head, mem) \ 211#define plist_for_each_entry(pos, head, mem) \
211 list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) 212 list_for_each_entry(pos, &(head)->node_list, mem.node_list)
212 213
213/** 214/**
214 * plist_for_each_entry_safe - iterate safely over list of given type 215 * plist_for_each_entry_safe - iterate safely over list of given type
@@ -220,7 +221,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
220 * Iterate over list of given type, safe against removal of list entry. 221 * Iterate over list of given type, safe against removal of list entry.
221 */ 222 */
222#define plist_for_each_entry_safe(pos, n, head, m) \ 223#define plist_for_each_entry_safe(pos, n, head, m) \
223 list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) 224 list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
224 225
225/** 226/**
226 * plist_head_empty - return !0 if a plist_head is empty 227 * plist_head_empty - return !0 if a plist_head is empty
@@ -237,7 +238,7 @@ static inline int plist_head_empty(const struct plist_head *head)
237 */ 238 */
238static inline int plist_node_empty(const struct plist_node *node) 239static inline int plist_node_empty(const struct plist_node *node)
239{ 240{
240 return plist_head_empty(&node->plist); 241 return list_empty(&node->node_list);
241} 242}
242 243
243/* All functions below assume the plist_head is not empty. */ 244/* All functions below assume the plist_head is not empty. */
@@ -285,7 +286,7 @@ static inline int plist_node_empty(const struct plist_node *node)
285static inline struct plist_node *plist_first(const struct plist_head *head) 286static inline struct plist_node *plist_first(const struct plist_head *head)
286{ 287{
287 return list_entry(head->node_list.next, 288 return list_entry(head->node_list.next,
288 struct plist_node, plist.node_list); 289 struct plist_node, node_list);
289} 290}
290 291
291/** 292/**
@@ -297,7 +298,7 @@ static inline struct plist_node *plist_first(const struct plist_head *head)
297static inline struct plist_node *plist_last(const struct plist_head *head) 298static inline struct plist_node *plist_last(const struct plist_head *head)
298{ 299{
299 return list_entry(head->node_list.prev, 300 return list_entry(head->node_list.prev,
300 struct plist_node, plist.node_list); 301 struct plist_node, node_list);
301} 302}
302 303
303#endif 304#endif
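The list restructuring above (prio_list moved into plist_node) is invisible to users of the plist API. A minimal usage sketch (hypothetical, not part of this diff; example_waiter and the priority value are invented):

#include <linux/kernel.h>
#include <linux/plist.h>
#include <linux/spinlock.h>

/* Illustrative only: a priority-sorted waiter list built on plist. */
struct example_waiter {
        int id;
        struct plist_node node;
};

static DEFINE_SPINLOCK(example_lock);
static struct plist_head example_head;

static void example_plist(void)
{
        struct example_waiter w = { .id = 1 };
        struct example_waiter *pos;

        plist_head_init(&example_head, &example_lock);

        plist_node_init(&w.node, 10 /* priority */);
        spin_lock(&example_lock);
        plist_add(&w.node, &example_head);
        spin_unlock(&example_lock);

        /* Nodes come out lowest priority value first. */
        plist_for_each_entry(pos, &example_head, node)
                pr_info("waiter %d prio %d\n", pos->id, pos->node.prio);
}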
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
new file mode 100644
index 000000000000..369e19d3750b
--- /dev/null
+++ b/include/linux/posix-clock.h
@@ -0,0 +1,150 @@
1/*
2 * posix-clock.h - support for dynamic clock devices
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef _LINUX_POSIX_CLOCK_H_
21#define _LINUX_POSIX_CLOCK_H_
22
23#include <linux/cdev.h>
24#include <linux/fs.h>
25#include <linux/poll.h>
26#include <linux/posix-timers.h>
27
28struct posix_clock;
29
30/**
31 * struct posix_clock_operations - functional interface to the clock
32 *
33 * Every posix clock is represented by a character device. Drivers may
34 * optionally offer extended capabilities by implementing the
35 * character device methods. The character device file operations are
36 * first handled by the clock device layer, then passed on to the
37 * driver by calling these functions.
38 *
39 * @owner: The clock driver should set this to THIS_MODULE
40 * @clock_adjtime: Adjust the clock
41 * @clock_gettime: Read the current time
42 * @clock_getres: Get the clock resolution
43 * @clock_settime: Set the current time value
44 * @timer_create: Create a new timer
45 * @timer_delete: Remove a previously created timer
46 * @timer_gettime: Get remaining time and interval of a timer
47 * @timer_settime: Set a timer's initial expiration and interval
48 * @fasync: Optional character device fasync method
49 * @mmap: Optional character device mmap method
50 * @open: Optional character device open method
51 * @release: Optional character device release method
52 * @ioctl: Optional character device ioctl method
53 * @read: Optional character device read method
54 * @poll: Optional character device poll method
55 */
56struct posix_clock_operations {
57 struct module *owner;
58
59 int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
60
61 int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
62
63 int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
64
65 int (*clock_settime)(struct posix_clock *pc,
66 const struct timespec *ts);
67
68 int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
69
70 int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
71
72 void (*timer_gettime)(struct posix_clock *pc,
73 struct k_itimer *kit, struct itimerspec *tsp);
74
75 int (*timer_settime)(struct posix_clock *pc,
76 struct k_itimer *kit, int flags,
77 struct itimerspec *tsp, struct itimerspec *old);
78 /*
79 * Optional character device methods:
80 */
81 int (*fasync) (struct posix_clock *pc,
82 int fd, struct file *file, int on);
83
84 long (*ioctl) (struct posix_clock *pc,
85 unsigned int cmd, unsigned long arg);
86
87 int (*mmap) (struct posix_clock *pc,
88 struct vm_area_struct *vma);
89
90 int (*open) (struct posix_clock *pc, fmode_t f_mode);
91
92 uint (*poll) (struct posix_clock *pc,
93 struct file *file, poll_table *wait);
94
95 int (*release) (struct posix_clock *pc);
96
97 ssize_t (*read) (struct posix_clock *pc,
98 uint flags, char __user *buf, size_t cnt);
99};
100
101/**
102 * struct posix_clock - represents a dynamic posix clock
103 *
104 * @ops: Functional interface to the clock
105 * @cdev: Character device instance for this clock
106 * @kref: Reference count.
107 * @mutex: Protects the 'zombie' field from concurrent access.
108 * @zombie: If 'zombie' is true, then the hardware has disappeared.
109 * @release: A function to free the structure when the reference count reaches
110 * zero. May be NULL if structure is statically allocated.
111 *
112 * Drivers should embed their struct posix_clock within a private
113 * structure, obtaining a reference to it during callbacks using
114 * container_of().
115 */
116struct posix_clock {
117 struct posix_clock_operations ops;
118 struct cdev cdev;
119 struct kref kref;
120 struct mutex mutex;
121 bool zombie;
122 void (*release)(struct posix_clock *clk);
123};
124
125/**
126 * posix_clock_register() - register a new clock
127 * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
128 * @devid: Allocated device id
129 *
130 * A clock driver calls this function to register itself with the
131 * clock device subsystem. If 'clk' points to dynamically allocated
132 * memory, then the caller must provide a 'release' function to free
133 * that memory.
134 *
135 * Returns zero on success, non-zero otherwise.
136 */
137int posix_clock_register(struct posix_clock *clk, dev_t devid);
138
139/**
140 * posix_clock_unregister() - unregister a clock
141 * @clk: Clock instance previously registered via posix_clock_register()
142 *
143 * A clock driver calls this function to remove itself from the clock
144 * device subsystem. The posix_clock itself will remain (in an
145 * inactive state) until its reference count drops to zero, at which
146 * point it will be deallocated with its 'release' method.
147 */
148void posix_clock_unregister(struct posix_clock *clk);
149
150#endif
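A minimal driver sketch following the kernel-doc above: embed struct posix_clock in a private structure, recover it with container_of() in the callbacks, and register it (hypothetical code, not part of this diff; all example_* names are invented):

#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/slab.h>

struct example_clock {
        struct posix_clock clock;
        /* ... hardware state ... */
};

/* Illustrative only: read back a time value for this clock. */
static int example_gettime(struct posix_clock *pc, struct timespec *ts)
{
        struct example_clock *ec = container_of(pc, struct example_clock, clock);

        /* read the hardware behind 'ec' ... */
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
        return 0;
}

/* Called when the last reference is dropped after unregistration. */
static void example_release(struct posix_clock *pc)
{
        kfree(container_of(pc, struct example_clock, clock));
}

static int example_register(dev_t devid)
{
        struct example_clock *ec = kzalloc(sizeof(*ec), GFP_KERNEL);

        if (!ec)
                return -ENOMEM;

        ec->clock.ops.owner         = THIS_MODULE;
        ec->clock.ops.clock_gettime = example_gettime;
        ec->clock.release           = example_release;  /* dynamically allocated */

        return posix_clock_register(&ec->clock, devid);
}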
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 3e23844a6990..d51243ae0726 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -4,6 +4,7 @@
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/timex.h>
7 8
8union cpu_time_count { 9union cpu_time_count {
9 cputime_t cpu; 10 cputime_t cpu;
@@ -17,10 +18,21 @@ struct cpu_timer_list {
17 int firing; 18 int firing;
18}; 19};
19 20
21/*
22 * Bit fields within a clockid:
23 *
24 * The most significant 29 bits hold either a pid or a file descriptor.
25 *
26 * Bit 2 indicates whether a cpu clock refers to a thread or a process.
27 *
28 * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
29 *
30 * A clockid is invalid if bits 2, 1, and 0 are all set.
31 */
20#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) 32#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
21#define CPUCLOCK_PERTHREAD(clock) \ 33#define CPUCLOCK_PERTHREAD(clock) \
22 (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) 34 (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
23#define CPUCLOCK_PID_MASK 7 35
24#define CPUCLOCK_PERTHREAD_MASK 4 36#define CPUCLOCK_PERTHREAD_MASK 4
25#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) 37#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
26#define CPUCLOCK_CLOCK_MASK 3 38#define CPUCLOCK_CLOCK_MASK 3
@@ -28,12 +40,17 @@ struct cpu_timer_list {
28#define CPUCLOCK_VIRT 1 40#define CPUCLOCK_VIRT 1
29#define CPUCLOCK_SCHED 2 41#define CPUCLOCK_SCHED 2
30#define CPUCLOCK_MAX 3 42#define CPUCLOCK_MAX 3
43#define CLOCKFD CPUCLOCK_MAX
44#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
31 45
32#define MAKE_PROCESS_CPUCLOCK(pid, clock) \ 46#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
33 ((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) 47 ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
34#define MAKE_THREAD_CPUCLOCK(tid, clock) \ 48#define MAKE_THREAD_CPUCLOCK(tid, clock) \
35 MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) 49 MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
36 50
51#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
52#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
53
37/* POSIX.1b interval timer structure. */ 54/* POSIX.1b interval timer structure. */
38struct k_itimer { 55struct k_itimer {
39 struct list_head list; /* free/ allocate list */ 56 struct list_head list; /* free/ allocate list */
@@ -67,10 +84,11 @@ struct k_itimer {
67}; 84};
68 85
69struct k_clock { 86struct k_clock {
70 int res; /* in nanoseconds */
71 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp); 87 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
72 int (*clock_set) (const clockid_t which_clock, struct timespec * tp); 88 int (*clock_set) (const clockid_t which_clock,
89 const struct timespec *tp);
73 int (*clock_get) (const clockid_t which_clock, struct timespec * tp); 90 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
91 int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
74 int (*timer_create) (struct k_itimer *timer); 92 int (*timer_create) (struct k_itimer *timer);
75 int (*nsleep) (const clockid_t which_clock, int flags, 93 int (*nsleep) (const clockid_t which_clock, int flags,
76 struct timespec *, struct timespec __user *); 94 struct timespec *, struct timespec __user *);
@@ -84,28 +102,14 @@ struct k_clock {
84 struct itimerspec * cur_setting); 102 struct itimerspec * cur_setting);
85}; 103};
86 104
87void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock); 105extern struct k_clock clock_posix_cpu;
106extern struct k_clock clock_posix_dynamic;
88 107
89/* error handlers for timer_create, nanosleep and settime */ 108void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock);
90int do_posix_clock_nonanosleep(const clockid_t, int flags, struct timespec *,
91 struct timespec __user *);
92int do_posix_clock_nosettime(const clockid_t, struct timespec *tp);
93 109
94/* function to call to trigger timer event */ 110/* function to call to trigger timer event */
95int posix_timer_event(struct k_itimer *timr, int si_private); 111int posix_timer_event(struct k_itimer *timr, int si_private);
96 112
97int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *ts);
98int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *ts);
99int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *ts);
100int posix_cpu_timer_create(struct k_itimer *timer);
101int posix_cpu_nsleep(const clockid_t which_clock, int flags,
102 struct timespec *rqtp, struct timespec __user *rmtp);
103long posix_cpu_nsleep_restart(struct restart_block *restart_block);
104int posix_cpu_timer_set(struct k_itimer *timer, int flags,
105 struct itimerspec *new, struct itimerspec *old);
106int posix_cpu_timer_del(struct k_itimer *timer);
107void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp);
108
109void posix_cpu_timer_schedule(struct k_itimer *timer); 113void posix_cpu_timer_schedule(struct k_itimer *timer);
110 114
111void run_posix_cpu_timers(struct task_struct *task); 115void run_posix_cpu_timers(struct task_struct *task);
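The new CLOCKFD encoding above lets a clockid carry the open file descriptor of a dynamic clock's character device. A hedged userspace-style sketch, with the arithmetic worked out (the device path and helper name are invented, not part of this diff):

#include <fcntl.h>
#include <time.h>

#define CLOCKFD                 3
#define FD_TO_CLOCKID(fd)       ((~(clockid_t) (fd) << 3) | CLOCKFD)

/*
 * Illustrative only: open a dynamic clock device and read it through
 * the regular clock_gettime() interface.
 * For fd == 3:  ~3 = 0xfffffffc,  << 3 = 0xffffffe0,  | CLOCKFD = 0xffffffe3,
 * and CLOCKID_TO_FD(0xffffffe3) recovers 3 again.
 */
static int example_read_dynamic_clock(const char *devpath, struct timespec *ts)
{
        int fd = open(devpath, O_RDWR);

        if (fd < 0)
                return -1;
        return clock_gettime(FD_TO_CLOCKID(fd), ts);
}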
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 8d3a2486544d..ab38ac80b0f9 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -100,6 +100,8 @@ void ring_buffer_free(struct ring_buffer *buffer);
100 100
101int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 101int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
102 102
103void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
104
103struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, 105struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
104 unsigned long length); 106 unsigned long length);
105int ring_buffer_unlock_commit(struct ring_buffer *buffer, 107int ring_buffer_unlock_commit(struct ring_buffer *buffer,
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 89c3e5182991..2ca7e8a78060 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -133,7 +133,6 @@ extern struct class *rtc_class;
133 * The (current) exceptions are mostly filesystem hooks: 133 * The (current) exceptions are mostly filesystem hooks:
134 * - the proc() hook for procfs 134 * - the proc() hook for procfs
135 * - non-ioctl() chardev hooks: open(), release(), read_callback() 135 * - non-ioctl() chardev hooks: open(), release(), read_callback()
136 * - periodic irq calls: irq_set_state(), irq_set_freq()
137 * 136 *
138 * REVISIT those periodic irq calls *do* have ops_lock when they're 137 * REVISIT those periodic irq calls *do* have ops_lock when they're
139 * issued through ioctl() ... 138 * issued through ioctl() ...
@@ -148,11 +147,8 @@ struct rtc_class_ops {
148 int (*set_alarm)(struct device *, struct rtc_wkalrm *); 147 int (*set_alarm)(struct device *, struct rtc_wkalrm *);
149 int (*proc)(struct device *, struct seq_file *); 148 int (*proc)(struct device *, struct seq_file *);
150 int (*set_mmss)(struct device *, unsigned long secs); 149 int (*set_mmss)(struct device *, unsigned long secs);
151 int (*irq_set_state)(struct device *, int enabled);
152 int (*irq_set_freq)(struct device *, int freq);
153 int (*read_callback)(struct device *, int data); 150 int (*read_callback)(struct device *, int data);
154 int (*alarm_irq_enable)(struct device *, unsigned int enabled); 151 int (*alarm_irq_enable)(struct device *, unsigned int enabled);
155 int (*update_irq_enable)(struct device *, unsigned int enabled);
156}; 152};
157 153
158#define RTC_DEVICE_NAME_SIZE 20 154#define RTC_DEVICE_NAME_SIZE 20
@@ -227,6 +223,7 @@ extern void rtc_device_unregister(struct rtc_device *rtc);
227extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); 223extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
228extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); 224extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
229extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); 225extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
226int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
230extern int rtc_read_alarm(struct rtc_device *rtc, 227extern int rtc_read_alarm(struct rtc_device *rtc,
231 struct rtc_wkalrm *alrm); 228 struct rtc_wkalrm *alrm);
232extern int rtc_set_alarm(struct rtc_device *rtc, 229extern int rtc_set_alarm(struct rtc_device *rtc,
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index bd31808c7d8e..cc0072e93e36 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -43,14 +43,6 @@ typedef struct {
43 RW_DEP_MAP_INIT(lockname) } 43 RW_DEP_MAP_INIT(lockname) }
44#endif 44#endif
45 45
46/*
47 * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence
48 * deprecated.
49 *
50 * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
51 */
52#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
53
54#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 46#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
55 47
56#endif /* __LINUX_RWLOCK_TYPES_H */ 48#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index bdfcc2527970..34701241b673 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -12,15 +12,7 @@
12#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" 12#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
13#endif 13#endif
14 14
15#include <linux/spinlock.h>
16#include <linux/list.h>
17
18#ifdef __KERNEL__ 15#ifdef __KERNEL__
19
20#include <linux/types.h>
21
22struct rwsem_waiter;
23
24/* 16/*
25 * the rw-semaphore definition 17 * the rw-semaphore definition
26 * - if activity is 0 then there are no active readers or writers 18 * - if activity is 0 then there are no active readers or writers
@@ -37,28 +29,7 @@ struct rw_semaphore {
37#endif 29#endif
38}; 30};
39 31
40#ifdef CONFIG_DEBUG_LOCK_ALLOC 32#define RWSEM_UNLOCKED_VALUE 0x00000000
41# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
42#else
43# define __RWSEM_DEP_MAP_INIT(lockname)
44#endif
45
46#define __RWSEM_INITIALIZER(name) \
47{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
48 __RWSEM_DEP_MAP_INIT(name) }
49
50#define DECLARE_RWSEM(name) \
51 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
52
53extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
54 struct lock_class_key *key);
55
56#define init_rwsem(sem) \
57do { \
58 static struct lock_class_key __key; \
59 \
60 __init_rwsem((sem), #sem, &__key); \
61} while (0)
62 33
63extern void __down_read(struct rw_semaphore *sem); 34extern void __down_read(struct rw_semaphore *sem);
64extern int __down_read_trylock(struct rw_semaphore *sem); 35extern int __down_read_trylock(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index efd348fe8ca7..a8afe9cd000c 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,6 +11,9 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/list.h>
15#include <linux/spinlock.h>
16
14#include <asm/system.h> 17#include <asm/system.h>
15#include <asm/atomic.h> 18#include <asm/atomic.h>
16 19
@@ -19,9 +22,57 @@ struct rw_semaphore;
19#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 22#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
20#include <linux/rwsem-spinlock.h> /* use a generic implementation */ 23#include <linux/rwsem-spinlock.h> /* use a generic implementation */
21#else 24#else
22#include <asm/rwsem.h> /* use an arch-specific implementation */ 25/* All arch specific implementations share the same struct */
26struct rw_semaphore {
27 long count;
28 spinlock_t wait_lock;
29 struct list_head wait_list;
30#ifdef CONFIG_DEBUG_LOCK_ALLOC
31 struct lockdep_map dep_map;
32#endif
33};
34
35extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
36extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
37extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
38extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
39
40/* Include the arch specific part */
41#include <asm/rwsem.h>
42
43/* In all implementations count != 0 means locked */
44static inline int rwsem_is_locked(struct rw_semaphore *sem)
45{
46 return sem->count != 0;
47}
48
49#endif
50
51/* Common initializer macros and functions */
52
53#ifdef CONFIG_DEBUG_LOCK_ALLOC
54# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
55#else
56# define __RWSEM_DEP_MAP_INIT(lockname)
23#endif 57#endif
24 58
59#define __RWSEM_INITIALIZER(name) \
60 { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
61 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
62
63#define DECLARE_RWSEM(name) \
64 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
65
66extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
67 struct lock_class_key *key);
68
69#define init_rwsem(sem) \
70do { \
71 static struct lock_class_key __key; \
72 \
73 __init_rwsem((sem), #sem, &__key); \
74} while (0)
75
25/* 76/*
26 * lock for reading 77 * lock for reading
27 */ 78 */
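The initializer macros moved into this header are now shared by the spinlock-based and the arch-specific rwsem implementations, so callers look the same either way. A minimal sketch (hypothetical, not part of this diff; the example_* names are invented):

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_static_sem);       /* static definition */

/* Illustrative only: both declaration styles, plus basic read/write use. */
static void example_rwsem(struct rw_semaphore *dynamic_sem)
{
        init_rwsem(dynamic_sem);                /* lockdep class per call site */

        down_read(&example_static_sem);
        /* ... read-side critical section ... */
        up_read(&example_static_sem);

        down_write(dynamic_sem);
        /* ... write-side critical section ... */
        up_write(dynamic_sem);
}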
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777d8a5ed06b..c15936fe998b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1058,6 +1058,7 @@ struct sched_class {
1058 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1058 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1059 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1059 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1060 void (*yield_task) (struct rq *rq); 1060 void (*yield_task) (struct rq *rq);
1061 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1061 1062
1062 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); 1063 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1063 1064
@@ -1084,12 +1085,10 @@ struct sched_class {
1084 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); 1085 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1085 void (*task_fork) (struct task_struct *p); 1086 void (*task_fork) (struct task_struct *p);
1086 1087
1087 void (*switched_from) (struct rq *this_rq, struct task_struct *task, 1088 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1088 int running); 1089 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1089 void (*switched_to) (struct rq *this_rq, struct task_struct *task,
1090 int running);
1091 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1090 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1092 int oldprio, int running); 1091 int oldprio);
1093 1092
1094 unsigned int (*get_rr_interval) (struct rq *rq, 1093 unsigned int (*get_rr_interval) (struct rq *rq,
1095 struct task_struct *task); 1094 struct task_struct *task);
@@ -1715,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1715/* 1714/*
1716 * Per process flags 1715 * Per process flags
1717 */ 1716 */
1718#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
1719#define PF_STARTING 0x00000002 /* being created */ 1717#define PF_STARTING 0x00000002 /* being created */
1720#define PF_EXITING 0x00000004 /* getting shut down */ 1718#define PF_EXITING 0x00000004 /* getting shut down */
1721#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1719#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1945,8 +1943,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
1945 void __user *buffer, size_t *lenp, 1943 void __user *buffer, size_t *lenp,
1946 loff_t *ppos); 1944 loff_t *ppos);
1947 1945
1948extern unsigned int sysctl_sched_compat_yield;
1949
1950#ifdef CONFIG_SCHED_AUTOGROUP 1946#ifdef CONFIG_SCHED_AUTOGROUP
1951extern unsigned int sysctl_sched_autogroup_enabled; 1947extern unsigned int sysctl_sched_autogroup_enabled;
1952 1948
@@ -1977,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
1977# define rt_mutex_adjust_pi(p) do { } while (0) 1973# define rt_mutex_adjust_pi(p) do { } while (0)
1978#endif 1974#endif
1979 1975
1976extern bool yield_to(struct task_struct *p, bool preempt);
1980extern void set_user_nice(struct task_struct *p, long nice); 1977extern void set_user_nice(struct task_struct *p, long nice);
1981extern int task_prio(const struct task_struct *p); 1978extern int task_prio(const struct task_struct *p);
1982extern int task_nice(const struct task_struct *p); 1979extern int task_nice(const struct task_struct *p);
@@ -2049,7 +2046,7 @@ extern void release_uids(struct user_namespace *ns);
2049 2046
2050#include <asm/current.h> 2047#include <asm/current.h>
2051 2048
2052extern void do_timer(unsigned long ticks); 2049extern void xtime_update(unsigned long ticks);
2053 2050
2054extern int wake_up_state(struct task_struct *tsk, unsigned int state); 2051extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2055extern int wake_up_process(struct task_struct *tsk); 2052extern int wake_up_process(struct task_struct *tsk);
@@ -2578,13 +2575,6 @@ static inline void inc_syscw(struct task_struct *tsk)
2578#define TASK_SIZE_OF(tsk) TASK_SIZE 2575#define TASK_SIZE_OF(tsk) TASK_SIZE
2579#endif 2576#endif
2580 2577
2581/*
2582 * Call the function if the target task is executing on a CPU right now:
2583 */
2584extern void task_oncpu_function_call(struct task_struct *p,
2585 void (*func) (void *info), void *info);
2586
2587
2588#ifdef CONFIG_MM_OWNER 2578#ifdef CONFIG_MM_OWNER
2589extern void mm_update_next_owner(struct mm_struct *mm); 2579extern void mm_update_next_owner(struct mm_struct *mm);
2590extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); 2580extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
diff --git a/include/linux/security.h b/include/linux/security.h
index b2b7f9749f5e..debbd97db7ab 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -53,7 +53,7 @@ struct audit_krule;
53 */ 53 */
54extern int cap_capable(struct task_struct *tsk, const struct cred *cred, 54extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
55 int cap, int audit); 55 int cap, int audit);
56extern int cap_settime(struct timespec *ts, struct timezone *tz); 56extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
57extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); 57extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
58extern int cap_ptrace_traceme(struct task_struct *parent); 58extern int cap_ptrace_traceme(struct task_struct *parent);
59extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); 59extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -1387,7 +1387,7 @@ struct security_operations {
1387 int (*quotactl) (int cmds, int type, int id, struct super_block *sb); 1387 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
1388 int (*quota_on) (struct dentry *dentry); 1388 int (*quota_on) (struct dentry *dentry);
1389 int (*syslog) (int type); 1389 int (*syslog) (int type);
1390 int (*settime) (struct timespec *ts, struct timezone *tz); 1390 int (*settime) (const struct timespec *ts, const struct timezone *tz);
1391 int (*vm_enough_memory) (struct mm_struct *mm, long pages); 1391 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
1392 1392
1393 int (*bprm_set_creds) (struct linux_binprm *bprm); 1393 int (*bprm_set_creds) (struct linux_binprm *bprm);
@@ -1669,7 +1669,7 @@ int security_sysctl(struct ctl_table *table, int op);
1669int security_quotactl(int cmds, int type, int id, struct super_block *sb); 1669int security_quotactl(int cmds, int type, int id, struct super_block *sb);
1670int security_quota_on(struct dentry *dentry); 1670int security_quota_on(struct dentry *dentry);
1671int security_syslog(int type); 1671int security_syslog(int type);
1672int security_settime(struct timespec *ts, struct timezone *tz); 1672int security_settime(const struct timespec *ts, const struct timezone *tz);
1673int security_vm_enough_memory(long pages); 1673int security_vm_enough_memory(long pages);
1674int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); 1674int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
1675int security_vm_enough_memory_kern(long pages); 1675int security_vm_enough_memory_kern(long pages);
@@ -1904,7 +1904,8 @@ static inline int security_syslog(int type)
1904 return 0; 1904 return 0;
1905} 1905}
1906 1906
1907static inline int security_settime(struct timespec *ts, struct timezone *tz) 1907static inline int security_settime(const struct timespec *ts,
1908 const struct timezone *tz)
1908{ 1909{
1909 return cap_settime(ts, tz); 1910 return cap_settime(ts, tz);
1910} 1911}
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 851b7783720d..73548eb13a5d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -81,14 +81,6 @@ typedef struct spinlock {
81#define __SPIN_LOCK_UNLOCKED(lockname) \ 81#define __SPIN_LOCK_UNLOCKED(lockname) \
82 (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) 82 (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
83 83
84/*
85 * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
86 * deprecated.
87 * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
88 * appropriate.
89 */
90#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
91
92#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) 84#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
93 85
94#include <linux/rwlock_types.h> 86#include <linux/rwlock_types.h>
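With SPIN_LOCK_UNLOCKED removed above (and RW_LOCK_UNLOCKED earlier), the lockdep-aware initializers are the only remaining options, as the kthread.h hunk in this diff already demonstrates. A minimal sketch (hypothetical, not part of this diff; example_* names are invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);           /* file-scope definition */

struct example_dev {
        spinlock_t lock;
};

/* Illustrative only: in-struct initialization via __SPIN_LOCK_UNLOCKED(). */
static struct example_dev example_dev = {
        .lock = __SPIN_LOCK_UNLOCKED(example_dev.lock),
};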
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 98664db1be47..1f5c18e6f4f1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -62,6 +62,7 @@ struct robust_list_head;
62struct getcpu_cache; 62struct getcpu_cache;
63struct old_linux_dirent; 63struct old_linux_dirent;
64struct perf_event_attr; 64struct perf_event_attr;
65struct file_handle;
65 66
66#include <linux/types.h> 67#include <linux/types.h>
67#include <linux/aio_abi.h> 68#include <linux/aio_abi.h>
@@ -132,11 +133,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
132 .class = &event_class_syscall_enter, \ 133 .class = &event_class_syscall_enter, \
133 .event.funcs = &enter_syscall_print_funcs, \ 134 .event.funcs = &enter_syscall_print_funcs, \
134 .data = (void *)&__syscall_meta_##sname,\ 135 .data = (void *)&__syscall_meta_##sname,\
136 .flags = TRACE_EVENT_FL_CAP_ANY, \
135 }; \ 137 }; \
136 static struct ftrace_event_call __used \ 138 static struct ftrace_event_call __used \
137 __attribute__((section("_ftrace_events"))) \ 139 __attribute__((section("_ftrace_events"))) \
138 *__event_enter_##sname = &event_enter_##sname; \ 140 *__event_enter_##sname = &event_enter_##sname;
139 __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
140 141
141#define SYSCALL_TRACE_EXIT_EVENT(sname) \ 142#define SYSCALL_TRACE_EXIT_EVENT(sname) \
142 static struct syscall_metadata __syscall_meta_##sname; \ 143 static struct syscall_metadata __syscall_meta_##sname; \
@@ -146,11 +147,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
146 .class = &event_class_syscall_exit, \ 147 .class = &event_class_syscall_exit, \
147 .event.funcs = &exit_syscall_print_funcs, \ 148 .event.funcs = &exit_syscall_print_funcs, \
148 .data = (void *)&__syscall_meta_##sname,\ 149 .data = (void *)&__syscall_meta_##sname,\
150 .flags = TRACE_EVENT_FL_CAP_ANY, \
149 }; \ 151 }; \
150 static struct ftrace_event_call __used \ 152 static struct ftrace_event_call __used \
151 __attribute__((section("_ftrace_events"))) \ 153 __attribute__((section("_ftrace_events"))) \
152 *__event_exit_##sname = &event_exit_##sname; \ 154 *__event_exit_##sname = &event_exit_##sname;
153 __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
154 155
155#define SYSCALL_METADATA(sname, nb) \ 156#define SYSCALL_METADATA(sname, nb) \
156 SYSCALL_TRACE_ENTER_EVENT(sname); \ 157 SYSCALL_TRACE_ENTER_EVENT(sname); \
@@ -158,6 +159,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
158 static struct syscall_metadata __used \ 159 static struct syscall_metadata __used \
159 __syscall_meta_##sname = { \ 160 __syscall_meta_##sname = { \
160 .name = "sys"#sname, \ 161 .name = "sys"#sname, \
162 .syscall_nr = -1, /* Filled in at boot */ \
161 .nb_args = nb, \ 163 .nb_args = nb, \
162 .types = types_##sname, \ 164 .types = types_##sname, \
163 .args = args_##sname, \ 165 .args = args_##sname, \
@@ -175,6 +177,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
175 static struct syscall_metadata __used \ 177 static struct syscall_metadata __used \
176 __syscall_meta__##sname = { \ 178 __syscall_meta__##sname = { \
177 .name = "sys_"#sname, \ 179 .name = "sys_"#sname, \
180 .syscall_nr = -1, /* Filled in at boot */ \
178 .nb_args = 0, \ 181 .nb_args = 0, \
179 .enter_event = &event_enter__##sname, \ 182 .enter_event = &event_enter__##sname, \
180 .exit_event = &event_exit__##sname, \ 183 .exit_event = &event_exit__##sname, \
@@ -313,6 +316,8 @@ asmlinkage long sys_clock_settime(clockid_t which_clock,
  const struct timespec __user *tp);
 asmlinkage long sys_clock_gettime(clockid_t which_clock,
  struct timespec __user *tp);
+asmlinkage long sys_clock_adjtime(clockid_t which_clock,
+ struct timex __user *tx);
 asmlinkage long sys_clock_getres(clockid_t which_clock,
  struct timespec __user *tp);
 asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
@@ -832,5 +837,10 @@ asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
  unsigned long prot, unsigned long flags,
  unsigned long fd, unsigned long pgoff);
 asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg);
-
+asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
+ struct file_handle __user *handle,
+ int __user *mnt_id, int flag);
+asmlinkage long sys_open_by_handle_at(int mountdirfd,
+ struct file_handle __user *handle,
+ int flags);
 #endif
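For reference, the two new handle syscalls declared above pair a path lookup with a later handle-based open. A minimal userspace sketch, assuming the glibc name_to_handle_at()/open_by_handle_at() wrappers with _GNU_SOURCE; the 128-byte handle buffer is an assumed "big enough" size, not a constant taken from this diff, and error handling is trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct file_handle *fh;
	int mount_id, fd;

	if (argc < 2)
		return 1;

	/* Oversized buffer; a first call with handle_bytes == 0 would
	 * report the exact size needed by failing with EOVERFLOW. */
	fh = malloc(sizeof(*fh) + 128);
	fh->handle_bytes = 128;

	if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}

	/* Needs CAP_DAC_READ_SEARCH; mount_id would normally be mapped to a
	 * mount fd via /proc/self/mountinfo - reusing AT_FDCWD here only
	 * works when the file lives on the current working mount. */
	fd = open_by_handle_at(AT_FDCWD, fh, O_RDONLY);
	if (fd < 0) {
		perror("open_by_handle_at");
		return 1;
	}
	close(fd);
	free(fh);
	return 0;
}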
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index c90696544176..20fc303947d3 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -18,9 +18,6 @@ struct compat_timespec;
 struct restart_block {
  long (*fn)(struct restart_block *);
  union {
-   struct {
-    unsigned long arg0, arg1, arg2, arg3;
-   };
   /* For futex_wait and futex_wait_requeue_pi */
   struct {
    u32 __user *uaddr;
diff --git a/include/linux/time.h b/include/linux/time.h
index 1e6d3b59238d..454a26205787 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -113,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 #define timespec_valid(ts) \
  (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
 
-extern seqlock_t xtime_lock;
-
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
 extern int update_persistent_clock(struct timespec now);
@@ -125,8 +123,9 @@ extern int timekeeping_suspended;
 unsigned long get_seconds(void);
 struct timespec current_kernel_time(void);
 struct timespec __current_kernel_time(void); /* does not take xtime_lock */
-struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */
 struct timespec get_monotonic_coarse(void);
+void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
+  struct timespec *wtom, struct timespec *sleep);
 
 #define CURRENT_TIME (current_kernel_time())
 #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
@@ -147,8 +146,9 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
 extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(struct timespec *tv);
-extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
+extern int do_settimeofday(const struct timespec *tv);
+extern int do_sys_settimeofday(const struct timespec *tv,
+  const struct timezone *tz);
 #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
 struct itimerval;
@@ -162,12 +162,13 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw,
  struct timespec *ts_real);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
+extern void get_monotonic_boottime(struct timespec *ts);
 
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
-extern void update_wall_time(void);
 extern void timekeeping_leap_insert(int leapsecond);
+extern int timekeeping_inject_offset(struct timespec *ts);
 
 struct tms;
 extern void do_sys_times(struct tms *);
@@ -292,6 +293,7 @@ struct itimerval {
 #define CLOCK_MONOTONIC_RAW 4
 #define CLOCK_REALTIME_COARSE 5
 #define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_BOOTTIME 7
 
 /*
  * The IDs of various hardware clocks:
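The new CLOCK_BOOTTIME id behaves like CLOCK_MONOTONIC but keeps advancing across suspend. A minimal userspace sketch, assuming a libc whose headers already expose the constant (older headers may need the value 7 from the hunk above defined by hand):

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7	/* value from the hunk above */
#endif

int main(void)
{
	struct timespec mono, boot;

	clock_gettime(CLOCK_MONOTONIC, &mono);
	clock_gettime(CLOCK_BOOTTIME, &boot);

	/* The difference is roughly the total time spent suspended. */
	printf("monotonic=%ld.%09ld boottime=%ld.%09ld\n",
	       (long)mono.tv_sec, mono.tv_nsec,
	       (long)boot.tv_sec, boot.tv_nsec);
	return 0;
}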
diff --git a/include/linux/timex.h b/include/linux/timex.h
index d23999f9499d..aa60fe7b6ed6 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -73,7 +73,7 @@ struct timex {
  long tolerance; /* clock frequency tolerance (ppm)
   * (read only)
   */
- struct timeval time; /* (read only) */
+ struct timeval time; /* (read only, except for ADJ_SETOFFSET) */
  long tick; /* (modified) usecs between clock ticks */
 
  long ppsfreq; /* pps frequency (scaled ppm) (ro) */
@@ -102,6 +102,7 @@ struct timex {
 #define ADJ_STATUS 0x0010 /* clock status */
 #define ADJ_TIMECONST 0x0020 /* pll time constant */
 #define ADJ_TAI 0x0080 /* set TAI offset */
+#define ADJ_SETOFFSET 0x0100 /* add 'time' to current time */
 #define ADJ_MICRO 0x1000 /* select microsecond resolution */
 #define ADJ_NANO 0x2000 /* select nanosecond resolution */
 #define ADJ_TICK 0x4000 /* tick value */
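Together with the sys_clock_adjtime declaration earlier in the series, ADJ_SETOFFSET lets a PTP-style daemon step a clock by a signed offset instead of computing and writing an absolute time. A hedged userspace sketch, assuming the clock_adjtime() wrapper from <sys/timex.h> and sufficient privilege:

#include <stdio.h>
#include <time.h>
#include <sys/timex.h>

#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100	/* value from the hunk above */
#endif

/* Step CLOCK_REALTIME forward by 500 ms. */
int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_SETOFFSET | ADJ_NANO;	/* interpret 'time' in nanoseconds */
	tx.time.tv_sec = 0;
	tx.time.tv_usec = 500000000;		/* carries nanoseconds with ADJ_NANO */

	if (clock_adjtime(CLOCK_REALTIME, &tx) < 0) {
		perror("clock_adjtime");
		return 1;
	}
	return 0;
}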
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
index 7eee77895cb3..4cbbcef6baa8 100644
--- a/include/trace/events/mce.h
+++ b/include/trace/events/mce.h
@@ -17,36 +17,36 @@ TRACE_EVENT(mce_record,
  TP_STRUCT__entry(
   __field( u64, mcgcap )
   __field( u64, mcgstatus )
-  __field( u8, bank )
   __field( u64, status )
   __field( u64, addr )
   __field( u64, misc )
   __field( u64, ip )
-  __field( u8, cs )
   __field( u64, tsc )
   __field( u64, walltime )
   __field( u32, cpu )
   __field( u32, cpuid )
   __field( u32, apicid )
   __field( u32, socketid )
+  __field( u8, cs )
+  __field( u8, bank )
   __field( u8, cpuvendor )
  ),
 
  TP_fast_assign(
   __entry->mcgcap = m->mcgcap;
   __entry->mcgstatus = m->mcgstatus;
-  __entry->bank = m->bank;
   __entry->status = m->status;
   __entry->addr = m->addr;
   __entry->misc = m->misc;
   __entry->ip = m->ip;
-  __entry->cs = m->cs;
   __entry->tsc = m->tsc;
   __entry->walltime = m->time;
   __entry->cpu = m->extcpu;
   __entry->cpuid = m->cpuid;
   __entry->apicid = m->apicid;
   __entry->socketid = m->socketid;
+  __entry->cs = m->cs;
+  __entry->bank = m->bank;
   __entry->cpuvendor = m->cpuvendor;
  ),
 
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index c6bae36547e5..21a546d27c0c 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -108,14 +108,14 @@ TRACE_EVENT(module_request,
  TP_ARGS(name, wait, ip),
 
  TP_STRUCT__entry(
-  __field( bool, wait )
   __field( unsigned long, ip )
+  __field( bool, wait )
   __string( name, name )
  ),
 
  TP_fast_assign(
-  __entry->wait = wait;
   __entry->ip = ip;
+  __entry->wait = wait;
   __assign_str(name, name);
  ),
 
@@ -129,4 +129,3 @@ TRACE_EVENT(module_request,
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
-
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index f10293c41b1e..0c68ae22da22 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -19,14 +19,14 @@ TRACE_EVENT(kfree_skb,
 
  TP_STRUCT__entry(
   __field( void *, skbaddr )
-  __field( unsigned short, protocol )
   __field( void *, location )
+  __field( unsigned short, protocol )
  ),
 
  TP_fast_assign(
   __entry->skbaddr = skb;
-  __entry->protocol = ntohs(skb->protocol);
   __entry->location = location;
+  __entry->protocol = ntohs(skb->protocol);
  ),
 
  TP_printk("skbaddr=%p protocol=%u location=%p",
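The mce_record, module_request and kfree_skb reorderings above all apply the same rule: keep narrower tracepoint fields after wider ones so the generated ring-buffer record picks up no interior alignment padding. A stand-alone illustration of the effect, with hypothetical struct names and sizes that assume a typical LP64 target such as x86-64:

#include <stdint.h>
#include <stdio.h>

/* u8 members wedged between u64s cost 7 bytes of padding each. */
struct mce_layout_before {
	uint64_t mcgstatus;
	uint8_t  bank;
	uint64_t status;
	uint8_t  cs;
	uint64_t tsc;
	uint32_t cpu;
	uint8_t  cpuvendor;
};

/* Same members, narrow ones last: only tail padding remains. */
struct mce_layout_after {
	uint64_t mcgstatus;
	uint64_t status;
	uint64_t tsc;
	uint32_t cpu;
	uint8_t  cs;
	uint8_t  bank;
	uint8_t  cpuvendor;
};

int main(void)
{
	/* Typically prints before=48 after=32 on LP64. */
	printf("before=%zu after=%zu\n",
	       sizeof(struct mce_layout_before),
	       sizeof(struct mce_layout_after));
	return 0;
}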
diff --git a/include/xen/events.h b/include/xen/events.h
index 00f53ddcc062..962da2ced5b4 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -75,11 +75,9 @@ int xen_allocate_pirq(unsigned gsi, int shareable, char *name);
 int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
 
 #ifdef CONFIG_PCI_MSI
-/* Allocate an irq and a pirq to be used with MSIs. */
-#define XEN_ALLOC_PIRQ (1 << 0)
-#define XEN_ALLOC_IRQ (1 << 1)
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc_mask);
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type);
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+  int pirq, int vector, const char *name);
 #endif
 
 /* De-allocates the above mentioned physical interrupt. */
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index c2d1fa4dc1ee..61e523af3c46 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t;
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
-struct blkif_request {
- uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t id; /* private guest value, echoed in resp */
+struct blkif_request_rw {
  blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
  struct blkif_request_segment {
   grant_ref_t gref; /* reference to I/O buffer frame */
@@ -65,6 +61,16 @@ struct blkif_request {
  } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
+struct blkif_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ union {
+  struct blkif_request_rw rw;
+ } u;
+};
+
 struct blkif_response {
  uint64_t id; /* copied from request */
  uint8_t operation; /* copied from request */
@@ -91,4 +97,25 @@ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
 #define VDISK_REMOVABLE 0x2
 #define VDISK_READONLY 0x4
 
+/* Xen-defined major numbers for virtual disks, they look strangely
+ * familiar */
+#define XEN_IDE0_MAJOR 3
+#define XEN_IDE1_MAJOR 22
+#define XEN_SCSI_DISK0_MAJOR 8
+#define XEN_SCSI_DISK1_MAJOR 65
+#define XEN_SCSI_DISK2_MAJOR 66
+#define XEN_SCSI_DISK3_MAJOR 67
+#define XEN_SCSI_DISK4_MAJOR 68
+#define XEN_SCSI_DISK5_MAJOR 69
+#define XEN_SCSI_DISK6_MAJOR 70
+#define XEN_SCSI_DISK7_MAJOR 71
+#define XEN_SCSI_DISK8_MAJOR 128
+#define XEN_SCSI_DISK9_MAJOR 129
+#define XEN_SCSI_DISK10_MAJOR 130
+#define XEN_SCSI_DISK11_MAJOR 131
+#define XEN_SCSI_DISK12_MAJOR 132
+#define XEN_SCSI_DISK13_MAJOR 133
+#define XEN_SCSI_DISK14_MAJOR 134
+#define XEN_SCSI_DISK15_MAJOR 135
+
 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
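The split above moves the read/write payload into struct blkif_request_rw, reached through the new union, so other request types can be added later without growing the common header. A hedged, frontend-style sketch of filling one request under the new layout; it is not taken from blkfront, ring and grant plumbing is omitted, and the segment fields and 512-byte sector bounds are assumptions based on this header:

#include <xen/interface/io/blkif.h>

/* Illustrative only; assumes BLKIF_OP_READ and the segment fields
 * (gref, first_sect, last_sect) declared in this header. */
static void fill_one_read(struct blkif_request *req, blkif_vdev_t handle,
			  uint64_t id, blkif_sector_t sector, grant_ref_t gref)
{
	req->operation = BLKIF_OP_READ;
	req->nr_segments = 1;
	req->handle = handle;
	req->id = id;			/* echoed back in blkif_response.id */

	/* Read/write specifics now live under u.rw. */
	req->u.rw.sector_number = sector;
	req->u.rw.seg[0].gref = gref;
	req->u.rw.seg[0].first_sect = 0;
	req->u.rw.seg[0].last_sect = 7;	/* 8 * 512 bytes = one 4K page */
}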
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 2befa3e2f1bc..b33257bc7e83 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -30,7 +30,7 @@
 #define __HYPERVISOR_stack_switch 3
 #define __HYPERVISOR_set_callbacks 4
 #define __HYPERVISOR_fpu_taskswitch 5
-#define __HYPERVISOR_sched_op 6
+#define __HYPERVISOR_sched_op_compat 6
 #define __HYPERVISOR_dom0_op 7
 #define __HYPERVISOR_set_debugreg 8
 #define __HYPERVISOR_get_debugreg 9
@@ -52,7 +52,7 @@
 #define __HYPERVISOR_mmuext_op 26
 #define __HYPERVISOR_acm_op 27
 #define __HYPERVISOR_nmi_op 28
-#define __HYPERVISOR_sched_op_new 29
+#define __HYPERVISOR_sched_op 29
 #define __HYPERVISOR_callback_op 30
 #define __HYPERVISOR_xenoprof_op 31
 #define __HYPERVISOR_event_channel_op 32
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 98b92154a264..03c85d7387fb 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,9 +5,9 @@
 
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
-void xen_pre_suspend(void);
-void xen_post_suspend(int suspend_cancelled);
-void xen_hvm_post_suspend(int suspend_cancelled);
+void xen_arch_pre_suspend(void);
+void xen_arch_post_suspend(int suspend_cancelled);
+void xen_arch_hvm_post_suspend(int suspend_cancelled);
 
 void xen_mm_pin_all(void);
 void xen_mm_unpin_all(void);