Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/backing-dev.h          |  55
-rw-r--r--  include/linux/binfmts.h              |   1
-rw-r--r--  include/linux/bitmap.h               |  18
-rw-r--r--  include/linux/blkdev.h               |   1
-rw-r--r--  include/linux/cb710.h                |  29
-rw-r--r--  include/linux/cgroup.h               |  28
-rw-r--r--  include/linux/clockchips.h           |   9
-rw-r--r--  include/linux/clocksource.h          |  14
-rw-r--r--  include/linux/console_struct.h       |   1
-rw-r--r--  include/linux/cpu.h                  |  17
-rw-r--r--  include/linux/cpumask.h              |  20
-rw-r--r--  include/linux/cred.h                 |  69
-rw-r--r--  include/linux/crypto.h               |  43
-rw-r--r--  include/linux/decompress/generic.h   |  32
-rw-r--r--  include/linux/device-mapper.h        |   8
-rw-r--r--  include/linux/dm-log-userspace.h     |  13
-rw-r--r--  include/linux/dma-mapping.h          |   5
-rw-r--r--  include/linux/ext3_fs.h              |   2
-rw-r--r--  include/linux/fips.h                 |  10
-rw-r--r--  include/linux/flex_array.h           |  49
-rw-r--r--  include/linux/fs.h                   |  35
-rw-r--r--  include/linux/fsnotify_backend.h     |   2
-rw-r--r--  include/linux/ftrace_event.h         |  12
-rw-r--r--  include/linux/gen_stats.h            |   5
-rw-r--r--  include/linux/hardirq.h              |   4
-rw-r--r--  include/linux/hrtimer.h              |   2
-rw-r--r--  include/linux/hugetlb.h              |   6
-rw-r--r--  include/linux/inetdevice.h           |   2
-rw-r--r--  include/linux/init_task.h            |  11
-rw-r--r--  include/linux/input/matrix_keypad.h  |  13
-rw-r--r--  include/linux/interrupt.h            |  32
-rw-r--r--  include/linux/iocontext.h            |   2
-rw-r--r--  include/linux/irq.h                  |  18
-rw-r--r--  include/linux/irqnr.h                |   6
-rw-r--r--  include/linux/key.h                  |   8
-rw-r--r--  include/linux/keyctl.h               |   1
-rw-r--r--  include/linux/kmemcheck.h            |   7
-rw-r--r--  include/linux/kmemleak.h             |  18
-rw-r--r--  include/linux/kvm_host.h             |   1
-rw-r--r--  include/linux/lguest.h               |  39
-rw-r--r--  include/linux/lguest_launcher.h      |  18
-rw-r--r--  include/linux/libata.h               |   2
-rw-r--r--  include/linux/lmb.h                  |   2
-rw-r--r--  include/linux/lockdep.h              |  18
-rw-r--r--  include/linux/lsm_audit.h            |  12
-rw-r--r--  include/linux/mm.h                   |  15
-rw-r--r--  include/linux/mm_types.h             |   2
-rw-r--r--  include/linux/mtd/mtd.h              |   2
-rw-r--r--  include/linux/mtd/partitions.h       |   2
-rw-r--r--  include/linux/nfs_fs.h               |   5
-rw-r--r--  include/linux/nmi.h                  |  19
-rw-r--r--  include/linux/nodemask.h             |  28
-rw-r--r--  include/linux/of_mdio.h              |   3
-rw-r--r--  include/linux/pagemap.h              |   4
-rw-r--r--  include/linux/perf_counter.h         |  74
-rw-r--r--  include/linux/pps.h                  |   2
-rw-r--r--  include/linux/rcuclassic.h           | 178
-rw-r--r--  include/linux/rcupdate.h             |  98
-rw-r--r--  include/linux/rcupreempt.h           | 127
-rw-r--r--  include/linux/rcupreempt_trace.h     |  97
-rw-r--r--  include/linux/rcutree.h              | 262
-rw-r--r--  include/linux/rfkill.h               |   2
-rw-r--r--  include/linux/scatterlist.h          |   2
-rw-r--r--  include/linux/sched.h                |  39
-rw-r--r--  include/linux/security.h             | 178
-rw-r--r--  include/linux/shmem_fs.h             |   2
-rw-r--r--  include/linux/skbuff.h               |   4
-rw-r--r--  include/linux/spinlock.h             |  64
-rw-r--r--  include/linux/spinlock_api_smp.h     | 394
-rw-r--r--  include/linux/swiotlb.h              |  11
-rw-r--r--  include/linux/tty.h                  |   5
-rw-r--r--  include/linux/tty_ldisc.h            |   2
-rw-r--r--  include/linux/ucb1400.h              |   4
-rw-r--r--  include/linux/uio.h                  |  17
-rw-r--r--  include/linux/videodev2.h            |   1
-rw-r--r--  include/linux/virtio_blk.h           |   6
-rw-r--r--  include/linux/virtio_config.h        |   3
-rw-r--r--  include/linux/virtio_net.h           |  20
-rw-r--r--  include/linux/virtio_ring.h          |  12
-rw-r--r--  include/linux/wait.h                 |   9
-rw-r--r--  include/linux/workqueue.h            |  15
-rw-r--r--  include/linux/writeback.h            |  23
-rw-r--r--  include/linux/xattr.h                |   1
83 files changed, 1392 insertions(+), 1040 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 1d52425a6118..f169bcb90b58 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,8 @@
 #include <linux/proportions.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
 #include <asm/atomic.h>
 
 struct page;
@@ -23,9 +25,11 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-	BDI_pdflush,		/* A pdflush thread is working this device */
+	BDI_pending,		/* On its way to being activated */
+	BDI_wb_alloc,		/* Default embedded wb allocated */
 	BDI_async_congested,	/* The async (write) queue is getting full */
 	BDI_sync_congested,	/* The sync queue is getting full */
+	BDI_registered,		/* bdi_register() was done */
 	BDI_unused,		/* Available bits start here */
 };
 
@@ -39,7 +43,22 @@ enum bdi_stat_item {
 
 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
 
+struct bdi_writeback {
+	struct list_head list;			/* hangs off the bdi */
+
+	struct backing_dev_info *bdi;		/* our parent bdi */
+	unsigned int nr;
+
+	unsigned long last_old_flush;		/* last old data flush */
+
+	struct task_struct	*task;		/* writeback task */
+	struct list_head	b_dirty;	/* dirty inodes */
+	struct list_head	b_io;		/* parked for writeback */
+	struct list_head	b_more_io;	/* parked for more writeback */
+};
+
 struct backing_dev_info {
+	struct list_head bdi_list;
 	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
 	unsigned long state;	/* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
@@ -48,6 +67,8 @@ struct backing_dev_info {
 	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
 	void *unplug_io_data;
 
+	char *name;
+
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
 
 	struct prop_local_percpu completions;
@@ -56,6 +77,14 @@ struct backing_dev_info {
 	unsigned int min_ratio;
 	unsigned int max_ratio, max_prop_frac;
 
+	struct bdi_writeback wb;  /* default writeback info for this bdi */
+	spinlock_t wb_lock;	  /* protects update side of wb_list */
+	struct list_head wb_list; /* the flusher threads hanging off this bdi */
+	unsigned long wb_mask;	  /* bitmask of registered tasks */
+	unsigned int wb_cnt;	  /* number of registered tasks */
+
+	struct list_head work_list;
+
 	struct device *dev;
 
 #ifdef CONFIG_DEBUG_FS
@@ -71,6 +100,19 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
+void bdi_start_writeback(struct writeback_control *wbc);
+int bdi_writeback_task(struct bdi_writeback *wb);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+	return !list_empty(&wb->b_dirty) ||
+	       !list_empty(&wb->b_io) ||
+	       !list_empty(&wb->b_more_io);
+}
 
 static inline void __add_bdi_stat(struct backing_dev_info *bdi,
 		enum bdi_stat_item item, s64 amount)
@@ -261,6 +303,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
 	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
 }
 
+static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
+{
+	return bdi == &default_backing_dev_info;
+}
+
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
 	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -276,4 +323,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
 	return bdi_cap_swap_backed(mapping->backing_dev_info);
}
 
+static inline int bdi_sched_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
 #endif /* _LINUX_BACKING_DEV_H */
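
The bdi_sched_wait() helper added at the bottom is shaped as a wait_on_bit() action: it schedules once and returns 0 so the wait loop re-checks the bit. A minimal sketch of that intended use (the function and caller are illustrative, and the wait_on_bit() signature assumed is the one from this kernel era):

	#include <linux/backing-dev.h>
	#include <linux/wait.h>

	/* Illustrative: block until a bdi leaves the BDI_pending state. */
	static void example_wait_for_bdi(struct backing_dev_info *bdi)
	{
		wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			    TASK_UNINTERRUPTIBLE);
	}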
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61ee18c1bdb4..2046b5b8af48 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -117,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 				int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
 extern int set_binfmt(struct linux_binfmt *new);
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 2878811c6134..756d78b8c1c5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -94,13 +94,13 @@ extern void __bitmap_shift_right(unsigned long *dst,
 			const unsigned long *src, int shift, int bits);
 extern void __bitmap_shift_left(unsigned long *dst,
 			const unsigned long *src, int shift, int bits);
-extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits);
 extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits);
 extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits);
-extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits);
 extern int __bitmap_intersects(const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits);
@@ -171,13 +171,12 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 	}
 }
 
-static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
+static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
 	if (small_const_nbits(nbits))
-		*dst = *src1 & *src2;
-	else
-		__bitmap_and(dst, src1, src2, nbits);
+		return (*dst = *src1 & *src2) != 0;
+	return __bitmap_and(dst, src1, src2, nbits);
 }
 
 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
@@ -198,13 +197,12 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
 	__bitmap_xor(dst, src1, src2, nbits);
 }
 
-static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
 	if (small_const_nbits(nbits))
-		*dst = *src1 & ~(*src2);
-	else
-		__bitmap_andnot(dst, src1, src2, nbits);
+		return (*dst = *src1 & ~(*src2)) != 0;
+	return __bitmap_andnot(dst, src1, src2, nbits);
 }
 
 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
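
The point of switching bitmap_and()/bitmap_andnot() from void to int is that callers learn whether the destination came out non-empty without a second scan. A hedged sketch of the pattern this enables (function and names are illustrative, not from the patch):

	#include <linux/bitmap.h>
	#include <linux/errno.h>

	/* Illustrative: fail fast when two masks have no bit in common. */
	static int example_first_common_bit(unsigned long *dst,
					    const unsigned long *a,
					    const unsigned long *b, int nbits)
	{
		if (!bitmap_and(dst, a, b, nbits))
			return -EINVAL;		/* intersection is empty */
		return find_first_bit(dst, nbits);
	}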
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e7cb5dbf6c26..69103e053c92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -913,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 63bc9a4d2926..8cc10411bab2 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -140,29 +140,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
 #include <linux/highmem.h>
 #include <linux/scatterlist.h>
 
-/**
- * cb710_sg_miter_stop_writing - stop mapping iteration after writing
- * @miter: sg mapping iter to be stopped
- *
- * Description:
- *   Stops mapping iterator @miter.  @miter should have been started
- *   started using sg_miter_start().  A stopped iteration can be
- *   resumed by calling sg_miter_next() on it.  This is useful when
- *   resources (kmap) need to be released during iteration.
- *
- *   This is a convenience wrapper that will be optimized out for arches
- *   that don't need flush_kernel_dcache_page().
- *
- * Context:
- *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
- */
-static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
-{
-	if (miter->page)
-		flush_kernel_dcache_page(miter->page);
-	sg_miter_stop(miter);
-}
-
 /*
  * 32-bit PIO mapping sg iterator
  *
@@ -171,12 +148,12 @@ static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
  * without DMA support).
  *
  * Best-case reading (transfer from device):
- *   sg_miter_start();
+ *   sg_miter_start(, SG_MITER_TO_SG);
  *   cb710_sg_dwiter_write_from_io();
- *   cb710_sg_miter_stop_writing();
+ *   sg_miter_stop();
 *
 * Best-case writing (transfer to device):
- *   sg_miter_start();
+ *   sg_miter_start(, SG_MITER_FROM_SG);
 *   cb710_sg_dwiter_read_to_io();
 *   sg_miter_stop();
 */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 665fa70e4094..90bba9e62286 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -179,14 +179,11 @@ struct cgroup {
 	 */
 	struct list_head release_list;
 
-	/* pids_mutex protects the fields below */
+	/* pids_mutex protects pids_list and cached pid arrays. */
 	struct rw_semaphore pids_mutex;
-	/* Array of process ids in the cgroup */
-	pid_t *tasks_pids;
-	/* How many files are using the current tasks_pids array */
-	int pids_use_count;
-	/* Length of the current tasks_pids array */
-	int pids_length;
+
+	/* Linked list of struct cgroup_pids */
+	struct list_head pids_list;
 
 	/* For RCU-protected deletion */
 	struct rcu_head rcu_head;
@@ -366,6 +363,23 @@ int cgroup_task_count(const struct cgroup *cgrp);
 int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
 
 /*
+ * When the subsys has to access css and may add permanent refcnt to css,
+ * it should take care of racy conditions with rmdir(). Following set of
+ * functions, is for stop/restart rmdir if necessary.
+ * Because these will call css_get/put, "css" should be alive css.
+ *
+ *  cgroup_exclude_rmdir();
+ *    ...do some jobs which may access arbitrary empty cgroup
+ *  cgroup_release_and_wakeup_rmdir();
+ *
+ * When someone removes a cgroup while cgroup_exclude_rmdir() holds it,
+ * it sleeps and cgroup_release_and_wakeup_rmdir() will wake him up.
+ */
+
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);
+
+/*
  * Control Group subsystem type.
  * See Documentation/cgroups/cgroups.txt for details
  */
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 20a100fe2b4f..3a1dbba4d3ae 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -143,12 +143,3 @@ extern void clockevents_notify(unsigned long reason, void *arg);
 #endif
 
 #endif
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-extern ktime_t clockevents_get_next_event(int cpu);
-#else
-static inline ktime_t clockevents_get_next_event(int cpu)
-{
-	return (ktime_t) { .tv64 = KTIME_MAX };
-}
-#endif
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c56457c8334e..1219be4fb42e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -293,7 +293,12 @@ static inline int clocksource_enable(struct clocksource *cs)
 	if (cs->enable)
 		ret = cs->enable(cs);
 
-	/* save mult_orig on enable */
+	/*
+	 * The frequency may have changed while the clocksource
+	 * was disabled. If so the code in ->enable() must update
+	 * the mult value to reflect the new frequency. Make sure
+	 * mult_orig follows this change.
+	 */
 	cs->mult_orig = cs->mult;
 
 	return ret;
@@ -309,6 +314,13 @@ static inline int clocksource_enable(struct clocksource *cs)
  */
 static inline void clocksource_disable(struct clocksource *cs)
 {
+	/*
+	 * Save mult_orig in mult so clocksource_enable() can
+	 * restore the value regardless if ->enable() updates
+	 * the value of mult or not.
+	 */
+	cs->mult = cs->mult_orig;
+
 	if (cs->disable)
 		cs->disable(cs);
 }
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index d71f7c0f931b..38fe59dc89ae 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -89,7 +89,6 @@ struct vc_data {
 	unsigned int	vc_need_wrap	: 1;
 	unsigned int	vc_can_do_color	: 1;
 	unsigned int	vc_report_mouse : 2;
-	unsigned int	vc_kmalloced	: 1;
 	unsigned char	vc_utf		: 1;	/* Unicode UTF-8 encoding */
 	unsigned char	vc_utf_count;
 	int		vc_utf_char;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d458..47536197ffdd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
 
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) {					\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = pri };	\
+	register_cpu_notifier(&fn##_nb);			\
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
 #ifdef CONFIG_HOTPLUG_CPU
 extern int register_cpu_notifier(struct notifier_block *nb);
 extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
 
 #else	/* CONFIG_SMP */
 
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
 
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
-#define hotcpu_notifier(fn, pri) {				\
-	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = pri };	\
-	register_cpu_notifier(&fn##_nb);			\
-}
+#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
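
cpu_notifier() behaves like the old hotcpu_notifier() but stays compiled in for built-in (non-modular) code even without CONFIG_HOTPLUG_CPU, so boot-time CPU bring-up is still observed; hotcpu_notifier() is now just an alias for it. An illustrative registration (the callback name and body are hypothetical):

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int __cpuinit example_cpu_callback(struct notifier_block *nb,
						  unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_ONLINE:
			pr_info("cpu %u came online\n", cpu);
			break;
		case CPU_DEAD:
			pr_info("cpu %u went away\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static int __init example_init(void)
	{
		cpu_notifier(example_cpu_callback, 0);
		return 0;
	}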
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c5ac87ca7bc6..796df12091b7 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -43,10 +43,10 @@
  * int cpu_isset(cpu, mask)		true iff bit 'cpu' set in mask
  * int cpu_test_and_set(cpu, mask)	test and set bit 'cpu' in mask
  *
- * void cpus_and(dst, src1, src2)	dst = src1 & src2  [intersection]
+ * int cpus_and(dst, src1, src2)	dst = src1 & src2  [intersection]
  * void cpus_or(dst, src1, src2)	dst = src1 | src2  [union]
  * void cpus_xor(dst, src1, src2)	dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2)	dst = src1 & ~src2
+ * int cpus_andnot(dst, src1, src2)	dst = src1 & ~src2
  * void cpus_complement(dst, src)	dst = ~src
  *
  * int cpus_equal(mask1, mask2)		Does mask1 == mask2?
@@ -179,10 +179,10 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
 }
 
 #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
 					const cpumask_t *src2p, int nbits)
 {
-	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+	return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
 
 #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
@@ -201,10 +201,10 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
 
 #define cpus_andnot(dst, src1, src2) \
 				__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
 					const cpumask_t *src2p, int nbits)
 {
-	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+	return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
 
 #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
@@ -738,11 +738,11 @@ static inline void cpumask_clear(struct cpumask *dstp)
  * @src1p: the first input
  * @src2p: the second input
  */
-static inline void cpumask_and(struct cpumask *dstp,
+static inline int cpumask_and(struct cpumask *dstp,
 			       const struct cpumask *src1p,
 			       const struct cpumask *src2p)
 {
-	bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
 		   cpumask_bits(src2p), nr_cpumask_bits);
 }
 
@@ -779,11 +779,11 @@ static inline void cpumask_xor(struct cpumask *dstp,
  * @src1p: the first input
  * @src2p: the second input
 */
-static inline void cpumask_andnot(struct cpumask *dstp,
+static inline int cpumask_andnot(struct cpumask *dstp,
 				  const struct cpumask *src1p,
 				  const struct cpumask *src2p)
 {
-	bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
 		      cpumask_bits(src2p), nr_cpumask_bits);
 }
 
index 4fa999696310..24520a539c6f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -114,6 +114,13 @@ struct thread_group_cred {
  */
 struct cred {
 	atomic_t	usage;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	atomic_t	subscribers;	/* number of processes subscribed */
+	void		*put_addr;
+	unsigned	magic;
+#define CRED_MAGIC	0x43736564
+#define CRED_MAGIC_DEAD	0x44656144
+#endif
 	uid_t		uid;		/* real UID of the task */
 	gid_t		gid;		/* real GID of the task */
 	uid_t		suid;		/* saved UID of the task */
@@ -143,7 +150,9 @@ struct cred {
 };
 
 extern void __put_cred(struct cred *);
+extern void exit_creds(struct task_struct *);
 extern int copy_creds(struct task_struct *, unsigned long);
+extern struct cred *cred_alloc_blank(void);
 extern struct cred *prepare_creds(void);
 extern struct cred *prepare_exec_creds(void);
 extern struct cred *prepare_usermodehelper_creds(void);
@@ -158,6 +167,60 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
 extern int set_create_files_as(struct cred *, struct inode *);
 extern void __init cred_init(void);
 
+/*
+ * check for validity of credentials
+ */
+#ifdef CONFIG_DEBUG_CREDENTIALS
+extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __validate_process_creds(struct task_struct *,
+				     const char *, unsigned);
+
+static inline bool creds_are_invalid(const struct cred *cred)
+{
+	if (cred->magic != CRED_MAGIC)
+		return true;
+	if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
+		return true;
+#ifdef CONFIG_SECURITY_SELINUX
+	if ((unsigned long) cred->security < PAGE_SIZE)
+		return true;
+	if ((*(u32*)cred->security & 0xffffff00) ==
+	    (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
+		return true;
+#endif
+	return false;
+}
+
+static inline void __validate_creds(const struct cred *cred,
+				    const char *file, unsigned line)
+{
+	if (unlikely(creds_are_invalid(cred)))
+		__invalid_creds(cred, file, line);
+}
+
+#define validate_creds(cred)				\
+do {							\
+	__validate_creds((cred), __FILE__, __LINE__);	\
+} while(0)
+
+#define validate_process_creds()				\
+do {								\
+	__validate_process_creds(current, __FILE__, __LINE__);	\
+} while(0)
+
+extern void validate_creds_for_do_exit(struct task_struct *);
+#else
+static inline void validate_creds(const struct cred *cred)
+{
+}
+static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+}
+static inline void validate_process_creds(void)
+{
+}
+#endif
+
 /**
  * get_new_cred - Get a reference on a new set of credentials
  * @cred: The new credentials to reference
@@ -186,7 +249,9 @@ static inline struct cred *get_new_cred(struct cred *cred)
  */
 static inline const struct cred *get_cred(const struct cred *cred)
 {
-	return get_new_cred((struct cred *) cred);
+	struct cred *nonconst_cred = (struct cred *) cred;
+	validate_creds(cred);
+	return get_new_cred(nonconst_cred);
 }
 
 /**
@@ -204,7 +269,7 @@ static inline void put_cred(const struct cred *_cred)
 {
 	struct cred *cred = (struct cred *) _cred;
 
-	BUG_ON(atomic_read(&(cred)->usage) <= 0);
+	validate_creds(cred);
 	if (atomic_dec_and_test(&(cred)->usage))
 		__put_cred(cred);
 }
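
With CONFIG_DEBUG_CREDENTIALS enabled, get_cred() and put_cred() now validate the credential's magic and its usage count against the subscriber count, so a stale or freed cred trips __invalid_creds() at the call site instead of corrupting state later. An illustrative checkpoint using the new macro (the surrounding function is hypothetical):

	#include <linux/cred.h>

	/* Hypothetical: assert current's creds are sane around a sensitive step. */
	static void example_sensitive_step(void)
	{
		validate_process_creds();  /* compiles away without DEBUG_CREDENTIALS */
		/* ... operate on current->cred ... */
		validate_process_creds();
	}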
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ec29fa268b94..fd929889e8dc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -115,7 +115,6 @@ struct crypto_async_request;
 struct crypto_aead;
 struct crypto_blkcipher;
 struct crypto_hash;
-struct crypto_ahash;
 struct crypto_rng;
 struct crypto_tfm;
 struct crypto_type;
@@ -146,16 +145,6 @@ struct ablkcipher_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-struct ahash_request {
-	struct crypto_async_request base;
-
-	unsigned int nbytes;
-	struct scatterlist *src;
-	u8 *result;
-
-	void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
 /**
  *	struct aead_request - AEAD request
  *	@base: Common attributes for async crypto requests
@@ -220,18 +209,6 @@ struct ablkcipher_alg {
 	unsigned int ivsize;
 };
 
-struct ahash_alg {
-	int (*init)(struct ahash_request *req);
-	int (*reinit)(struct ahash_request *req);
-	int (*update)(struct ahash_request *req);
-	int (*final)(struct ahash_request *req);
-	int (*digest)(struct ahash_request *req);
-	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
-			unsigned int keylen);
-
-	unsigned int digestsize;
-};
-
 struct aead_alg {
 	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
 		      unsigned int keylen);
@@ -318,7 +295,6 @@ struct rng_alg {
 #define cra_cipher	cra_u.cipher
 #define cra_digest	cra_u.digest
 #define cra_hash	cra_u.hash
-#define cra_ahash	cra_u.ahash
 #define cra_compress	cra_u.compress
 #define cra_rng		cra_u.rng
 
@@ -346,7 +322,6 @@ struct crypto_alg {
 		struct cipher_alg cipher;
 		struct digest_alg digest;
 		struct hash_alg hash;
-		struct ahash_alg ahash;
 		struct compress_alg compress;
 		struct rng_alg rng;
 	} cra_u;
@@ -433,18 +408,6 @@ struct hash_tfm {
 	unsigned int digestsize;
 };
 
-struct ahash_tfm {
-	int (*init)(struct ahash_request *req);
-	int (*update)(struct ahash_request *req);
-	int (*final)(struct ahash_request *req);
-	int (*digest)(struct ahash_request *req);
-	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
-			unsigned int keylen);
-
-	unsigned int digestsize;
-	unsigned int reqsize;
-};
-
 struct compress_tfm {
 	int (*cot_compress)(struct crypto_tfm *tfm,
 			    const u8 *src, unsigned int slen,
@@ -465,7 +428,6 @@ struct rng_tfm {
 #define crt_blkcipher	crt_u.blkcipher
 #define crt_cipher	crt_u.cipher
 #define crt_hash	crt_u.hash
-#define crt_ahash	crt_u.ahash
 #define crt_compress	crt_u.compress
 #define crt_rng		crt_u.rng
 
@@ -479,7 +441,6 @@ struct crypto_tfm {
 		struct blkcipher_tfm blkcipher;
 		struct cipher_tfm cipher;
 		struct hash_tfm hash;
-		struct ahash_tfm ahash;
 		struct compress_tfm compress;
 		struct rng_tfm rng;
 	} crt_u;
@@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
 
 static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 {
-	kfree(req);
+	kzfree(req);
 }
 
 static inline void ablkcipher_request_set_callback(
@@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
 
 static inline void aead_request_free(struct aead_request *req)
 {
-	kfree(req);
+	kzfree(req);
 }
 
 static inline void aead_request_set_callback(struct aead_request *req,
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
index 6dfb856327bb..0c7111a55a1a 100644
--- a/include/linux/decompress/generic.h
+++ b/include/linux/decompress/generic.h
@@ -1,31 +1,37 @@
 #ifndef DECOMPRESS_GENERIC_H
 #define DECOMPRESS_GENERIC_H
 
-/* Minimal chunksize to be read.
- *Bzip2 prefers at least 4096
- *Lzma prefers 0x10000 */
-#define COMPR_IOBUF_SIZE 4096
-
 typedef int (*decompress_fn) (unsigned char *inbuf, int len,
 			      int(*fill)(void*, unsigned int),
-			      int(*writebb)(void*, unsigned int),
-			      unsigned char *output,
+			      int(*flush)(void*, unsigned int),
+			      unsigned char *outbuf,
 			      int *posp,
 			      void(*error)(char *x));
 
 /* inbuf   - input buffer
  *len     - len of pre-read data in inbuf
- *fill    - function to fill inbuf if empty
- *writebb - function to write out outbug
+ *fill    - function to fill inbuf when empty
+ *flush   - function to write out outbuf
+ *outbuf  - output buffer
  *posp    - if non-null, input position (number of bytes read) will be
  *	    returned here
  *
- *If len != 0, the inbuf is initialized (with as much data), and fill
- *should not be called
- *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
- *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
+ *If len != 0, inbuf should contain all the necessary input data, and fill
+ *should be NULL
+ *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
+ *the input buffer.  If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
+ *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
+ *bytes should be read per call.  Replace XXX with the appropriate decompressor
+ *name, i.e. LZMA_IOBUF_SIZE.
+ *
+ *If flush = NULL, outbuf must be large enough to buffer all the expected
+ *output.  If flush != NULL, the output buffer will be allocated by the
+ *decompressor (outbuf = NULL), and the flush function will be called to
+ *flush the output buffer at the appropriate time (decompressor and stream
+ *dependent).
 */
 
+
 /* Utility routine to detect the decompression method */
 decompress_fn decompress_method(const unsigned char *inbuf, int len,
 				const char **name);
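
A minimal sketch of driving a decompress_fn under the new contract: a pre-filled input buffer (so fill is NULL) and a flush callback (so the decompressor allocates its own output buffer). The callback shapes follow the typedef above; everything else is illustrative:

	#include <linux/decompress/generic.h>

	/* Illustrative flush callback: consume 'n' decompressed bytes from 'buf'. */
	static int example_flush(void *buf, unsigned int n)
	{
		/* copy the bytes wherever they need to go */
		return n;		/* report bytes consumed */
	}

	static void example_error(char *msg)
	{
		/* log 'msg' */
	}

	/* 'data'/'len' hold the whole compressed stream, so fill is NULL; a
	 * non-NULL flush lets the decompressor allocate outbuf itself. */
	static int example_decompress(unsigned char *data, int len)
	{
		const char *name;
		decompress_fn decomp = decompress_method(data, len, &name);

		if (!decomp)
			return -1;	/* format not recognized */
		return decomp(data, len, NULL, example_flush, NULL, NULL,
			      example_error);
	}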
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 0d6310657f32..df7607e6dce8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -84,13 +84,16 @@ typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
 
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
 					   struct dm_dev *dev,
-					   sector_t physical_start,
+					   sector_t start, sector_t len,
 					   void *data);
 
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
 				      iterate_devices_callout_fn fn,
 				      void *data);
 
+typedef void (*dm_io_hints_fn) (struct dm_target *ti,
+				struct queue_limits *limits);
+
 /*
  * Returns:
  *    0: The target can handle the next I/O immediately.
@@ -104,7 +107,7 @@ void dm_error(const char *message);
  * Combine device limits.
  */
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
-			 sector_t start, void *data);
+			 sector_t start, sector_t len, void *data);
 
 struct dm_dev {
 	struct block_device *bdev;
@@ -151,6 +154,7 @@ struct target_type {
 	dm_merge_fn merge;
 	dm_busy_fn busy;
 	dm_iterate_devices_fn iterate_devices;
+	dm_io_hints_fn io_hints;
 
 	/* For internal device-mapper use. */
 	struct list_head list;
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h
index 642e3017b51f..8a1f972c0fe9 100644
--- a/include/linux/dm-log-userspace.h
+++ b/include/linux/dm-log-userspace.h
@@ -371,7 +371,18 @@
 	(DM_ULOG_REQUEST_MASK & (request_type))
 
 struct dm_ulog_request {
-	char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+	/*
+	 * The local unique identifier (luid) and the universally unique
+	 * identifier (uuid) are used to tie a request to a specific
+	 * mirror log.  A single machine log could probably make due with
+	 * just the 'luid', but a cluster-aware log must use the 'uuid' and
+	 * the 'luid'.  The uuid is what is required for node to node
+	 * communication concerning a particular log, but the 'luid' helps
+	 * differentiate between logs that are being swapped and have the
+	 * same 'uuid'.  (Think "live" and "inactive" device-mapper tables.)
+	 */
+	uint64_t luid;
+	char uuid[DM_UUID_LEN];
 	char padding[7];        /* Padding because DM_UUID_LEN = 129 */
 
 	int32_t error;          /* Used to report back processing errors */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 07dfd460d286..c0f6c3cd788c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -98,11 +98,6 @@ static inline int is_device_dma_capable(struct device *dev)
 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
 }
 
-static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
-{
-	return addr + size <= mask;
-}
-
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 #else
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 634a5e5aba3e..7499b3667798 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -874,7 +874,7 @@ struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
 struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
 int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
-	int create, int extend_disksize);
+	int create);
 
 extern struct inode *ext3_iget(struct super_block *, unsigned long);
 extern int ext3_write_inode (struct inode *, int);
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 000000000000..f8fb07b0b6b8
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
+#ifndef _FIPS_H
+#define _FIPS_H
+
+#ifdef CONFIG_CRYPTO_FIPS
+extern int fips_enabled;
+#else
+#define fips_enabled 0
+#endif
+
+#endif
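
Because fips_enabled degrades to the constant 0 when CONFIG_CRYPTO_FIPS is off, guards like the one below compile away entirely in non-FIPS builds. Illustrative use (the function is hypothetical; MD5 genuinely is not a FIPS-approved digest):

	#include <linux/fips.h>
	#include <linux/errno.h>

	static int example_use_md5(void)
	{
		if (fips_enabled)
			return -EINVAL;	/* MD5 is not allowed in FIPS mode */
		/* ... proceed with MD5 ... */
		return 0;
	}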
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
new file mode 100644
index 000000000000..45ff18491514
--- /dev/null
+++ b/include/linux/flex_array.h
@@ -0,0 +1,49 @@
+#ifndef _FLEX_ARRAY_H
+#define _FLEX_ARRAY_H
+
+#include <linux/types.h>
+#include <asm/page.h>
+
+#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
+#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
+
+struct flex_array_part;
+
+/*
+ * This is meant to replace cases where an array-like
+ * structure has gotten too big to fit into kmalloc()
+ * and the developer is getting tempted to use
+ * vmalloc().
+ */
+
+struct flex_array {
+	union {
+		struct {
+			int element_size;
+			int total_nr_elements;
+			struct flex_array_part *parts[];
+		};
+		/*
+		 * This little trick makes sure that
+		 * sizeof(flex_array) == PAGE_SIZE
+		 */
+		char padding[FLEX_ARRAY_BASE_SIZE];
+	};
+};
+
+#define FLEX_ARRAY_INIT(size, total) { { {\
+	.element_size = (size), \
+	.total_nr_elements = (total), \
+} } }
+
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+		gfp_t flags);
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+		unsigned int end, gfp_t flags);
+void flex_array_free(struct flex_array *fa);
+void flex_array_free_parts(struct flex_array *fa);
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+		gfp_t flags);
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+
+#endif /* _FLEX_ARRAY_H */
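
A sketch of the API's intended round trip, inferred from the declarations above (error handling and sizes are illustrative; the authoritative semantics live in the lib/flex_array.c implementation, not this header):

	#include <linux/flex_array.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static int example_flex_array(void)
	{
		struct flex_array *fa;
		int value = 42, *p;

		/* too big for a flat kmalloc() array, fine as a flex_array */
		fa = flex_array_alloc(sizeof(int), 1 << 20, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;

		if (flex_array_put(fa, 1000, &value, GFP_KERNEL)) {
			flex_array_free(fa);
			return -ENOMEM;
		}

		p = flex_array_get(fa, 1000);
		/* ... use *p (NULL would mean the part was never allocated) ... */

		flex_array_free(fa);	/* frees the parts and the base */
		return 0;
	}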
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0872372184fe..a79f48373e7e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -715,7 +715,7 @@ struct posix_acl;
 
 struct inode {
 	struct hlist_node	i_hash;
-	struct list_head	i_list;
+	struct list_head	i_list;		/* backing dev IO list */
 	struct list_head	i_sb_list;
 	struct list_head	i_dentry;
 	unsigned long		i_ino;
@@ -1336,9 +1336,6 @@ struct super_block {
 	struct xattr_handler	**s_xattr;
 
 	struct list_head	s_inodes;	/* all inodes */
-	struct list_head	s_dirty;	/* dirty inodes */
-	struct list_head	s_io;		/* parked for writeback */
-	struct list_head	s_more_io;	/* parked for more writeback */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_files;
 	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
@@ -1528,6 +1525,7 @@ struct inode_operations {
 	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
 	int (*permission) (struct inode *, int);
+	int (*check_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1788,6 +1786,7 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
 	struct vfsmount *mnt);
 extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 int __put_super_and_need_restart(struct super_block *sb);
+void put_super(struct super_block *sb);
 
 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
 #define fops_get(fops) \
@@ -1946,6 +1945,7 @@ extern void putname(const char *name);
 extern int register_blkdev(unsigned int, const char *);
 extern void unregister_blkdev(unsigned int, const char *);
 extern struct block_device *bdget(dev_t);
+extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
@@ -1997,12 +1997,25 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
 #define CHRDEV_MAJOR_HASH_SIZE	255
 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
 extern int register_chrdev_region(dev_t, unsigned, const char *);
-extern int register_chrdev(unsigned int, const char *,
-			   const struct file_operations *);
-extern void unregister_chrdev(unsigned int, const char *);
+extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+			     unsigned int count, const char *name,
			     const struct file_operations *fops);
+extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+				unsigned int count, const char *name);
 extern void unregister_chrdev_region(dev_t, unsigned);
 extern void chrdev_show(struct seq_file *,off_t);
 
+static inline int register_chrdev(unsigned int major, const char *name,
+				  const struct file_operations *fops)
+{
+	return __register_chrdev(major, 0, 256, name, fops);
+}
+
+static inline void unregister_chrdev(unsigned int major, const char *name)
+{
+	__unregister_chrdev(major, 0, 256, name);
+}
+
 /* fs/block_dev.c */
 #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
 #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
@@ -2069,8 +2082,6 @@ static inline void invalidate_remote_inode(struct inode *inode)
 extern int invalidate_inode_pages2(struct address_space *mapping);
 extern int invalidate_inode_pages2_range(struct address_space *mapping,
 					 pgoff_t start, pgoff_t end);
-extern void generic_sync_sb_inodes(struct super_block *sb,
-				   struct writeback_control *wbc);
 extern int write_inode_now(struct inode *, int);
 extern int filemap_fdatawrite(struct address_space *);
 extern int filemap_flush(struct address_space *);
@@ -2122,7 +2133,7 @@ extern struct file *do_filp_open(int dfd, const char *pathname,
 		int open_flag, int mode, int acc_mode);
 extern int may_open(struct path *, int, int);
 
-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
+extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern struct file * open_exec(const char *);
 
 /* fs/dcache.c -- generic fs support functions */
@@ -2136,7 +2147,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
 
 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
-extern struct inode * inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
 extern void inode_add_to_lists(struct super_block *, struct inode *);
 extern void iput(struct inode *);
@@ -2163,6 +2174,7 @@ extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
 extern void clear_inode(struct inode *);
 extern void destroy_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int should_remove_suid(struct dentry *);
 extern int file_remove_suid(struct file *);
@@ -2184,7 +2196,6 @@ extern int bdev_read_only(struct block_device *);
 extern int set_blocksize(struct block_device *, int);
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
-extern int sb_has_dirty_inodes(struct super_block *);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
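
register_chrdev()/unregister_chrdev() keep their historical behavior by delegating to the new __register_chrdev()/__unregister_chrdev() with baseminor 0 and count 256; the underscored variants let a driver claim only a slice of a major's minor space. Illustrative use (the major number, range, and device name are made up):

	#include <linux/fs.h>

	static const struct file_operations example_fops;

	/* Hypothetical: claim only minors 16..31 of major 240 for "exdev". */
	static int __init example_chrdev_init(void)
	{
		return __register_chrdev(240, 16, 16, "exdev", &example_fops);
	}

	static void __exit example_chrdev_exit(void)
	{
		__unregister_chrdev(240, 16, 16, "exdev");
	}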
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6c3de999fb34..4d6f47b51189 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -352,7 +352,7 @@ extern void fsnotify_unmount_inodes(struct list_head *list);
 /* put here because inotify does some weird stuff when destroying watches */
 extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
 						    void *data, int data_is, const char *name,
-						    u32 cookie);
+						    u32 cookie, gfp_t gfp);
 
 #else
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5c093ffc655b..a81170de7f6b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
 	void			*filter;
 	void			*mod;
 
-#ifdef CONFIG_EVENT_PROFILE
-	atomic_t	profile_count;
-	int		(*profile_enable)(struct ftrace_event_call *);
-	void		(*profile_disable)(struct ftrace_event_call *);
-#endif
+	atomic_t		profile_count;
+	int			(*profile_enable)(struct ftrace_event_call *);
+	void			(*profile_disable)(struct ftrace_event_call *);
 };
 
 #define MAX_FILTER_PRED		32
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 0ffa41df0ee8..710e901085d0 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -22,6 +22,11 @@ struct gnet_stats_basic
22{ 22{
23 __u64 bytes; 23 __u64 bytes;
24 __u32 packets; 24 __u32 packets;
25};
26struct gnet_stats_basic_packed
27{
28 __u64 bytes;
29 __u32 packets;
25} __attribute__ ((packed)); 30} __attribute__ ((packed));
26 31
27/** 32/**
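
The point of the new gnet_stats_basic_packed twin is purely layout: the packed variant preserves the old 12-byte netlink wire format, while the now-unpacked struct may be padded for alignment. A standalone userspace demonstration (sizes shown are typical for x86-64; other ABIs may differ):

	#include <stdio.h>
	#include <stdint.h>

	struct gnet_stats_basic {
		uint64_t bytes;
		uint32_t packets;
	};

	struct gnet_stats_basic_packed {
		uint64_t bytes;
		uint32_t packets;
	} __attribute__ ((packed));

	int main(void)
	{
		/* typically 16 vs 12: natural padding vs the old wire format */
		printf("unpacked %zu, packed %zu\n",
		       sizeof(struct gnet_stats_basic),
		       sizeof(struct gnet_stats_basic_packed));
		return 0;
	}
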
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863d..330cb31bb496 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -132,7 +132,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
132} 132}
133#endif 133#endif
134 134
135#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) 135#if defined(CONFIG_NO_HZ)
136extern void rcu_irq_enter(void); 136extern void rcu_irq_enter(void);
137extern void rcu_irq_exit(void); 137extern void rcu_irq_exit(void);
138extern void rcu_nmi_enter(void); 138extern void rcu_nmi_enter(void);
@@ -142,7 +142,7 @@ extern void rcu_nmi_exit(void);
142# define rcu_irq_exit() do { } while (0) 142# define rcu_irq_exit() do { } while (0)
143# define rcu_nmi_enter() do { } while (0) 143# define rcu_nmi_enter() do { } while (0)
144# define rcu_nmi_exit() do { } while (0) 144# define rcu_nmi_exit() do { } while (0)
145#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */ 145#endif /* #if defined(CONFIG_NO_HZ) */
146 146
147/* 147/*
148 * It is safe to do non-atomic ops on ->hardirq_context, 148 * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 54648e625efd..4759917adc71 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -448,7 +448,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
448 448
449static inline void timer_stats_account_hrtimer(struct hrtimer *timer) 449static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
450{ 450{
451 if (likely(!timer->start_pid)) 451 if (likely(!timer->start_site))
452 return; 452 return;
453 timer_stats_update_stats(timer, timer->start_pid, timer->start_site, 453 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
454 timer->function, timer->start_comm, 0); 454 timer->function, timer->start_comm, 0);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2723513a5651..5cbc620bdfe0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -10,6 +10,7 @@
10#include <asm/tlbflush.h> 10#include <asm/tlbflush.h>
11 11
12struct ctl_table; 12struct ctl_table;
13struct user_struct;
13 14
14int PageHuge(struct page *page); 15int PageHuge(struct page *page);
15 16
@@ -146,7 +147,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
146 147
147extern const struct file_operations hugetlbfs_file_operations; 148extern const struct file_operations hugetlbfs_file_operations;
148extern struct vm_operations_struct hugetlb_vm_ops; 149extern struct vm_operations_struct hugetlb_vm_ops;
149struct file *hugetlb_file_setup(const char *name, size_t, int); 150struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
151 struct user_struct **user);
150int hugetlb_get_quota(struct address_space *mapping, long delta); 152int hugetlb_get_quota(struct address_space *mapping, long delta);
151void hugetlb_put_quota(struct address_space *mapping, long delta); 153void hugetlb_put_quota(struct address_space *mapping, long delta);
152 154
@@ -168,7 +170,7 @@ static inline void set_file_hugepages(struct file *file)
168 170
169#define is_file_hugepages(file) 0 171#define is_file_hugepages(file) 0
170#define set_file_hugepages(file) BUG() 172#define set_file_hugepages(file) BUG()
171#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS) 173#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS)
172 174
173#endif /* !CONFIG_HUGETLBFS */ 175#endif /* !CONFIG_HUGETLBFS */
174 176
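
hugetlb_file_setup() now reports which user was charged for the mlock quota through the extra struct user_struct ** argument, so the caller can uncharge later (SysV shm is the kind of caller that stores this pointer). A hedged sketch; map_huge() and the file name are invented:

	#include <linux/hugetlb.h>
	#include <linux/err.h>

	static struct file *map_huge(size_t size, int acctflag,
				     struct user_struct **user)
	{
		*user = NULL;
		/* On success *user may be set to the charged user; the
		 * caller must keep it around to uncharge on teardown. */
		return hugetlb_file_setup("hedged-example", size, acctflag,
					  user);
	}
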
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index acef2a770b6b..ad27c7da8798 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
82 82
83#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) 83#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
84#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) 84#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
85#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER) 85#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
86#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ 86#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
87 ACCEPT_SOURCE_ROUTE) 87 ACCEPT_SOURCE_ROUTE)
88#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) 88#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..9e7f2e8fc66e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
94# define CAP_INIT_BSET CAP_INIT_EFF_SET 94# define CAP_INIT_BSET CAP_INIT_EFF_SET
95#endif 95#endif
96 96
97#ifdef CONFIG_TREE_PREEMPT_RCU
98#define INIT_TASK_RCU_PREEMPT(tsk) \
99 .rcu_read_lock_nesting = 0, \
100 .rcu_read_unlock_special = 0, \
101 .rcu_blocked_node = NULL, \
102 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
103#else
104#define INIT_TASK_RCU_PREEMPT(tsk)
105#endif
106
97extern struct cred init_cred; 107extern struct cred init_cred;
98 108
99#ifdef CONFIG_PERF_COUNTERS 109#ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
173 INIT_LOCKDEP \ 183 INIT_LOCKDEP \
174 INIT_FTRACE_GRAPH \ 184 INIT_FTRACE_GRAPH \
175 INIT_TRACE_RECURSION \ 185 INIT_TRACE_RECURSION \
186 INIT_TASK_RCU_PREEMPT(tsk) \
176} 187}
177 188
178 189
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 7964516c6954..15d5903af2dd 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -15,12 +15,13 @@
15#define KEY_COL(k) (((k) >> 16) & 0xff) 15#define KEY_COL(k) (((k) >> 16) & 0xff)
16#define KEY_VAL(k) ((k) & 0xffff) 16#define KEY_VAL(k) ((k) & 0xffff)
17 17
18#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))
19
18/** 20/**
19 * struct matrix_keymap_data - keymap for matrix keyboards 21 * struct matrix_keymap_data - keymap for matrix keyboards
20 * @keymap: pointer to array of uint32 values encoded with KEY() macro 22 * @keymap: pointer to array of uint32 values encoded with KEY() macro
21 * representing keymap 23 * representing keymap
22 * @keymap_size: number of entries (initialized) in this keymap 24 * @keymap_size: number of entries (initialized) in this keymap
23 * @max_keymap_size: maximum size of keymap supported by the device
24 * 25 *
25 * This structure is supposed to be used by platform code to supply 26 * This structure is supposed to be used by platform code to supply
26 * keymaps to drivers that implement matrix-like keypads/keyboards. 27 * keymaps to drivers that implement matrix-like keypads/keyboards.
@@ -28,14 +29,13 @@
28struct matrix_keymap_data { 29struct matrix_keymap_data {
29 const uint32_t *keymap; 30 const uint32_t *keymap;
30 unsigned int keymap_size; 31 unsigned int keymap_size;
31 unsigned int max_keymap_size;
32}; 32};
33 33
34/** 34/**
35 * struct matrix_keypad_platform_data - platform-dependent keypad data 35 * struct matrix_keypad_platform_data - platform-dependent keypad data
36 * @keymap_data: pointer to &matrix_keymap_data 36 * @keymap_data: pointer to &matrix_keymap_data
37 * @row_gpios: array of gpio numbers reporesenting rows 37 * @row_gpios: pointer to array of gpio numbers representing rows
 38 * @col_gpios: array of gpio numbers reporesenting colums 38 * @col_gpios: pointer to array of gpio numbers representing columns
39 * @num_row_gpios: actual number of row gpios used by device 39 * @num_row_gpios: actual number of row gpios used by device
40 * @num_col_gpios: actual number of col gpios used by device 40 * @num_col_gpios: actual number of col gpios used by device
41 * @col_scan_delay_us: delay, measured in microseconds, that is 41 * @col_scan_delay_us: delay, measured in microseconds, that is
@@ -48,8 +48,9 @@ struct matrix_keymap_data {
48struct matrix_keypad_platform_data { 48struct matrix_keypad_platform_data {
49 const struct matrix_keymap_data *keymap_data; 49 const struct matrix_keymap_data *keymap_data;
50 50
51 unsigned int row_gpios[MATRIX_MAX_ROWS]; 51 const unsigned int *row_gpios;
52 unsigned int col_gpios[MATRIX_MAX_COLS]; 52 const unsigned int *col_gpios;
53
53 unsigned int num_row_gpios; 54 unsigned int num_row_gpios;
54 unsigned int num_col_gpios; 55 unsigned int num_col_gpios;
55 56
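
MATRIX_SCAN_CODE() gives keypad drivers one canonical mapping from (row, col) to a scan code. A hedged sketch of a report path using it (ROW_SHIFT, keycodes and report_key() are invented for illustration):

	#include <linux/input.h>
	#include <linux/input/matrix_keypad.h>

	#define ROW_SHIFT 4				/* room for 16 columns */

	static unsigned short keycodes[8 << ROW_SHIFT];	/* 8 rows x 16 cols */

	static void report_key(struct input_dev *input, int row, int col,
			       bool down)
	{
		int code = MATRIX_SCAN_CODE(row, col, ROW_SHIFT);

		input_event(input, EV_MSC, MSC_SCAN, code);
		input_report_key(input, keycodes[code], down);
		input_sync(input);
	}
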
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2721f07e9354..1ac57e522a1f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,7 @@
14#include <linux/irqflags.h> 14#include <linux/irqflags.h>
15#include <linux/smp.h> 15#include <linux/smp.h>
16#include <linux/percpu.h> 16#include <linux/percpu.h>
17#include <linux/hrtimer.h>
17 18
18#include <asm/atomic.h> 19#include <asm/atomic.h>
19#include <asm/ptrace.h> 20#include <asm/ptrace.h>
@@ -49,6 +50,9 @@
49 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is 50 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 50 * registered first in a shared interrupt is considered for 51 * registered first in a shared interrupt is considered for
51 * performance reasons) 52 * performance reasons)
53 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
54 * Used by threaded interrupts which need to keep the
55 * irq line disabled until the threaded handler has been run.
52 */ 56 */
53#define IRQF_DISABLED 0x00000020 57#define IRQF_DISABLED 0x00000020
54#define IRQF_SAMPLE_RANDOM 0x00000040 58#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -58,17 +62,20 @@
58#define IRQF_PERCPU 0x00000400 62#define IRQF_PERCPU 0x00000400
59#define IRQF_NOBALANCING 0x00000800 63#define IRQF_NOBALANCING 0x00000800
60#define IRQF_IRQPOLL 0x00001000 64#define IRQF_IRQPOLL 0x00001000
65#define IRQF_ONESHOT 0x00002000
61 66
62/* 67/*
63 * Bits used by threaded handlers: 68 * Bits used by threaded handlers:
64 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run 69 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
65 * IRQTF_DIED - handler thread died 70 * IRQTF_DIED - handler thread died
66 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed 71 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
72 * IRQTF_AFFINITY - irq thread is requested to adjust affinity
67 */ 73 */
68enum { 74enum {
69 IRQTF_RUNTHREAD, 75 IRQTF_RUNTHREAD,
70 IRQTF_DIED, 76 IRQTF_DIED,
71 IRQTF_WARNED, 77 IRQTF_WARNED,
78 IRQTF_AFFINITY,
72}; 79};
73 80
74typedef irqreturn_t (*irq_handler_t)(int, void *); 81typedef irqreturn_t (*irq_handler_t)(int, void *);
@@ -517,6 +524,31 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
517extern void tasklet_init(struct tasklet_struct *t, 524extern void tasklet_init(struct tasklet_struct *t,
518 void (*func)(unsigned long), unsigned long data); 525 void (*func)(unsigned long), unsigned long data);
519 526
527struct tasklet_hrtimer {
528 struct hrtimer timer;
529 struct tasklet_struct tasklet;
530 enum hrtimer_restart (*function)(struct hrtimer *);
531};
532
533extern void
534tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
535 enum hrtimer_restart (*function)(struct hrtimer *),
536 clockid_t which_clock, enum hrtimer_mode mode);
537
538static inline
539int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
540 const enum hrtimer_mode mode)
541{
542 return hrtimer_start(&ttimer->timer, time, mode);
543}
544
545static inline
546void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
547{
548 hrtimer_cancel(&ttimer->timer);
549 tasklet_kill(&ttimer->tasklet);
550}
551
520/* 552/*
521 * Autoprobing for irqs: 553 * Autoprobing for irqs:
522 * 554 *
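
The tasklet_hrtimer API added above lets an hrtimer drive work that must run in softirq (tasklet) context rather than in hard-irq context. A minimal hedged sketch; poll_fn() and poll_setup() are invented, only the tasklet_hrtimer_* calls come from the patch:

	#include <linux/interrupt.h>
	#include <linux/hrtimer.h>

	static struct tasklet_hrtimer poll_timer;

	static enum hrtimer_restart poll_fn(struct hrtimer *t)
	{
		/* periodic softirq-context work goes here */
		return HRTIMER_NORESTART;	/* or HRTIMER_RESTART to re-arm */
	}

	static void poll_setup(void)
	{
		tasklet_hrtimer_init(&poll_timer, poll_fn,
				     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tasklet_hrtimer_start(&poll_timer,
				      ktime_set(0, 10 * NSEC_PER_MSEC),
				      HRTIMER_MODE_REL);
	}
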
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index dd05434fa45f..4da4a75c3f1e 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -92,7 +92,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
92 * a race). 92 * a race).
93 */ 93 */
94 if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { 94 if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
95 atomic_long_inc(&ioc->refcount); 95 atomic_inc(&ioc->nr_tasks);
96 return ioc; 96 return ioc;
97 } 97 }
98 98
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f7..ae9653dbcd78 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
69#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ 69#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
70#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ 70#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
71#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ 71#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
72#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
73#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
72 74
73#ifdef CONFIG_IRQ_PER_CPU 75#ifdef CONFIG_IRQ_PER_CPU
74# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 76# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -100,6 +102,9 @@ struct msi_desc;
100 * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ 102 * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
101 * @set_wake: enable/disable power-management wake-on of an IRQ 103 * @set_wake: enable/disable power-management wake-on of an IRQ
102 * 104 *
105 * @bus_lock: function to lock access to slow bus (i2c) chips
106 * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
107 *
103 * @release: release function solely used by UML 108 * @release: release function solely used by UML
104 * @typename: obsoleted by name, kept as migration helper 109 * @typename: obsoleted by name, kept as migration helper
105 */ 110 */
@@ -123,6 +128,9 @@ struct irq_chip {
123 int (*set_type)(unsigned int irq, unsigned int flow_type); 128 int (*set_type)(unsigned int irq, unsigned int flow_type);
124 int (*set_wake)(unsigned int irq, unsigned int on); 129 int (*set_wake)(unsigned int irq, unsigned int on);
125 130
131 void (*bus_lock)(unsigned int irq);
132 void (*bus_sync_unlock)(unsigned int irq);
133
126 /* Currently used only by UML, might disappear one day.*/ 134 /* Currently used only by UML, might disappear one day.*/
127#ifdef CONFIG_IRQ_RELEASE_METHOD 135#ifdef CONFIG_IRQ_RELEASE_METHOD
128 void (*release)(unsigned int irq, void *dev_id); 136 void (*release)(unsigned int irq, void *dev_id);
@@ -220,13 +228,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
220extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 228extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
221 229
222/* 230/*
223 * Migration helpers for obsolete names, they will go away:
224 */
225#define hw_interrupt_type irq_chip
226#define no_irq_type no_irq_chip
227typedef struct irq_desc irq_desc_t;
228
229/*
230 * Pick up the arch-dependent methods: 231 * Pick up the arch-dependent methods:
231 */ 232 */
232#include <asm/hw_irq.h> 233#include <asm/hw_irq.h>
@@ -289,6 +290,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
289extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); 290extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
290extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); 291extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
291extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 292extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
293extern void handle_nested_irq(unsigned int irq);
292 294
293/* 295/*
294 * Monolithic do_IRQ implementation. 296 * Monolithic do_IRQ implementation.
@@ -379,6 +381,8 @@ set_irq_chained_handler(unsigned int irq,
379 __set_irq_handler(irq, handle, 1, NULL); 381 __set_irq_handler(irq, handle, 1, NULL);
380} 382}
381 383
384extern void set_irq_nested_thread(unsigned int irq, int nest);
385
382extern void set_irq_noprobe(unsigned int irq); 386extern void set_irq_noprobe(unsigned int irq);
383extern void set_irq_probe(unsigned int irq); 387extern void set_irq_probe(unsigned int irq);
384 388
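
The new bus_lock/bus_sync_unlock hooks exist for irq chips that sit behind a slow bus such as I2C: the core calls them around chip operations so the handlers may sleep. A hedged sketch of such a chip (all expander_* names are hypothetical; real drivers would also cache mask state and mark demuxed child irqs with set_irq_nested_thread()):

	#include <linux/irq.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(expander_lock);

	static void expander_bus_lock(unsigned int irq)
	{
		mutex_lock(&expander_lock);	/* may sleep, unlike chip->mask() */
	}

	static void expander_bus_sync_unlock(unsigned int irq)
	{
		/* flush cached mask/unmask state to the chip over I2C here */
		mutex_unlock(&expander_lock);
	}

	static struct irq_chip expander_chip = {
		.name		 = "expander",
		.bus_lock	 = expander_bus_lock,
		.bus_sync_unlock = expander_bus_sync_unlock,
		/* .mask/.unmask would only update the cached state */
	};
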
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index ec87b212ff7d..7bf89bc8cbca 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -41,6 +41,12 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
41 ; \ 41 ; \
42 else 42 else
43 43
44#ifdef CONFIG_SMP
45#define irq_node(irq) (irq_to_desc(irq)->node)
46#else
47#define irq_node(irq) 0
48#endif
49
44#endif /* CONFIG_GENERIC_HARDIRQS */ 50#endif /* CONFIG_GENERIC_HARDIRQS */
45 51
46#define for_each_irq_nr(irq) \ 52#define for_each_irq_nr(irq) \
diff --git a/include/linux/key.h b/include/linux/key.h
index e544f466d69a..cd50dfa1d4c2 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -129,7 +129,10 @@ struct key {
129 struct rw_semaphore sem; /* change vs change sem */ 129 struct rw_semaphore sem; /* change vs change sem */
130 struct key_user *user; /* owner of this key */ 130 struct key_user *user; /* owner of this key */
131 void *security; /* security data for this key */ 131 void *security; /* security data for this key */
132 time_t expiry; /* time at which key expires (or 0) */ 132 union {
133 time_t expiry; /* time at which key expires (or 0) */
134 time_t revoked_at; /* time at which key was revoked */
135 };
133 uid_t uid; 136 uid_t uid;
134 gid_t gid; 137 gid_t gid;
135 key_perm_t perm; /* access permissions */ 138 key_perm_t perm; /* access permissions */
@@ -275,6 +278,8 @@ static inline key_serial_t key_serial(struct key *key)
275extern ctl_table key_sysctls[]; 278extern ctl_table key_sysctls[];
276#endif 279#endif
277 280
281extern void key_replace_session_keyring(void);
282
278/* 283/*
279 * the userspace interface 284 * the userspace interface
280 */ 285 */
@@ -297,6 +302,7 @@ extern void key_init(void);
297#define key_fsuid_changed(t) do { } while(0) 302#define key_fsuid_changed(t) do { } while(0)
298#define key_fsgid_changed(t) do { } while(0) 303#define key_fsgid_changed(t) do { } while(0)
299#define key_init() do { } while(0) 304#define key_init() do { } while(0)
305#define key_replace_session_keyring() do { } while(0)
300 306
301#endif /* CONFIG_KEYS */ 307#endif /* CONFIG_KEYS */
302#endif /* __KERNEL__ */ 308#endif /* __KERNEL__ */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index c0688eb72093..bd383f1944fb 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -52,5 +52,6 @@
52#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ 52#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */
53#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ 53#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */
54#define KEYCTL_GET_SECURITY 17 /* get key security label */ 54#define KEYCTL_GET_SECURITY 17 /* get key security label */
55#define KEYCTL_SESSION_TO_PARENT 18 /* apply session keyring to parent process */
55 56
56#endif /* _LINUX_KEYCTL_H */ 57#endif /* _LINUX_KEYCTL_H */
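
KEYCTL_SESSION_TO_PARENT is userspace-visible: the calling process asks the kernel to install its session keyring as its parent's. A hedged userspace sketch of invoking it (the operation takes no further arguments):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/keyctl.h>

	int main(void)
	{
		/* give our session keyring to the parent (e.g. the shell) */
		if (syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT) < 0) {
			perror("KEYCTL_SESSION_TO_PARENT");
			return 1;
		}
		return 0;
	}
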
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 47b39b7c7e84..dc2fd545db00 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
34int kmemcheck_show_addr(unsigned long address); 34int kmemcheck_show_addr(unsigned long address);
35int kmemcheck_hide_addr(unsigned long address); 35int kmemcheck_hide_addr(unsigned long address);
36 36
37bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
37#else 39#else
38#define kmemcheck_enabled 0 40#define kmemcheck_enabled 0
39 41
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
99{ 101{
100} 102}
101 103
104static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
105{
106 return true;
107}
108
102#endif /* CONFIG_KMEMCHECK */ 109#endif /* CONFIG_KMEMCHECK */
103 110
104/* 111/*
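
kmemcheck_is_obj_initialized() lets other debug code skip bytes kmemcheck still considers uninitialized; note the !CONFIG_KMEMCHECK stub returns true, so callers need no ifdefs. A hedged sketch of a scanner loop, loosely in the style of a kmemleak-type consumer:

	#include <linux/kmemcheck.h>

	static void scan_block(unsigned long *start, unsigned long *end)
	{
		unsigned long *ptr;

		for (ptr = start; ptr < end; ptr++) {
			/* compiles away to nothing without CONFIG_KMEMCHECK */
			if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
							  sizeof(*ptr)))
				continue;
			/* ... examine *ptr ... */
		}
	}
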
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a63807f714e..3c7497d46ee9 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -23,18 +23,18 @@
23 23
24#ifdef CONFIG_DEBUG_KMEMLEAK 24#ifdef CONFIG_DEBUG_KMEMLEAK
25 25
26extern void kmemleak_init(void); 26extern void kmemleak_init(void) __ref;
27extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 27extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
28 gfp_t gfp); 28 gfp_t gfp) __ref;
29extern void kmemleak_free(const void *ptr); 29extern void kmemleak_free(const void *ptr) __ref;
30extern void kmemleak_free_part(const void *ptr, size_t size); 30extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
31extern void kmemleak_padding(const void *ptr, unsigned long offset, 31extern void kmemleak_padding(const void *ptr, unsigned long offset,
32 size_t size); 32 size_t size) __ref;
33extern void kmemleak_not_leak(const void *ptr); 33extern void kmemleak_not_leak(const void *ptr) __ref;
34extern void kmemleak_ignore(const void *ptr); 34extern void kmemleak_ignore(const void *ptr) __ref;
35extern void kmemleak_scan_area(const void *ptr, unsigned long offset, 35extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
36 size_t length, gfp_t gfp); 36 size_t length, gfp_t gfp) __ref;
37extern void kmemleak_no_scan(const void *ptr); 37extern void kmemleak_no_scan(const void *ptr) __ref;
38 38
39static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, 39static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
40 int min_count, unsigned long flags, 40 int min_count, unsigned long flags,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 16713dc672e4..3060bdc35ffe 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_memory_slot {
110 110
111struct kvm_kernel_irq_routing_entry { 111struct kvm_kernel_irq_routing_entry {
112 u32 gsi; 112 u32 gsi;
113 u32 type;
113 int (*set)(struct kvm_kernel_irq_routing_entry *e, 114 int (*set)(struct kvm_kernel_irq_routing_entry *e,
114 struct kvm *kvm, int level); 115 struct kvm *kvm, int level);
115 union { 116 union {
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 7bc1440fc473..2fb1dcbcb5aa 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -1,5 +1,7 @@
1/* Things the lguest guest needs to know. Note: like all lguest interfaces, 1/*
2 * this is subject to wild and random change between versions. */ 2 * Things the lguest guest needs to know. Note: like all lguest interfaces,
3 * this is subject to wild and random change between versions.
4 */
3#ifndef _LINUX_LGUEST_H 5#ifndef _LINUX_LGUEST_H
4#define _LINUX_LGUEST_H 6#define _LINUX_LGUEST_H
5 7
@@ -11,32 +13,41 @@
11#define LG_CLOCK_MIN_DELTA 100UL 13#define LG_CLOCK_MIN_DELTA 100UL
12#define LG_CLOCK_MAX_DELTA ULONG_MAX 14#define LG_CLOCK_MAX_DELTA ULONG_MAX
13 15
14/*G:032 The second method of communicating with the Host is to via "struct 16/*G:031
 17 * The second method of communicating with the Host is via "struct
15 * lguest_data". Once the Guest's initialization hypercall tells the Host where 18 * lguest_data". Once the Guest's initialization hypercall tells the Host where
16 * this is, the Guest and Host both publish information in it. :*/ 19 * this is, the Guest and Host both publish information in it.
17struct lguest_data 20:*/
18{ 21struct lguest_data {
19 /* 512 == enabled (same as eflags in normal hardware). The Guest 22 /*
20 * changes interrupts so often that a hypercall is too slow. */ 23 * 512 == enabled (same as eflags in normal hardware). The Guest
24 * changes interrupts so often that a hypercall is too slow.
25 */
21 unsigned int irq_enabled; 26 unsigned int irq_enabled;
22 /* Fine-grained interrupt disabling by the Guest */ 27 /* Fine-grained interrupt disabling by the Guest */
23 DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); 28 DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
24 29
25 /* The Host writes the virtual address of the last page fault here, 30 /*
31 * The Host writes the virtual address of the last page fault here,
26 * which saves the Guest a hypercall. CR2 is the native register where 32 * which saves the Guest a hypercall. CR2 is the native register where
27 * this address would normally be found. */ 33 * this address would normally be found.
34 */
28 unsigned long cr2; 35 unsigned long cr2;
29 36
30 /* Wallclock time set by the Host. */ 37 /* Wallclock time set by the Host. */
31 struct timespec time; 38 struct timespec time;
32 39
33 /* Interrupt pending set by the Host. The Guest should do a hypercall 40 /*
34 * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */ 41 * Interrupt pending set by the Host. The Guest should do a hypercall
42 * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
43 */
35 int irq_pending; 44 int irq_pending;
36 45
37 /* Async hypercall ring. Instead of directly making hypercalls, we can 46 /*
47 * Async hypercall ring. Instead of directly making hypercalls, we can
38 * place them in here for processing the next time the Host wants. 48 * place them in here for processing the next time the Host wants.
39 * This batching can be quite efficient. */ 49 * This batching can be quite efficient.
50 */
40 51
41 /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ 52 /* 0xFF == done (set by Host), 0 == pending (set by Guest). */
42 u8 hcall_status[LHCALL_RING_SIZE]; 53 u8 hcall_status[LHCALL_RING_SIZE];
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index bfefbdf7498a..495203ff221c 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -29,8 +29,10 @@ struct lguest_device_desc {
29 __u8 type; 29 __u8 type;
30 /* The number of virtqueues (first in config array) */ 30 /* The number of virtqueues (first in config array) */
31 __u8 num_vq; 31 __u8 num_vq;
32 /* The number of bytes of feature bits. Multiply by 2: one for host 32 /*
33 * features and one for Guest acknowledgements. */ 33 * The number of bytes of feature bits. Multiply by 2: one for host
34 * features and one for Guest acknowledgements.
35 */
34 __u8 feature_len; 36 __u8 feature_len;
35 /* The number of bytes of the config array after virtqueues. */ 37 /* The number of bytes of the config array after virtqueues. */
36 __u8 config_len; 38 __u8 config_len;
@@ -39,8 +41,10 @@ struct lguest_device_desc {
39 __u8 config[0]; 41 __u8 config[0];
40}; 42};
41 43
42/*D:135 This is how we expect the device configuration field for a virtqueue 44/*D:135
43 * to be laid out in config space. */ 45 * This is how we expect the device configuration field for a virtqueue
46 * to be laid out in config space.
47 */
44struct lguest_vqconfig { 48struct lguest_vqconfig {
45 /* The number of entries in the virtio_ring */ 49 /* The number of entries in the virtio_ring */
46 __u16 num; 50 __u16 num;
@@ -61,7 +65,9 @@ enum lguest_req
61 LHREQ_EVENTFD, /* + address, fd. */ 65 LHREQ_EVENTFD, /* + address, fd. */
62}; 66};
63 67
64/* The alignment to use between consumer and producer parts of vring. 68/*
65 * x86 pagesize for historical reasons. */ 69 * The alignment to use between consumer and producer parts of vring.
70 * x86 pagesize for historical reasons.
71 */
66#define LGUEST_VRING_ALIGN 4096 72#define LGUEST_VRING_ALIGN 4096
67#endif /* _LINUX_LGUEST_LAUNCHER */ 73#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 3d501db36a26..e5b6e33c6571 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -385,6 +385,7 @@ enum {
385 not multiple of 16 bytes */ 385 not multiple of 16 bytes */
386 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ 386 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
387 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 387 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
388 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
388 389
389 /* DMA mask for user DMA control: User visible values; DO NOT 390 /* DMA mask for user DMA control: User visible values; DO NOT
390 renumber */ 391 renumber */
@@ -588,6 +589,7 @@ struct ata_device {
588#endif 589#endif
589 /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ 590 /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
590 u64 n_sectors; /* size of device, if ATA */ 591 u64 n_sectors; /* size of device, if ATA */
592 u64 n_native_sectors; /* native size, if ATA */
591 unsigned int class; /* ATA_DEV_xxx */ 593 unsigned int class; /* ATA_DEV_xxx */
592 unsigned long unpark_deadline; 594 unsigned long unpark_deadline;
593 595
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index c46c89505dac..2442e3f3d033 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -51,7 +51,7 @@ extern u64 __init lmb_alloc_base(u64 size,
51extern u64 __init __lmb_alloc_base(u64 size, 51extern u64 __init __lmb_alloc_base(u64 size,
52 u64 align, u64 max_addr); 52 u64 align, u64 max_addr);
53extern u64 __init lmb_phys_mem_size(void); 53extern u64 __init lmb_phys_mem_size(void);
54extern u64 __init lmb_end_of_DRAM(void); 54extern u64 lmb_end_of_DRAM(void);
55extern void __init lmb_enforce_memory_limit(u64 memory_limit); 55extern void __init lmb_enforce_memory_limit(u64 memory_limit);
56extern int __init lmb_is_reserved(u64 addr); 56extern int __init lmb_is_reserved(u64 addr);
57extern int lmb_find(struct lmb_property *res); 57extern int lmb_find(struct lmb_property *res);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ccf0e286b2a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
149 struct lock_class *class; 149 struct lock_class *class;
150 struct stack_trace trace; 150 struct stack_trace trace;
151 int distance; 151 int distance;
152
153 /*
154 * The parent field is used to implement breadth-first search, and the
155 * bit 0 is reused to indicate if the lock has been accessed in BFS.
156 */
157 struct lock_list *parent;
152}; 158};
153 159
154/* 160/*
@@ -208,10 +214,12 @@ struct held_lock {
208 * interrupt context: 214 * interrupt context:
209 */ 215 */
210 unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ 216 unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
211 unsigned int trylock:1; 217 unsigned int trylock:1; /* 16 bits */
218
212 unsigned int read:2; /* see lock_acquire() comment */ 219 unsigned int read:2; /* see lock_acquire() comment */
213 unsigned int check:2; /* see lock_acquire() comment */ 220 unsigned int check:2; /* see lock_acquire() comment */
214 unsigned int hardirqs_off:1; 221 unsigned int hardirqs_off:1;
222 unsigned int references:11; /* 32 bits */
215}; 223};
216 224
217/* 225/*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
291extern void lock_release(struct lockdep_map *lock, int nested, 299extern void lock_release(struct lockdep_map *lock, int nested,
292 unsigned long ip); 300 unsigned long ip);
293 301
302#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
303
304extern int lock_is_held(struct lockdep_map *lock);
305
294extern void lock_set_class(struct lockdep_map *lock, const char *name, 306extern void lock_set_class(struct lockdep_map *lock, const char *name,
295 struct lock_class_key *key, unsigned int subclass, 307 struct lock_class_key *key, unsigned int subclass,
296 unsigned long ip); 308 unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
309 321
310#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 322#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
311 323
324#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
325
312#else /* !LOCKDEP */ 326#else /* !LOCKDEP */
313 327
314static inline void lockdep_off(void) 328static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
353 367
354#define lockdep_depth(tsk) (0) 368#define lockdep_depth(tsk) (0)
355 369
370#define lockdep_assert_held(l) do { } while (0)
371
356#endif /* !LOCKDEP */ 372#endif /* !LOCKDEP */
357 373
358#ifdef CONFIG_LOCK_STAT 374#ifdef CONFIG_LOCK_STAT
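
lockdep_assert_held() turns a locking comment ("caller must hold X") into a checked assertion. A hedged sketch (table_lock and table_update_locked() are invented):

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(table_lock);

	static void table_update_locked(int idx, int val)
	{
		/* WARNs (when lockdep is enabled) if the caller forgot to
		 * take table_lock; expands to a no-op without LOCKDEP. */
		lockdep_assert_held(&table_lock);
		/* ... modify the table ... */
	}
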
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index e461b2c3d711..190c37854870 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -33,6 +33,7 @@ struct common_audit_data {
33#define LSM_AUDIT_DATA_IPC 4 33#define LSM_AUDIT_DATA_IPC 4
34#define LSM_AUDIT_DATA_TASK 5 34#define LSM_AUDIT_DATA_TASK 5
35#define LSM_AUDIT_DATA_KEY 6 35#define LSM_AUDIT_DATA_KEY 6
36#define LSM_AUDIT_NO_AUDIT 7
36 struct task_struct *tsk; 37 struct task_struct *tsk;
37 union { 38 union {
38 struct { 39 struct {
@@ -66,16 +67,19 @@ struct common_audit_data {
66 } key_struct; 67 } key_struct;
67#endif 68#endif
68 } u; 69 } u;
69 const char *function;
70 /* this union contains LSM specific data */ 70 /* this union contains LSM specific data */
71 union { 71 union {
72#ifdef CONFIG_SECURITY_SMACK
72 /* SMACK data */ 73 /* SMACK data */
73 struct smack_audit_data { 74 struct smack_audit_data {
75 const char *function;
74 char *subject; 76 char *subject;
75 char *object; 77 char *object;
76 char *request; 78 char *request;
77 int result; 79 int result;
78 } smack_audit_data; 80 } smack_audit_data;
81#endif
82#ifdef CONFIG_SECURITY_SELINUX
79 /* SELinux data */ 83 /* SELinux data */
80 struct { 84 struct {
81 u32 ssid; 85 u32 ssid;
@@ -83,10 +87,12 @@ struct common_audit_data {
83 u16 tclass; 87 u16 tclass;
84 u32 requested; 88 u32 requested;
85 u32 audited; 89 u32 audited;
90 u32 denied;
86 struct av_decision *avd; 91 struct av_decision *avd;
87 int result; 92 int result;
88 } selinux_audit_data; 93 } selinux_audit_data;
89 } lsm_priv; 94#endif
95 };
90 /* these callback will be implemented by a specific LSM */ 96 /* these callback will be implemented by a specific LSM */
91 void (*lsm_pre_audit)(struct audit_buffer *, void *); 97 void (*lsm_pre_audit)(struct audit_buffer *, void *);
92 void (*lsm_post_audit)(struct audit_buffer *, void *); 98 void (*lsm_post_audit)(struct audit_buffer *, void *);
@@ -104,7 +110,7 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
104/* Initialize an LSM audit data structure. */ 110/* Initialize an LSM audit data structure. */
105#define COMMON_AUDIT_DATA_INIT(_d, _t) \ 111#define COMMON_AUDIT_DATA_INIT(_d, _t) \
106 { memset((_d), 0, sizeof(struct common_audit_data)); \ 112 { memset((_d), 0, sizeof(struct common_audit_data)); \
107 (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; } 113 (_d)->type = LSM_AUDIT_DATA_##_t; }
108 114
109void common_lsm_audit(struct common_audit_data *a); 115void common_lsm_audit(struct common_audit_data *a);
110 116
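
With the generic function field gone, COMMON_AUDIT_DATA_INIT() now records only the type; anything LSM-specific lives in the per-LSM union members and in the pre/post audit callbacks. A hedged sketch of the common part (audit_example() is invented; a real LSM would also set lsm_pre_audit/lsm_post_audit):

	#include <linux/lsm_audit.h>

	static void audit_example(struct task_struct *tsk)
	{
		struct common_audit_data ad;

		COMMON_AUDIT_DATA_INIT(&ad, TASK);	/* type = LSM_AUDIT_DATA_TASK */
		ad.tsk = tsk;
		common_lsm_audit(&ad);
	}
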
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ba3a7cb1eaa0..9a72cc78e6b8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -34,8 +34,6 @@ extern int sysctl_legacy_va_layout;
34#define sysctl_legacy_va_layout 0 34#define sysctl_legacy_va_layout 0
35#endif 35#endif
36 36
37extern unsigned long mmap_min_addr;
38
39#include <asm/page.h> 37#include <asm/page.h>
40#include <asm/pgtable.h> 38#include <asm/pgtable.h>
41#include <asm/processor.h> 39#include <asm/processor.h>
@@ -575,19 +573,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
575} 573}
576 574
577/* 575/*
578 * If a hint addr is less than mmap_min_addr change hint to be as
579 * low as possible but still greater than mmap_min_addr
580 */
581static inline unsigned long round_hint_to_min(unsigned long hint)
582{
583 hint &= PAGE_MASK;
584 if (((void *)hint != NULL) &&
585 (hint < mmap_min_addr))
586 return PAGE_ALIGN(mmap_min_addr);
587 return hint;
588}
589
590/*
591 * Some inline functions in vmstat.h depend on page_zone() 576 * Some inline functions in vmstat.h depend on page_zone()
592 */ 577 */
593#include <linux/vmstat.h> 578#include <linux/vmstat.h>
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7acc8439d9b3..0042090a4d70 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,8 +240,6 @@ struct mm_struct {
240 240
241 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ 241 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
242 242
243 s8 oom_adj; /* OOM kill score adjustment (bit shift) */
244
245 cpumask_t cpu_vm_mask; 243 cpumask_t cpu_vm_mask;
246 244
247 /* Architecture-specific MM context */ 245 /* Architecture-specific MM context */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 5675b63a0631..0f32a9b6ff55 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -251,7 +251,7 @@ struct mtd_info {
251 251
252static inline struct mtd_info *dev_to_mtd(struct device *dev) 252static inline struct mtd_info *dev_to_mtd(struct device *dev)
253{ 253{
254 return dev ? container_of(dev, struct mtd_info, dev) : NULL; 254 return dev ? dev_get_drvdata(dev) : NULL;
255} 255}
256 256
257static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) 257static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index af6dcb992bc3..b70313d33ff8 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -47,6 +47,8 @@ struct mtd_partition {
47#define MTDPART_SIZ_FULL (0) 47#define MTDPART_SIZ_FULL (0)
48 48
49 49
50struct mtd_info;
51
50int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); 52int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
51int del_mtd_partitions(struct mtd_info *); 53int del_mtd_partitions(struct mtd_info *);
52 54
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index fdffb413b192..f6b90240dd41 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
473extern int nfs_flush_incompatible(struct file *file, struct page *page); 473extern int nfs_flush_incompatible(struct file *file, struct page *page);
474extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); 474extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
475extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); 475extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
476extern void nfs_writedata_release(void *);
477 476
478/* 477/*
479 * Try to write back everything synchronously (but check the 478 * Try to write back everything synchronously (but check the
@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
488extern int nfs_commit_inode(struct inode *, int); 487extern int nfs_commit_inode(struct inode *, int);
489extern struct nfs_write_data *nfs_commitdata_alloc(void); 488extern struct nfs_write_data *nfs_commitdata_alloc(void);
490extern void nfs_commit_free(struct nfs_write_data *wdata); 489extern void nfs_commit_free(struct nfs_write_data *wdata);
491extern void nfs_commitdata_release(void *wdata);
492#else 490#else
493static inline int 491static inline int
494nfs_commit_inode(struct inode *inode, int how) 492nfs_commit_inode(struct inode *inode, int how)
@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
507 * Allocate nfs_write_data structures 505 * Allocate nfs_write_data structures
508 */ 506 */
509extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages); 507extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
508extern void nfs_writedata_free(struct nfs_write_data *);
510 509
511/* 510/*
512 * linux/fs/nfs/read.c 511 * linux/fs/nfs/read.c
@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
515extern int nfs_readpages(struct file *, struct address_space *, 514extern int nfs_readpages(struct file *, struct address_space *,
516 struct list_head *, unsigned); 515 struct list_head *, unsigned);
517extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); 516extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
518extern void nfs_readdata_release(void *data);
519extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, 517extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
520 struct page *); 518 struct page *);
521 519
@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
523 * Allocate nfs_read_data structures 521 * Allocate nfs_read_data structures
524 */ 522 */
525extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages); 523extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
524extern void nfs_readdata_free(struct nfs_read_data *);
526 525
527/* 526/*
528 * linux/fs/nfs3proc.c 527 * linux/fs/nfs3proc.c
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df097..b752e807adde 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
28static inline void acpi_nmi_enable(void) { } 28static inline void acpi_nmi_enable(void) { }
29#endif 29#endif
30 30
31#ifndef trigger_all_cpu_backtrace 31/*
32#define trigger_all_cpu_backtrace() do { } while (0) 32 * Create trigger_all_cpu_backtrace() out of the arch-provided
33 * base function. Return whether such support was available,
34 * to allow calling code to fall back to some other mechanism:
35 */
36#ifdef arch_trigger_all_cpu_backtrace
37static inline bool trigger_all_cpu_backtrace(void)
38{
39 arch_trigger_all_cpu_backtrace();
40
41 return true;
42}
43#else
44static inline bool trigger_all_cpu_backtrace(void)
45{
46 return false;
47}
33#endif 48#endif
34 49
35#endif 50#endif
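
Because trigger_all_cpu_backtrace() now returns whether the architecture actually provides the NMI mechanism, callers can degrade gracefully instead of silently doing nothing. A hedged sketch (dump_all_cpus() is invented):

	#include <linux/nmi.h>
	#include <linux/kernel.h>

	static void dump_all_cpus(void)
	{
		if (!trigger_all_cpu_backtrace()) {
			/* no arch NMI support: at least dump this CPU */
			dump_stack();
		}
	}
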
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 829b94b156f2..b359c4a9ec9e 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -82,6 +82,12 @@
82 * to generate slightly worse code. So use a simple one-line #define 82 * to generate slightly worse code. So use a simple one-line #define
83 * for node_isset(), instead of wrapping an inline inside a macro, the 83 * for node_isset(), instead of wrapping an inline inside a macro, the
84 * way we do the other calls. 84 * way we do the other calls.
85 *
86 * NODEMASK_SCRATCH
87 * When doing above logical AND, OR, XOR, Remap operations the callers tend to
88 * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
89 * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
90 * for such situations. See below and CPUMASK_ALLOC also.
85 */ 91 */
86 92
87#include <linux/kernel.h> 93#include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
473#define for_each_node(node) for_each_node_state(node, N_POSSIBLE) 479#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
474#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) 480#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
475 481
482/*
483 * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h)
484 */
485
486#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
487#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
488#define NODEMASK_FREE(m) kfree(m)
489#else
490#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
491#define NODEMASK_FREE(m)
492#endif
493
494/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
495struct nodemask_scratch {
496 nodemask_t mask1;
497 nodemask_t mask2;
498};
499
500#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
501#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
502
503
476#endif /* __LINUX_NODEMASK_H */ 504#endif /* __LINUX_NODEMASK_H */
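
NODEMASK_SCRATCH in use: on configs where NODES_SHIFT > 8 the two scratch masks come from kmalloc rather than the stack, so the NULL check below is not dead code. A hedged sketch (remap_nodes() is invented, loosely in the style of the mempolicy user the comment mentions):

	#include <linux/nodemask.h>
	#include <linux/slab.h>

	static int remap_nodes(const nodemask_t *from, const nodemask_t *to)
	{
		NODEMASK_SCRATCH(scratch);	/* struct nodemask_scratch *scratch */

		if (!scratch)
			return -ENOMEM;	/* only possible in the kmalloc variant */
		scratch->mask1 = *from;		/* temporary working copies */
		scratch->mask2 = *to;
		/* ... compute with scratch->mask1/mask2 ... */
		NODEMASK_SCRATCH_FREE(scratch);
		return 0;
	}
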
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index c9663c690303..53b94e025c7c 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -18,5 +18,8 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
18 struct device_node *phy_np, 18 struct device_node *phy_np,
19 void (*hndlr)(struct net_device *), 19 void (*hndlr)(struct net_device *),
20 u32 flags, phy_interface_t iface); 20 u32 flags, phy_interface_t iface);
21extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
22 void (*hndlr)(struct net_device *),
23 phy_interface_t iface);
21 24
22#endif /* __LINUX_OF_MDIO_H */ 25#endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf5..ed5d7501e181 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
132{ 132{
133 VM_BUG_ON(in_interrupt()); 133 VM_BUG_ON(in_interrupt());
134 134
135#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) 135#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
136# ifdef CONFIG_PREEMPT 136# ifdef CONFIG_PREEMPT
137 VM_BUG_ON(!in_atomic()); 137 VM_BUG_ON(!in_atomic());
138# endif 138# endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
170{ 170{
171 VM_BUG_ON(in_interrupt()); 171 VM_BUG_ON(in_interrupt());
172 172
173#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) 173#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
174# ifdef CONFIG_PREEMPT 174# ifdef CONFIG_PREEMPT
175 VM_BUG_ON(!in_atomic()); 175 VM_BUG_ON(!in_atomic());
176# endif 176# endif
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 5e970c7d3fd5..b53f7006cc4e 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -115,26 +115,44 @@ enum perf_counter_sample_format {
115 PERF_SAMPLE_TID = 1U << 1, 115 PERF_SAMPLE_TID = 1U << 1,
116 PERF_SAMPLE_TIME = 1U << 2, 116 PERF_SAMPLE_TIME = 1U << 2,
117 PERF_SAMPLE_ADDR = 1U << 3, 117 PERF_SAMPLE_ADDR = 1U << 3,
118 PERF_SAMPLE_GROUP = 1U << 4, 118 PERF_SAMPLE_READ = 1U << 4,
119 PERF_SAMPLE_CALLCHAIN = 1U << 5, 119 PERF_SAMPLE_CALLCHAIN = 1U << 5,
120 PERF_SAMPLE_ID = 1U << 6, 120 PERF_SAMPLE_ID = 1U << 6,
121 PERF_SAMPLE_CPU = 1U << 7, 121 PERF_SAMPLE_CPU = 1U << 7,
122 PERF_SAMPLE_PERIOD = 1U << 8, 122 PERF_SAMPLE_PERIOD = 1U << 8,
123 PERF_SAMPLE_STREAM_ID = 1U << 9,
124 PERF_SAMPLE_RAW = 1U << 10,
123 125
124 PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */ 126 PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
125}; 127};
126 128
127/* 129/*
128 * Bits that can be set in attr.read_format to request that 130 * The format of the data returned by read() on a perf counter fd,
129 * reads on the counter should return the indicated quantities, 131 * as specified by attr.read_format:
130 * in increasing order of bit value, after the counter value. 132 *
133 * struct read_format {
134 * { u64 value;
135 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
136 * { u64 time_running; } && PERF_FORMAT_RUNNING
137 * { u64 id; } && PERF_FORMAT_ID
138 * } && !PERF_FORMAT_GROUP
139 *
140 * { u64 nr;
141 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
142 * { u64 time_running; } && PERF_FORMAT_RUNNING
143 * { u64 value;
144 * { u64 id; } && PERF_FORMAT_ID
145 * } cntr[nr];
146 * } && PERF_FORMAT_GROUP
147 * };
131 */ 148 */
132enum perf_counter_read_format { 149enum perf_counter_read_format {
133 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, 150 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
134 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, 151 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
135 PERF_FORMAT_ID = 1U << 2, 152 PERF_FORMAT_ID = 1U << 2,
153 PERF_FORMAT_GROUP = 1U << 3,
136 154
137 PERF_FORMAT_MAX = 1U << 3, /* non-ABI */ 155 PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
138}; 156};
139 157
140#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ 158#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -180,8 +198,9 @@ struct perf_counter_attr {
180 freq : 1, /* use freq, not period */ 198 freq : 1, /* use freq, not period */
181 inherit_stat : 1, /* per task counts */ 199 inherit_stat : 1, /* per task counts */
182 enable_on_exec : 1, /* next exec enables */ 200 enable_on_exec : 1, /* next exec enables */
201 task : 1, /* trace fork/exit */
183 202
184 __reserved_1 : 51; 203 __reserved_1 : 50;
185 204
186 __u32 wakeup_events; /* wakeup every n events */ 205 __u32 wakeup_events; /* wakeup every n events */
187 __u32 __reserved_2; 206 __u32 __reserved_2;
@@ -310,18 +329,18 @@ enum perf_event_type {
310 /* 329 /*
311 * struct { 330 * struct {
312 * struct perf_event_header header; 331 * struct perf_event_header header;
313 * u64 time; 332 * u32 pid, ppid;
314 * u64 id; 333 * u32 tid, ptid;
315 * u64 sample_period;
316 * }; 334 * };
317 */ 335 */
318 PERF_EVENT_PERIOD = 4, 336 PERF_EVENT_EXIT = 4,
319 337
320 /* 338 /*
321 * struct { 339 * struct {
322 * struct perf_event_header header; 340 * struct perf_event_header header;
323 * u64 time; 341 * u64 time;
324 * u64 id; 342 * u64 id;
343 * u64 stream_id;
325 * }; 344 * };
326 */ 345 */
327 PERF_EVENT_THROTTLE = 5, 346 PERF_EVENT_THROTTLE = 5,
@@ -331,6 +350,7 @@ enum perf_event_type {
331 * struct { 350 * struct {
332 * struct perf_event_header header; 351 * struct perf_event_header header;
333 * u32 pid, ppid; 352 * u32 pid, ppid;
353 * u32 tid, ptid;
334 * }; 354 * };
335 */ 355 */
336 PERF_EVENT_FORK = 7, 356 PERF_EVENT_FORK = 7,
@@ -339,10 +359,8 @@ enum perf_event_type {
339 * struct { 359 * struct {
340 * struct perf_event_header header; 360 * struct perf_event_header header;
341 * u32 pid, tid; 361 * u32 pid, tid;
342 * u64 value; 362 *
343 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 363 * struct read_format values;
344 * { u64 time_running; } && PERF_FORMAT_RUNNING
345 * { u64 parent_id; } && PERF_FORMAT_ID
346 * }; 364 * };
347 */ 365 */
348 PERF_EVENT_READ = 8, 366 PERF_EVENT_READ = 8,
@@ -356,14 +374,28 @@ enum perf_event_type {
356 * { u64 time; } && PERF_SAMPLE_TIME 374 * { u64 time; } && PERF_SAMPLE_TIME
357 * { u64 addr; } && PERF_SAMPLE_ADDR 375 * { u64 addr; } && PERF_SAMPLE_ADDR
358 * { u64 id; } && PERF_SAMPLE_ID 376 * { u64 id; } && PERF_SAMPLE_ID
377 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
359 * { u32 cpu, res; } && PERF_SAMPLE_CPU 378 * { u32 cpu, res; } && PERF_SAMPLE_CPU
360 * { u64 period; } && PERF_SAMPLE_PERIOD 379 * { u64 period; } && PERF_SAMPLE_PERIOD
361 * 380 *
362 * { u64 nr; 381 * { struct read_format values; } && PERF_SAMPLE_READ
363 * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
364 * 382 *
365 * { u64 nr, 383 * { u64 nr,
366 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN 384 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
385 *
386 * #
387 * # The RAW record below is opaque data wrt the ABI
388 * #
389 * # That is, the ABI doesn't make any promises wrt to
390 * # the stability of its content, it may vary depending
391 * # on event, hardware, kernel version and phase of
392 * # the moon.
393 * #
394 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
395 * #
396 *
397 * { u32 size;
398 * char data[size]; } && PERF_SAMPLE_RAW
367 * }; 399 * };
368 */ 400 */
369 PERF_EVENT_SAMPLE = 9, 401 PERF_EVENT_SAMPLE = 9,
@@ -409,6 +441,11 @@ struct perf_callchain_entry {
409 __u64 ip[PERF_MAX_STACK_DEPTH]; 441 __u64 ip[PERF_MAX_STACK_DEPTH];
410}; 442};
411 443
444struct perf_raw_record {
445 u32 size;
446 void *data;
447};
448
412struct task_struct; 449struct task_struct;
413 450
414/** 451/**
@@ -677,10 +714,13 @@ struct perf_sample_data {
677 struct pt_regs *regs; 714 struct pt_regs *regs;
678 u64 addr; 715 u64 addr;
679 u64 period; 716 u64 period;
717 struct perf_raw_record *raw;
680}; 718};
681 719
682extern int perf_counter_overflow(struct perf_counter *counter, int nmi, 720extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
683 struct perf_sample_data *data); 721 struct perf_sample_data *data);
722extern void perf_counter_output(struct perf_counter *counter, int nmi,
723 struct perf_sample_data *data);
684 724
685/* 725/*
686 * Return 1 for a software counter, 0 for a hardware counter 726 * Return 1 for a software counter, 0 for a hardware counter
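
The struct read_format pseudo-code documented above is the key to the new PERF_FORMAT_GROUP bit. A hedged userspace sketch that decodes a group read, assuming the counter was opened with PERF_FORMAT_GROUP | PERF_FORMAT_ID and without the time_enabled/time_running fields:

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>

	struct group_entry { uint64_t value, id; };

	static void dump_group(int fd)
	{
		uint64_t buf[1 + 2 * 64];	/* u64 nr, then {value, id} pairs */
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n <= 0)
			return;
		uint64_t nr = buf[0];
		struct group_entry *e = (struct group_entry *)&buf[1];

		for (uint64_t i = 0; i < nr; i++)
			printf("counter id %llu = %llu\n",
			       (unsigned long long)e[i].id,
			       (unsigned long long)e[i].value);
	}
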
diff --git a/include/linux/pps.h b/include/linux/pps.h
index cfe5c7214ec6..0194ab06177b 100644
--- a/include/linux/pps.h
+++ b/include/linux/pps.h
@@ -22,6 +22,8 @@
22#ifndef _PPS_H_ 22#ifndef _PPS_H_
23#define _PPS_H_ 23#define _PPS_H_
24 24
25#include <linux/types.h>
26
25#define PPS_VERSION "5.3.6" 27#define PPS_VERSION "5.3.6"
26#define PPS_MAX_SOURCES 16 /* should be enough... */ 28#define PPS_MAX_SOURCES 16 /* should be enough... */
27 29
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2c..000000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (classic version)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2001
19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
29 * Documentation/RCU
30 *
31 */
32
33#ifndef __LINUX_RCUCLASSIC_H
34#define __LINUX_RCUCLASSIC_H
35
36#include <linux/cache.h>
37#include <linux/spinlock.h>
38#include <linux/threads.h>
39#include <linux/cpumask.h>
40#include <linux/seqlock.h>
41
42#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
43#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
44#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
45#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
46
47/* Global control variables for rcupdate callback mechanism. */
48struct rcu_ctrlblk {
49 long cur; /* Current batch number. */
50 long completed; /* Number of the last completed batch */
51 long pending; /* Number of the last pending batch */
52#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
53 unsigned long gp_start; /* Time at which GP started in jiffies. */
54 unsigned long jiffies_stall;
55 /* Time at which to check for CPU stalls. */
56#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
57
58 int signaled;
59
60 spinlock_t lock ____cacheline_internodealigned_in_smp;
61 DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
62 /* current batch to proceed. */
63} ____cacheline_internodealigned_in_smp;
64
65/* Is batch a before batch b ? */
66static inline int rcu_batch_before(long a, long b)
67{
68 return (a - b) < 0;
69}
70
71/* Is batch a after batch b ? */
72static inline int rcu_batch_after(long a, long b)
73{
74 return (a - b) > 0;
75}
76
77/* Per-CPU data for Read-Copy Update. */
78struct rcu_data {
79 /* 1) quiescent state handling : */
80 long quiescbatch; /* Batch # for grace period */
81 int passed_quiesc; /* User-mode/idle loop etc. */
82 int qs_pending; /* core waits for quiesc state */
83
84 /* 2) batch handling */
85 /*
86 * if nxtlist is not NULL, then:
87 * batch:
88 * The batch # for the last entry of nxtlist
89 * [*nxttail[1], NULL = *nxttail[2]):
90 * Entries that batch # <= batch
91 * [*nxttail[0], *nxttail[1]):
92 * Entries that batch # <= batch - 1
93 * [nxtlist, *nxttail[0]):
94 * Entries that batch # <= batch - 2
95 * The grace period for these entries has completed, and
96 * the other grace-period-completed entries may be moved
97 * here temporarily in rcu_process_callbacks().
98 */
99 long batch;
100 struct rcu_head *nxtlist;
101 struct rcu_head **nxttail[3];
102 long qlen; /* # of queued callbacks */
103 struct rcu_head *donelist;
104 struct rcu_head **donetail;
105 long blimit; /* Upper limit on a processed batch */
106 int cpu;
107 struct rcu_head barrier;
108};
109
110/*
111 * Increment the quiescent state counter.
112 * The counter is a bit degenerate: we do not need to know
113 * how many quiescent states passed, just whether there was at least
114 * one since the start of the grace period. Thus it is just a flag.
115 */
116extern void rcu_qsctr_inc(int cpu);
117extern void rcu_bh_qsctr_inc(int cpu);
118
119extern int rcu_pending(int cpu);
120extern int rcu_needs_cpu(int cpu);
121
122#ifdef CONFIG_DEBUG_LOCK_ALLOC
123extern struct lockdep_map rcu_lock_map;
124# define rcu_read_acquire() \
125 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
126# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
127#else
128# define rcu_read_acquire() do { } while (0)
129# define rcu_read_release() do { } while (0)
130#endif
131
132#define __rcu_read_lock() \
133 do { \
134 preempt_disable(); \
135 __acquire(RCU); \
136 rcu_read_acquire(); \
137 } while (0)
138#define __rcu_read_unlock() \
139 do { \
140 rcu_read_release(); \
141 __release(RCU); \
142 preempt_enable(); \
143 } while (0)
144#define __rcu_read_lock_bh() \
145 do { \
146 local_bh_disable(); \
147 __acquire(RCU_BH); \
148 rcu_read_acquire(); \
149 } while (0)
150#define __rcu_read_unlock_bh() \
151 do { \
152 rcu_read_release(); \
153 __release(RCU_BH); \
154 local_bh_enable(); \
155 } while (0)
156
157#define __synchronize_sched() synchronize_rcu()
158
159#define call_rcu_sched(head, func) call_rcu(head, func)
160
161extern void __rcu_init(void);
162#define rcu_init_sched() do { } while (0)
163extern void rcu_check_callbacks(int cpu, int user);
164extern void rcu_restart_cpu(int cpu);
165
166extern long rcu_batches_completed(void);
167extern long rcu_batches_completed_bh(void);
168
169#define rcu_enter_nohz() do { } while (0)
170#define rcu_exit_nohz() do { } while (0)
171
172/* A context switch is a grace period for rcuclassic. */
173static inline int rcu_blocking_is_gp(void)
174{
175 return num_online_cpus() == 1;
176}
177
178#endif /* __LINUX_RCUCLASSIC_H */
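Before rcuclassic.h disappears, its batch-arithmetic idiom is worth recording: rcu_batch_before()/rcu_batch_after() order two counters by the sign of their difference, which keeps working across counter wraparound (well defined in the kernel, which builds with -fno-strict-overflow). A standalone sketch; compile with -fwrapv to get the same wrapping semantics in userspace:

	#include <stdio.h>
	#include <limits.h>

	/* Same idiom as the removed rcu_batch_before(): valid while the
	 * two counters stay less than LONG_MAX/2 apart. */
	static int batch_before(long a, long b)
	{
		return (a - b) < 0;
	}

	int main(void)
	{
		long cur  = LONG_MAX;	/* batch counter about to wrap */
		long next = cur;

		next++;			/* wraps to LONG_MIN under -fwrapv */
		printf("%d\n", batch_before(cur, next));	/* 1: cur is older */
		printf("%d\n", batch_before(next, cur));	/* 0 */
		return 0;
	}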
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634d..95e0615f4d75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,18 +51,26 @@ struct rcu_head {
51 void (*func)(struct rcu_head *head); 51 void (*func)(struct rcu_head *head);
52}; 52};
53 53
54/* Internal to kernel, but needed by rcupreempt.h. */ 54/* Exported common interfaces */
55extern void synchronize_rcu(void);
56extern void synchronize_rcu_bh(void);
57extern void rcu_barrier(void);
58extern void rcu_barrier_bh(void);
59extern void rcu_barrier_sched(void);
60extern void synchronize_sched_expedited(void);
61extern int sched_expedited_torture_stats(char *page);
62
63/* Internal to kernel */
64extern void rcu_init(void);
65extern void rcu_scheduler_starting(void);
66extern int rcu_needs_cpu(int cpu);
55extern int rcu_scheduler_active; 67extern int rcu_scheduler_active;
56 68
57#if defined(CONFIG_CLASSIC_RCU) 69#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
58#include <linux/rcuclassic.h>
59#elif defined(CONFIG_TREE_RCU)
60#include <linux/rcutree.h> 70#include <linux/rcutree.h>
61#elif defined(CONFIG_PREEMPT_RCU)
62#include <linux/rcupreempt.h>
63#else 71#else
64#error "Unknown RCU implementation specified to kernel configuration" 72#error "Unknown RCU implementation specified to kernel configuration"
65#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ 73#endif
66 74
67#define RCU_HEAD_INIT { .next = NULL, .func = NULL } 75#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
68#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT 76#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active;
70 (ptr)->next = NULL; (ptr)->func = NULL; \ 78 (ptr)->next = NULL; (ptr)->func = NULL; \
71} while (0) 79} while (0)
72 80
81#ifdef CONFIG_DEBUG_LOCK_ALLOC
82extern struct lockdep_map rcu_lock_map;
83# define rcu_read_acquire() \
84 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
85# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
86#else
87# define rcu_read_acquire() do { } while (0)
88# define rcu_read_release() do { } while (0)
89#endif
90
73/** 91/**
74 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 92 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
75 * 93 *
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active;
99 * 117 *
100 * It is illegal to block while in an RCU read-side critical section. 118 * It is illegal to block while in an RCU read-side critical section.
101 */ 119 */
102#define rcu_read_lock() __rcu_read_lock() 120static inline void rcu_read_lock(void)
121{
122 __rcu_read_lock();
123 __acquire(RCU);
124 rcu_read_acquire();
125}
103 126
104/** 127/**
105 * rcu_read_unlock - marks the end of an RCU read-side critical section. 128 * rcu_read_unlock - marks the end of an RCU read-side critical section.
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active;
116 * used as well. RCU does not care how the writers keep out of each 139 * used as well. RCU does not care how the writers keep out of each
117 * others' way, as long as they do so. 140 * others' way, as long as they do so.
118 */ 141 */
119#define rcu_read_unlock() __rcu_read_unlock() 142static inline void rcu_read_unlock(void)
143{
144 rcu_read_release();
145 __release(RCU);
146 __rcu_read_unlock();
147}
120 148
121/** 149/**
122 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section 150 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active;
129 * can use just rcu_read_lock(). 157 * can use just rcu_read_lock().
130 * 158 *
131 */ 159 */
132#define rcu_read_lock_bh() __rcu_read_lock_bh() 160static inline void rcu_read_lock_bh(void)
161{
162 __rcu_read_lock_bh();
163 __acquire(RCU_BH);
164 rcu_read_acquire();
165}
133 166
134/* 167/*
135 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section 168 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
136 * 169 *
137 * See rcu_read_lock_bh() for more information. 170 * See rcu_read_lock_bh() for more information.
138 */ 171 */
139#define rcu_read_unlock_bh() __rcu_read_unlock_bh() 172static inline void rcu_read_unlock_bh(void)
173{
174 rcu_read_release();
175 __release(RCU_BH);
176 __rcu_read_unlock_bh();
177}
140 178
141/** 179/**
142 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section 180
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active;
147 * - call_rcu_sched() and rcu_barrier_sched() 185 * - call_rcu_sched() and rcu_barrier_sched()
148 * on the write-side to ensure proper synchronization. 186
149 */ 187 */
150#define rcu_read_lock_sched() preempt_disable() 188static inline void rcu_read_lock_sched(void)
151#define rcu_read_lock_sched_notrace() preempt_disable_notrace() 189{
190 preempt_disable();
191 __acquire(RCU_SCHED);
192 rcu_read_acquire();
193}
194static inline notrace void rcu_read_lock_sched_notrace(void)
195{
196 preempt_disable_notrace();
197 __acquire(RCU_SCHED);
198}
152 199
153/* 200/*
154 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section 201
155 * 202 *
156 * See rcu_read_lock_sched for more information. 203 * See rcu_read_lock_sched for more information.
157 */ 204 */
158#define rcu_read_unlock_sched() preempt_enable() 205static inline void rcu_read_unlock_sched(void)
159#define rcu_read_unlock_sched_notrace() preempt_enable_notrace() 206{
160 207 rcu_read_release();
208 __release(RCU_SCHED);
209 preempt_enable();
210}
211static inline notrace void rcu_read_unlock_sched_notrace(void)
212{
213 __release(RCU_SCHED);
214 preempt_enable_notrace();
215}
161 216
162 217
163/** 218/**
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head,
259extern void call_rcu_bh(struct rcu_head *head, 314extern void call_rcu_bh(struct rcu_head *head,
260 void (*func)(struct rcu_head *head)); 315 void (*func)(struct rcu_head *head));
261 316
262/* Exported common interfaces */
263extern void synchronize_rcu(void);
264extern void rcu_barrier(void);
265extern void rcu_barrier_bh(void);
266extern void rcu_barrier_sched(void);
267
268/* Internal to kernel */
269extern void rcu_init(void);
270extern void rcu_scheduler_starting(void);
271extern int rcu_needs_cpu(int cpu);
272
273#endif /* __LINUX_RCUPDATE_H */ 317#endif /* __LINUX_RCUPDATE_H */
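With rcu_read_lock() and friends now real functions, it is worth restating the pattern they serve. A minimal reader/updater sketch built only from interfaces declared in this header; struct foo, global_foo, free_foo, and the updater's external serialization are hypothetical:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int val;
		struct rcu_head rcu;
	};

	static struct foo *global_foo;

	int reader(void)
	{
		int v;

		rcu_read_lock();			/* static inline after this patch */
		v = rcu_dereference(global_foo)->val;	/* load with publish ordering */
		rcu_read_unlock();
		return v;				/* possibly stale, never garbage */
	}

	static void free_foo(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	void updater(struct foo *newp)	/* caller serializes updaters */
	{
		struct foo *old = global_foo;

		rcu_assign_pointer(global_foo, newp);	/* publish the new version */
		call_rcu(&old->rcu, free_foo);		/* reclaim after a grace period */
	}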
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index fce522782ffa..000000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
29 * Documentation/RCU
30 *
31 */
32
33#ifndef __LINUX_RCUPREEMPT_H
34#define __LINUX_RCUPREEMPT_H
35
36#include <linux/cache.h>
37#include <linux/spinlock.h>
38#include <linux/threads.h>
39#include <linux/smp.h>
40#include <linux/cpumask.h>
41#include <linux/seqlock.h>
42
43extern void rcu_qsctr_inc(int cpu);
44static inline void rcu_bh_qsctr_inc(int cpu) { }
45
46/*
47 * Someone might want to pass call_rcu_bh as a function pointer.
48 * So this needs to just be a rename and not a macro function.
49 * (no parentheses)
50 */
51#define call_rcu_bh call_rcu
52
53/**
54 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
55 * @head: structure to be used for queueing the RCU updates.
56 * @func: actual update function to be invoked after the grace period
57 *
58 * The update function will be invoked some time after a full
59 * synchronize_sched()-style grace period elapses, in other words after
60 * all currently executing preempt-disabled sections of code (including
61 * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
62 * completed.
63 */
64extern void call_rcu_sched(struct rcu_head *head,
65 void (*func)(struct rcu_head *head));
66
67extern void __rcu_read_lock(void) __acquires(RCU);
68extern void __rcu_read_unlock(void) __releases(RCU);
69extern int rcu_pending(int cpu);
70extern int rcu_needs_cpu(int cpu);
71
72#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); }
73#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); }
74
75extern void __synchronize_sched(void);
76
77extern void __rcu_init(void);
78extern void rcu_init_sched(void);
79extern void rcu_check_callbacks(int cpu, int user);
80extern void rcu_restart_cpu(int cpu);
81extern long rcu_batches_completed(void);
82
83/*
84 * Return the number of RCU batches processed thus far. Useful for debugging
85 * and statistics. The _bh variant is identical to straight RCU.
86 */
87static inline long rcu_batches_completed_bh(void)
88{
89 return rcu_batches_completed();
90}
91
92#ifdef CONFIG_RCU_TRACE
93struct rcupreempt_trace;
94extern long *rcupreempt_flipctr(int cpu);
95extern long rcupreempt_data_completed(void);
96extern int rcupreempt_flip_flag(int cpu);
97extern int rcupreempt_mb_flag(int cpu);
98extern char *rcupreempt_try_flip_state_name(void);
99extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
100#endif
101
102struct softirq_action;
103
104#ifdef CONFIG_NO_HZ
105extern void rcu_enter_nohz(void);
106extern void rcu_exit_nohz(void);
107#else
108# define rcu_enter_nohz() do { } while (0)
109# define rcu_exit_nohz() do { } while (0)
110#endif
111
112/*
113 * A context switch is a grace period for rcupreempt synchronize_rcu()
114 * only during early boot, before the scheduler has been initialized.
115 * So, how the heck do we get a context switch? Well, if the caller
116 * invokes synchronize_rcu(), they are willing to accept a context
117 * switch, so we simply pretend that one happened.
118 *
119 * After boot, there might be a blocked or preempted task in an RCU
120 * read-side critical section, so we cannot then take the fastpath.
121 */
122static inline int rcu_blocking_is_gp(void)
123{
124 return num_online_cpus() == 1 && !rcu_scheduler_active;
125}
126
127#endif /* __LINUX_RCUPREEMPT_H */
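The call_rcu_sched() kernel-doc above survives the file's removal (the declaration moves to rcutree.h), so a usage sketch is still useful: it pairs with readers that are protected only by disabled preemption, which plain call_rcu() does not cover on preemptible kernels. struct item, gp, and the helpers are hypothetical:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct item {
		int data;
		struct rcu_head rcu;
	};

	static struct item *gp;

	static void item_free(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}

	/* Reader: a preempt-disabled region is a sched-RCU critical section. */
	int read_item(void)
	{
		int v;

		preempt_disable();
		v = gp->data;		/* protected only by disabled preemption */
		preempt_enable();
		return v;
	}

	/* Updater: the _sched variant waits for all such regions. */
	void replace_item(struct item *newp)
	{
		struct item *old = gp;

		rcu_assign_pointer(gp, newp);
		call_rcu_sched(&old->rcu, item_free);
	}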
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
deleted file mode 100644
index b99ae073192a..000000000000
--- a/include/linux/rcupreempt_trace.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
29 * http://lwn.net/Articles/253651/
30 */
31
32#ifndef __LINUX_RCUPREEMPT_TRACE_H
33#define __LINUX_RCUPREEMPT_TRACE_H
34
35#include <linux/types.h>
36#include <linux/kernel.h>
37
38#include <asm/atomic.h>
39
40/*
41 * PREEMPT_RCU data structures.
42 */
43
44struct rcupreempt_trace {
45 long next_length;
46 long next_add;
47 long wait_length;
48 long wait_add;
49 long done_length;
50 long done_add;
51 long done_remove;
52 atomic_t done_invoked;
53 long rcu_check_callbacks;
54 atomic_t rcu_try_flip_1;
55 atomic_t rcu_try_flip_e1;
56 long rcu_try_flip_i1;
57 long rcu_try_flip_ie1;
58 long rcu_try_flip_g1;
59 long rcu_try_flip_a1;
60 long rcu_try_flip_ae1;
61 long rcu_try_flip_a2;
62 long rcu_try_flip_z1;
63 long rcu_try_flip_ze1;
64 long rcu_try_flip_z2;
65 long rcu_try_flip_m1;
66 long rcu_try_flip_me1;
67 long rcu_try_flip_m2;
68};
69
70#ifdef CONFIG_RCU_TRACE
71#define RCU_TRACE(fn, arg) fn(arg);
72#else
73#define RCU_TRACE(fn, arg)
74#endif
75
76extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
77extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
78extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
79extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
80extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
81extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
82extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
83extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
84extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
85extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
86extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
87extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
88extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
89extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
90extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
91extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
92extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
93extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
94extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
95extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
96
97#endif /* __LINUX_RCUPREEMPT_TRACE_H */
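One detail of the deleted header deserves a note: RCU_TRACE() supplies its own trailing semicolon when CONFIG_RCU_TRACE is set and expands to nothing otherwise, so call sites must omit the semicolon themselves. A sketch of the resulting hazard (cond and other_work() are hypothetical):

	#ifdef CONFIG_RCU_TRACE
	#define RCU_TRACE(fn, arg) fn(arg);	/* expands with its own ';' */
	#else
	#define RCU_TRACE(fn, arg)		/* expands to nothing */
	#endif

	/* Correct call site: no trailing semicolon. */
	RCU_TRACE(rcupreempt_trace_move2done, &trace)

	/* Hazard: with tracing on this expands to "fn(arg);;", and the empty
	 * second statement orphans the else branch -- a compile error. */
	if (cond)
		RCU_TRACE(rcupreempt_trace_invoke, &trace);
	else
		other_work();

A do { } while (0) wrapper would have avoided the trap, a point that is presumably academic now that the file is gone.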
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c42..a89307717825 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,264 +30,57 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33#include <linux/cache.h> 33extern void rcu_sched_qs(int cpu);
34#include <linux/spinlock.h> 34extern void rcu_bh_qs(int cpu);
35#include <linux/threads.h>
36#include <linux/cpumask.h>
37#include <linux/seqlock.h>
38 35
39/* 36extern int rcu_needs_cpu(int cpu);
40 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
41 * In theory, it should be possible to add more levels straightforwardly.
42 * In practice, this has not been tested, so there is probably some
43 * bug somewhere.
44 */
45#define MAX_RCU_LVLS 3
46#define RCU_FANOUT (CONFIG_RCU_FANOUT)
47#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
48#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
49
50#if NR_CPUS <= RCU_FANOUT
51# define NUM_RCU_LVLS 1
52# define NUM_RCU_LVL_0 1
53# define NUM_RCU_LVL_1 (NR_CPUS)
54# define NUM_RCU_LVL_2 0
55# define NUM_RCU_LVL_3 0
56#elif NR_CPUS <= RCU_FANOUT_SQ
57# define NUM_RCU_LVLS 2
58# define NUM_RCU_LVL_0 1
59# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
60# define NUM_RCU_LVL_2 (NR_CPUS)
61# define NUM_RCU_LVL_3 0
62#elif NR_CPUS <= RCU_FANOUT_CUBE
63# define NUM_RCU_LVLS 3
64# define NUM_RCU_LVL_0 1
65# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
66# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
67# define NUM_RCU_LVL_3 NR_CPUS
68#else
69# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
70#endif /* #if (NR_CPUS) <= RCU_FANOUT */
71
72#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
73#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
74
75/*
76 * Dynticks per-CPU state.
77 */
78struct rcu_dynticks {
79 int dynticks_nesting; /* Track nesting level, sort of. */
80 int dynticks; /* Even value for dynticks-idle, else odd. */
81 int dynticks_nmi; /* Even value for either dynticks-idle or */
82 /* not in nmi handler, else odd. So this */
83 /* remains even for nmi from irq handler. */
84};
85
86/*
87 * Definition for node within the RCU grace-period-detection hierarchy.
88 */
89struct rcu_node {
90 spinlock_t lock;
91 unsigned long qsmask; /* CPUs or groups that need to switch in */
92 /* order for current grace period to proceed.*/
93 unsigned long qsmaskinit;
94 /* Per-GP initialization for qsmask. */
95 unsigned long grpmask; /* Mask to apply to parent qsmask. */
96 int grplo; /* lowest-numbered CPU or group here. */
97 int grphi; /* highest-numbered CPU or group here. */
98 u8 grpnum; /* CPU/group number for next level up. */
99 u8 level; /* root is at level 0. */
100 struct rcu_node *parent;
101} ____cacheline_internodealigned_in_smp;
102
103/* Index values for nxttail array in struct rcu_data. */
104#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
105#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
106#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
107#define RCU_NEXT_TAIL 3
108#define RCU_NEXT_SIZE 4
109
110/* Per-CPU data for read-copy update. */
111struct rcu_data {
112 /* 1) quiescent-state and grace-period handling : */
113 long completed; /* Track rsp->completed gp number */
114 /* in order to detect GP end. */
115 long gpnum; /* Highest gp number that this CPU */
116 /* is aware of having started. */
117 long passed_quiesc_completed;
118 /* Value of completed at time of qs. */
119 bool passed_quiesc; /* User-mode/idle loop etc. */
120 bool qs_pending; /* Core waits for quiesc state. */
121 bool beenonline; /* CPU online at least once. */
122 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
123 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
124
125 /* 2) batch handling */
126 /*
127 * If nxtlist is not NULL, it is partitioned as follows.
128 * Any of the partitions might be empty, in which case the
129 * pointer to that partition will be equal to the pointer for
130 * the following partition. When the list is empty, all of
131 * the nxttail elements point to nxtlist, which is NULL.
132 *
133 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
134 * Entries that might have arrived after current GP ended
135 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
136 * Entries known to have arrived before current GP ended
137 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
138 * Entries that batch # <= ->completed - 1: waiting for current GP
139 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
140 * Entries that batch # <= ->completed
141 * The grace period for these entries has completed, and
142 * the other grace-period-completed entries may be moved
143 * here temporarily in rcu_process_callbacks().
144 */
145 struct rcu_head *nxtlist;
146 struct rcu_head **nxttail[RCU_NEXT_SIZE];
147 long qlen; /* # of queued callbacks */
148 long blimit; /* Upper limit on a processed batch */
149
150#ifdef CONFIG_NO_HZ
151 /* 3) dynticks interface. */
152 struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
153 int dynticks_snap; /* Per-GP tracking for dynticks. */
154 int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
155#endif /* #ifdef CONFIG_NO_HZ */
156
157 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
158#ifdef CONFIG_NO_HZ
159 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
160#endif /* #ifdef CONFIG_NO_HZ */
161 unsigned long offline_fqs; /* Kicked due to being offline. */
162 unsigned long resched_ipi; /* Sent a resched IPI. */
163
164 /* 5) __rcu_pending() statistics. */
165 long n_rcu_pending; /* rcu_pending() calls since boot. */
166 long n_rp_qs_pending;
167 long n_rp_cb_ready;
168 long n_rp_cpu_needs_gp;
169 long n_rp_gp_completed;
170 long n_rp_gp_started;
171 long n_rp_need_fqs;
172 long n_rp_need_nothing;
173
174 int cpu;
175};
176
177/* Values for signaled field in struct rcu_state. */
178#define RCU_GP_INIT 0 /* Grace period being initialized. */
179#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
180#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
181#ifdef CONFIG_NO_HZ
182#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
183#else /* #ifdef CONFIG_NO_HZ */
184#define RCU_SIGNAL_INIT RCU_FORCE_QS
185#endif /* #else #ifdef CONFIG_NO_HZ */
186
187#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
188#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
189#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
190#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
191#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
192 /* to take at least one */
193 /* scheduling clock irq */
194 /* before ratting on them. */
195
196#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
197
198/*
199 * RCU global state, including node hierarchy. This hierarchy is
200 * represented in "heap" form in a dense array. The root (first level)
201 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
202 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
203 * and the third level in ->node[m+1] and following (->node[m+1] referenced
204 * by ->level[2]). The number of levels is determined by the number of
205 * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
206 * consisting of a single rcu_node.
207 */
208struct rcu_state {
209 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
210 struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
211 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
212 u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
213 struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
214
215 /* The following fields are guarded by the root rcu_node's lock. */
216
217 u8 signaled ____cacheline_internodealigned_in_smp;
218 /* Force QS state. */
219 long gpnum; /* Current gp number. */
220 long completed; /* # of last completed gp. */
221 spinlock_t onofflock; /* exclude on/offline and */
222 /* starting new GP. */
223 spinlock_t fqslock; /* Only one task forcing */
224 /* quiescent states. */
225 unsigned long jiffies_force_qs; /* Time at which to invoke */
226 /* force_quiescent_state(). */
227 unsigned long n_force_qs; /* Number of calls to */
228 /* force_quiescent_state(). */
229 unsigned long n_force_qs_lh; /* ~Number of calls leaving */
230 /* due to lock unavailable. */
231 unsigned long n_force_qs_ngp; /* Number of calls leaving */
232 /* due to no GP active. */
233#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
234 unsigned long gp_start; /* Time at which GP started, */
235 /* but in jiffies. */
236 unsigned long jiffies_stall; /* Time at which to check */
237 /* for CPU stalls. */
238#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
239#ifdef CONFIG_NO_HZ
240 long dynticks_completed; /* Value of completed @ snap. */
241#endif /* #ifdef CONFIG_NO_HZ */
242};
243 37
244extern void rcu_qsctr_inc(int cpu); 38#ifdef CONFIG_TREE_PREEMPT_RCU
245extern void rcu_bh_qsctr_inc(int cpu);
246 39
247extern int rcu_pending(int cpu); 40extern void __rcu_read_lock(void);
248extern int rcu_needs_cpu(int cpu); 41extern void __rcu_read_unlock(void);
42extern void exit_rcu(void);
249 43
250#ifdef CONFIG_DEBUG_LOCK_ALLOC 44#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
251extern struct lockdep_map rcu_lock_map;
252# define rcu_read_acquire() \
253 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
254# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
255#else
256# define rcu_read_acquire() do { } while (0)
257# define rcu_read_release() do { } while (0)
258#endif
259 45
260static inline void __rcu_read_lock(void) 46static inline void __rcu_read_lock(void)
261{ 47{
262 preempt_disable(); 48 preempt_disable();
263 __acquire(RCU);
264 rcu_read_acquire();
265} 49}
50
266static inline void __rcu_read_unlock(void) 51static inline void __rcu_read_unlock(void)
267{ 52{
268 rcu_read_release();
269 __release(RCU);
270 preempt_enable(); 53 preempt_enable();
271} 54}
55
56static inline void exit_rcu(void)
57{
58}
59
60#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
61
272static inline void __rcu_read_lock_bh(void) 62static inline void __rcu_read_lock_bh(void)
273{ 63{
274 local_bh_disable(); 64 local_bh_disable();
275 __acquire(RCU_BH);
276 rcu_read_acquire();
277} 65}
278static inline void __rcu_read_unlock_bh(void) 66static inline void __rcu_read_unlock_bh(void)
279{ 67{
280 rcu_read_release();
281 __release(RCU_BH);
282 local_bh_enable(); 68 local_bh_enable();
283} 69}
284 70
285#define __synchronize_sched() synchronize_rcu() 71#define __synchronize_sched() synchronize_rcu()
286 72
287#define call_rcu_sched(head, func) call_rcu(head, func) 73extern void call_rcu_sched(struct rcu_head *head,
74 void (*func)(struct rcu_head *rcu));
288 75
289static inline void rcu_init_sched(void) 76static inline void synchronize_rcu_expedited(void)
290{ 77{
78 synchronize_sched_expedited();
79}
80
81static inline void synchronize_rcu_bh_expedited(void)
82{
83 synchronize_sched_expedited();
291} 84}
292 85
293extern void __rcu_init(void); 86extern void __rcu_init(void);
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu);
296 89
297extern long rcu_batches_completed(void); 90extern long rcu_batches_completed(void);
298extern long rcu_batches_completed_bh(void); 91extern long rcu_batches_completed_bh(void);
92extern long rcu_batches_completed_sched(void);
93
94static inline void rcu_init_sched(void)
95{
96}
299 97
300#ifdef CONFIG_NO_HZ 98#ifdef CONFIG_NO_HZ
301void rcu_enter_nohz(void); 99void rcu_enter_nohz(void);
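The !CONFIG_TREE_PREEMPT_RCU half above makes the cost model explicit: the read-side primitives reduce to preemption control, so any context switch is a quiescent state, and the expedited grace-period helpers simply reuse synchronize_sched_expedited(). A sketch of a reader and a hurried updater in that configuration; struct config and gp are hypothetical:

	struct config {
		int len;
	};

	static struct config *gp;

	int read_config_len(void)
	{
		int len;

		rcu_read_lock();		/* == preempt_disable() here */
		len = rcu_dereference(gp)->len;
		rcu_read_unlock();		/* == preempt_enable() here */
		return len;
	}

	void fast_update(struct config *newp)
	{
		rcu_assign_pointer(gp, newp);
		synchronize_rcu_expedited();	/* == synchronize_sched_expedited() */
		/* the old version is now safe for the caller to free */
	}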
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 2ce29831feb6..278777fa8a3a 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -224,7 +224,7 @@ void rfkill_destroy(struct rfkill *rfkill);
224 * should be blocked) so that drivers need not keep track of the soft 224 * should be blocked) so that drivers need not keep track of the soft
225 * block state -- which they might not be able to. 225 * block state -- which they might not be able to.
226 */ 226 */
227bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); 227bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
228 228
229/** 229/**
230 * rfkill_set_sw_state - Set the internal rfkill software block state 230 * rfkill_set_sw_state - Set the internal rfkill software block state
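Dropping __must_check legitimizes the common pattern of mirroring a hardware switch without consulting the combined block state. A hedged sketch (struct my_dev and the field names are hypothetical):

	/* In a driver's interrupt handler or poll routine: */
	static void report_hw_switch(struct my_dev *dev, bool hw_blocked)
	{
		/* The returned overall-blocked state may now be ignored. */
		rfkill_set_hw_state(dev->rfkill, hw_blocked);
	}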
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index e5996984ddd0..9aaf5bfdad1a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -242,6 +242,8 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
242 */ 242 */
243 243
244#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ 244#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
245#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
246#define SG_MITER_FROM_SG (1 << 2) /* nop */
245 247
246struct sg_mapping_iter { 248struct sg_mapping_iter {
247 /* the following three fields can be accessed directly */ 249 /* the following three fields can be accessed directly */
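The new flags tell sg_miter_stop() whether a mapped page must be flushed back to the scatterlist (SG_MITER_TO_SG) or was only read from it (SG_MITER_FROM_SG, a no-op on unmap). A sketch of a copy-out loop under those assumptions:

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static size_t copy_from_sg(struct scatterlist *sgl, unsigned int nents,
				   void *buf, size_t buflen)
	{
		struct sg_mapping_iter miter;
		size_t copied = 0;

		sg_miter_start(&miter, sgl, nents,
			       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
		while (copied < buflen && sg_miter_next(&miter)) {
			size_t n = min(miter.length, buflen - copied);

			memcpy(buf + copied, miter.addr, n);	/* addr is kmapped */
			copied += n;
		}
		sg_miter_stop(&miter);	/* FROM_SG: nothing to flush back */
		return copied;
	}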
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 16a982e389fb..379531c08975 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -209,7 +209,7 @@ extern unsigned long long time_sync_thresh;
209 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) 209 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
210#define task_contributes_to_load(task) \ 210#define task_contributes_to_load(task) \
211 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ 211 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
212 (task->flags & PF_FROZEN) == 0) 212 (task->flags & PF_FREEZING) == 0)
213 213
214#define __set_task_state(tsk, state_value) \ 214#define __set_task_state(tsk, state_value) \
215 do { (tsk)->state = (state_value); } while (0) 215 do { (tsk)->state = (state_value); } while (0)
@@ -1163,6 +1163,8 @@ struct sched_rt_entity {
1163#endif 1163#endif
1164}; 1164};
1165 1165
1166struct rcu_node;
1167
1166struct task_struct { 1168struct task_struct {
1167 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1169 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1168 void *stack; 1170 void *stack;
@@ -1198,6 +1200,7 @@ struct task_struct {
1198 * a short time 1200 * a short time
1199 */ 1201 */
1200 unsigned char fpu_counter; 1202 unsigned char fpu_counter;
1203 s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
1201#ifdef CONFIG_BLK_DEV_IO_TRACE 1204#ifdef CONFIG_BLK_DEV_IO_TRACE
1202 unsigned int btrace_seq; 1205 unsigned int btrace_seq;
1203#endif 1206#endif
@@ -1205,10 +1208,12 @@ struct task_struct {
1205 unsigned int policy; 1208 unsigned int policy;
1206 cpumask_t cpus_allowed; 1209 cpumask_t cpus_allowed;
1207 1210
1208#ifdef CONFIG_PREEMPT_RCU 1211#ifdef CONFIG_TREE_PREEMPT_RCU
1209 int rcu_read_lock_nesting; 1212 int rcu_read_lock_nesting;
1210 int rcu_flipctr_idx; 1213 char rcu_read_unlock_special;
1211#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1214 struct rcu_node *rcu_blocked_node;
1215 struct list_head rcu_node_entry;
1216#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1212 1217
1213#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1218#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1214 struct sched_info sched_info; 1219 struct sched_info sched_info;
@@ -1291,6 +1296,7 @@ struct task_struct {
1291 struct mutex cred_guard_mutex; /* guard against foreign influences on 1296 struct mutex cred_guard_mutex; /* guard against foreign influences on
1292 * credential calculations 1297 * credential calculations
1293 * (notably. ptrace) */ 1298 * (notably. ptrace) */
1299 struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1294 1300
1295 char comm[TASK_COMM_LEN]; /* executable name excluding path 1301 char comm[TASK_COMM_LEN]; /* executable name excluding path
1296 - access with [gs]et_task_comm (which lock 1302 - access with [gs]et_task_comm (which lock
@@ -1680,6 +1686,7 @@ extern cputime_t task_gtime(struct task_struct *p);
1680#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1686#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1681#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ 1687#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
1682#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1688#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1689#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
1683#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1690#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1684#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1691#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1685#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1692#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
@@ -1722,6 +1729,28 @@ extern cputime_t task_gtime(struct task_struct *p);
1722#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1729#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1723#define used_math() tsk_used_math(current) 1730#define used_math() tsk_used_math(current)
1724 1731
1732#ifdef CONFIG_TREE_PREEMPT_RCU
1733
1734#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1735#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1736#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */
1737
1738static inline void rcu_copy_process(struct task_struct *p)
1739{
1740 p->rcu_read_lock_nesting = 0;
1741 p->rcu_read_unlock_special = 0;
1742 p->rcu_blocked_node = NULL;
1743 INIT_LIST_HEAD(&p->rcu_node_entry);
1744}
1745
1746#else
1747
1748static inline void rcu_copy_process(struct task_struct *p)
1749{
1750}
1751
1752#endif
1753
1725#ifdef CONFIG_SMP 1754#ifdef CONFIG_SMP
1726extern int set_cpus_allowed_ptr(struct task_struct *p, 1755extern int set_cpus_allowed_ptr(struct task_struct *p,
1727 const struct cpumask *new_mask); 1756 const struct cpumask *new_mask);
@@ -2075,7 +2104,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
2075#define for_each_process(p) \ 2104#define for_each_process(p) \
2076 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 2105 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2077 2106
2078extern bool is_single_threaded(struct task_struct *); 2107extern bool current_is_single_threaded(void);
2079 2108
2080/* 2109/*
2081 * Careful: do_each_thread/while_each_thread is a double loop so 2110 * Careful: do_each_thread/while_each_thread is a double loop so
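The new task fields are initialized by rcu_copy_process() on the fork path and consumed by the rcu_read_unlock() slow path. An illustrative sketch of both ends (the real consumer logic lands elsewhere in this patch series; the flag checks below are schematic):

	/* At fork, before the child can run: */
	rcu_copy_process(p);	/* zero nesting, no blocked node, empty entry */

	/* In the outermost rcu_read_unlock(), schematically: */
	if (current->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) {
		/* the task blocked inside its critical section and must be
		 * removed from current->rcu_blocked_node's blocked list */
	}
	if (current->rcu_read_unlock_special & RCU_READ_UNLOCK_NEED_QS) {
		/* the RCU core asked this CPU to report a quiescent state */
	}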
diff --git a/include/linux/security.h b/include/linux/security.h
index 5eff459b3833..d050b66ab9ef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -28,6 +28,7 @@
28#include <linux/resource.h> 28#include <linux/resource.h>
29#include <linux/sem.h> 29#include <linux/sem.h>
30#include <linux/shm.h> 30#include <linux/shm.h>
31#include <linux/mm.h> /* PAGE_ALIGN */
31#include <linux/msg.h> 32#include <linux/msg.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
33#include <linux/key.h> 34#include <linux/key.h>
@@ -52,7 +53,7 @@ struct audit_krule;
52extern int cap_capable(struct task_struct *tsk, const struct cred *cred, 53extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
53 int cap, int audit); 54 int cap, int audit);
54extern int cap_settime(struct timespec *ts, struct timezone *tz); 55extern int cap_settime(struct timespec *ts, struct timezone *tz);
55extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); 56extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
56extern int cap_ptrace_traceme(struct task_struct *parent); 57extern int cap_ptrace_traceme(struct task_struct *parent);
57extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); 58extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
58extern int cap_capset(struct cred *new, const struct cred *old, 59extern int cap_capset(struct cred *new, const struct cred *old,
@@ -66,6 +67,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
66extern int cap_inode_removexattr(struct dentry *dentry, const char *name); 67extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
67extern int cap_inode_need_killpriv(struct dentry *dentry); 68extern int cap_inode_need_killpriv(struct dentry *dentry);
68extern int cap_inode_killpriv(struct dentry *dentry); 69extern int cap_inode_killpriv(struct dentry *dentry);
70extern int cap_file_mmap(struct file *file, unsigned long reqprot,
71 unsigned long prot, unsigned long flags,
72 unsigned long addr, unsigned long addr_only);
69extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); 73extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
70extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, 74extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
71 unsigned long arg4, unsigned long arg5); 75 unsigned long arg4, unsigned long arg5);
@@ -92,6 +96,7 @@ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
92extern int cap_netlink_recv(struct sk_buff *skb, int cap); 96extern int cap_netlink_recv(struct sk_buff *skb, int cap);
93 97
94extern unsigned long mmap_min_addr; 98extern unsigned long mmap_min_addr;
99extern unsigned long dac_mmap_min_addr;
95/* 100/*
96 * Values used in the task_security_ops calls 101 * Values used in the task_security_ops calls
97 */ 102 */
@@ -116,6 +121,21 @@ struct request_sock;
116#define LSM_UNSAFE_PTRACE 2 121#define LSM_UNSAFE_PTRACE 2
117#define LSM_UNSAFE_PTRACE_CAP 4 122#define LSM_UNSAFE_PTRACE_CAP 4
118 123
124/*
125 * If a hint addr is less than mmap_min_addr, change the hint to be as
126 * low as possible but still greater than mmap_min_addr.
127 */
128static inline unsigned long round_hint_to_min(unsigned long hint)
129{
130 hint &= PAGE_MASK;
131 if (((void *)hint != NULL) &&
132 (hint < mmap_min_addr))
133 return PAGE_ALIGN(mmap_min_addr);
134 return hint;
135}
136extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp,
137 void __user *buffer, size_t *lenp, loff_t *ppos);
138
119#ifdef CONFIG_SECURITY 139#ifdef CONFIG_SECURITY
120 140
121struct security_mnt_opts { 141struct security_mnt_opts {
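round_hint_to_min() is easiest to read through a worked example. Assuming PAGE_SIZE of 4096 and mmap_min_addr of 65536 (0x10000):

	round_hint_to_min(0);		/* NULL hint: stays 0, mmap chooses freely */
	round_hint_to_min(0x1234);	/* masked to 0x1000, below the floor:
					 * returns PAGE_ALIGN(0x10000) = 0x10000 */
	round_hint_to_min(0x20000);	/* already above the floor: returned as-is */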
@@ -633,6 +653,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
633 * manual page for definitions of the @clone_flags. 653 * manual page for definitions of the @clone_flags.
634 * @clone_flags contains the flags indicating what should be shared. 654 * @clone_flags contains the flags indicating what should be shared.
635 * Return 0 if permission is granted. 655 * Return 0 if permission is granted.
656 * @cred_alloc_blank:
657 * @cred points to the credentials.
658 * @gfp indicates the atomicity of any memory allocations.
659 * Only allocate sufficient memory and attach to @cred such that
660 * cred_transfer() will not get ENOMEM.
636 * @cred_free: 661 * @cred_free:
637 * @cred points to the credentials. 662 * @cred points to the credentials.
638 * Deallocate and clear the cred->security field in a set of credentials. 663 * Deallocate and clear the cred->security field in a set of credentials.
@@ -645,6 +670,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
645 * @new points to the new credentials. 670 * @new points to the new credentials.
646 * @old points to the original credentials. 671 * @old points to the original credentials.
647 * Install a new set of credentials. 672 * Install a new set of credentials.
673 * @cred_transfer:
674 * @new points to the new credentials.
675 * @old points to the original credentials.
676 * Transfer data from original creds to new creds
648 * @kernel_act_as: 677 * @kernel_act_as:
649 * Set the credentials for a kernel service to act as (subjective context). 678 * Set the credentials for a kernel service to act as (subjective context).
650 * @new points to the credentials to be modified. 679 * @new points to the credentials to be modified.
@@ -658,6 +687,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
658 * @inode points to the inode to use as a reference. 687 * @inode points to the inode to use as a reference.
659 * The current task must be the one that nominated @inode. 688 * The current task must be the one that nominated @inode.
660 * Return 0 if successful. 689 * Return 0 if successful.
690 * @kernel_module_request:
691 * Ability to trigger the kernel to automatically upcall to userspace so
692 * that userspace can load a kernel module with the given name.
693 * Return 0 if successful.
661 * @task_setuid: 694 * @task_setuid:
662 * Check permission before setting one or more of the user identity 695 * Check permission before setting one or more of the user identity
663 * attributes of the current process. The @flags parameter indicates 696 * attributes of the current process. The @flags parameter indicates
@@ -974,6 +1007,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
974 * Sets the connection's peersid to the secmark on skb. 1007 * Sets the connection's peersid to the secmark on skb.
975 * @req_classify_flow: 1008 * @req_classify_flow:
976 * Sets the flow's sid to the openreq sid. 1009 * Sets the flow's sid to the openreq sid.
1010 * @tun_dev_create:
1011 * Check permissions prior to creating a new TUN device.
1012 * @tun_dev_post_create:
1013 * This hook allows a module to update or allocate a per-socket security
1014 * structure.
1015 * @sk contains the newly created sock structure.
1016 * @tun_dev_attach:
1017 * Check permissions prior to attaching to a persistent TUN device. This
1018 * hook can also be used by the module to update any security state
1019 * associated with the TUN device's sock structure.
1020 * @sk contains the existing sock structure.
977 * 1021 *
978 * Security hooks for XFRM operations. 1022 * Security hooks for XFRM operations.
979 * 1023 *
@@ -1068,6 +1112,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1068 * Return the length of the string (including terminating NUL) or -ve if 1112 * Return the length of the string (including terminating NUL) or -ve if
1069 * an error. 1113 * an error.
1070 * May also return 0 (and a NULL buffer pointer) if there is no label. 1114 * May also return 0 (and a NULL buffer pointer) if there is no label.
1115 * @key_session_to_parent:
1116 * Forcibly assign the session keyring from a process to its parent
1117 * process.
1118 * @cred: Pointer to process's credentials
1119 * @parent_cred: Pointer to parent process's credentials
1120 * @keyring: Proposed new session keyring
1121 * Return 0 if permission is granted, -ve error otherwise.
1071 * 1122 *
1072 * Security hooks affecting all System V IPC operations. 1123 * Security hooks affecting all System V IPC operations.
1073 * 1124 *
@@ -1209,7 +1260,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1209 * @alter contains the flag indicating whether changes are to be made. 1260 * @alter contains the flag indicating whether changes are to be made.
1210 * Return 0 if permission is granted. 1261 * Return 0 if permission is granted.
1211 * 1262 *
1212 * @ptrace_may_access: 1263 * @ptrace_access_check:
1213 * Check permission before allowing the current process to trace the 1264 * Check permission before allowing the current process to trace the
1214 * @child process. 1265 * @child process.
1215 * Security modules may also want to perform a process tracing check 1266 * Security modules may also want to perform a process tracing check
@@ -1224,7 +1275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1224 * Check that the @parent process has sufficient permission to trace the 1275 * Check that the @parent process has sufficient permission to trace the
1225 * current process before allowing the current process to present itself 1276 * current process before allowing the current process to present itself
1226 * to the @parent process for tracing. 1277 * to the @parent process for tracing.
1227 * The parent process will still have to undergo the ptrace_may_access 1278 * The parent process will still have to undergo the ptrace_access_check
1228 * checks before it is allowed to trace this one. 1279 * checks before it is allowed to trace this one.
1229 * @parent contains the task_struct structure for debugger process. 1280 * @parent contains the task_struct structure for debugger process.
1230 * Return 0 if permission is granted. 1281 * Return 0 if permission is granted.
@@ -1331,12 +1382,47 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1331 * audit_rule_init. 1382 * audit_rule_init.
1332 * @rule contains the allocated rule 1383 * @rule contains the allocated rule
1333 * 1384 *
1385 * @inode_notifysecctx:
1386 * Notify the security module of what the security context of an inode
1387 * should be. Initializes the incore security context managed by the
1388 * security module for this inode. Example usage: NFS client invokes
1389 * this hook to initialize the security context in its incore inode to the
1390 * value provided by the server for the file when the server returned the
1391 * file's attributes to the client.
1392 *
1393 * Must be called with inode->i_mutex locked.
1394 *
1395 * @inode we wish to set the security context of.
1396 * @ctx contains the string which we wish to set in the inode.
1397 * @ctxlen contains the length of @ctx.
1398 *
1399 * @inode_setsecctx:
1400 * Change the security context of an inode. Updates the
1401 * incore security context managed by the security module and invokes the
1402 * fs code as needed (via __vfs_setxattr_noperm) to update any backing
1403 * xattrs that represent the context. Example usage: NFS server invokes
1404 * this hook to change the security context in its incore inode and on the
1405 * backing filesystem to a value provided by the client on a SETATTR
1406 * operation.
1407 *
1408 * Must be called with inode->i_mutex locked.
1409 *
1410 * @dentry contains the dentry of the inode we wish to set the security context of.
1411 * @ctx contains the string which we wish to set in the inode.
1412 * @ctxlen contains the length of @ctx.
1413 *
1414 * @inode_getsecctx:
1415 * Returns a string containing all relevant security context information
1416 *
1417 * @inode we wish to get the security context of.
1418 * @ctx is a pointer in which to place the allocated security context.
1419 * @ctxlen points to the place to put the length of @ctx.
1334 * This is the main security structure. 1420 * This is the main security structure.
1335 */ 1421 */
1336struct security_operations { 1422struct security_operations {
1337 char name[SECURITY_NAME_MAX + 1]; 1423 char name[SECURITY_NAME_MAX + 1];
1338 1424
1339 int (*ptrace_may_access) (struct task_struct *child, unsigned int mode); 1425 int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
1340 int (*ptrace_traceme) (struct task_struct *parent); 1426 int (*ptrace_traceme) (struct task_struct *parent);
1341 int (*capget) (struct task_struct *target, 1427 int (*capget) (struct task_struct *target,
1342 kernel_cap_t *effective, 1428 kernel_cap_t *effective,
@@ -1463,12 +1549,15 @@ struct security_operations {
1463 int (*dentry_open) (struct file *file, const struct cred *cred); 1549 int (*dentry_open) (struct file *file, const struct cred *cred);
1464 1550
1465 int (*task_create) (unsigned long clone_flags); 1551 int (*task_create) (unsigned long clone_flags);
1552 int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
1466 void (*cred_free) (struct cred *cred); 1553 void (*cred_free) (struct cred *cred);
1467 int (*cred_prepare)(struct cred *new, const struct cred *old, 1554 int (*cred_prepare)(struct cred *new, const struct cred *old,
1468 gfp_t gfp); 1555 gfp_t gfp);
1469 void (*cred_commit)(struct cred *new, const struct cred *old); 1556 void (*cred_commit)(struct cred *new, const struct cred *old);
1557 void (*cred_transfer)(struct cred *new, const struct cred *old);
1470 int (*kernel_act_as)(struct cred *new, u32 secid); 1558 int (*kernel_act_as)(struct cred *new, u32 secid);
1471 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1559 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1560 int (*kernel_module_request)(void);
1472 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); 1561 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
1473 int (*task_fix_setuid) (struct cred *new, const struct cred *old, 1562 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1474 int flags); 1563 int flags);
@@ -1536,6 +1625,10 @@ struct security_operations {
1536 int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); 1625 int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
1537 void (*release_secctx) (char *secdata, u32 seclen); 1626 void (*release_secctx) (char *secdata, u32 seclen);
1538 1627
1628 int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
1629 int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
1630 int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
1631
1539#ifdef CONFIG_SECURITY_NETWORK 1632#ifdef CONFIG_SECURITY_NETWORK
1540 int (*unix_stream_connect) (struct socket *sock, 1633 int (*unix_stream_connect) (struct socket *sock,
1541 struct socket *other, struct sock *newsk); 1634 struct socket *other, struct sock *newsk);
@@ -1572,6 +1665,9 @@ struct security_operations {
1572 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); 1665 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
1573 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); 1666 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
1574 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); 1667 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
1668 int (*tun_dev_create)(void);
1669 void (*tun_dev_post_create)(struct sock *sk);
1670 int (*tun_dev_attach)(struct sock *sk);
1575#endif /* CONFIG_SECURITY_NETWORK */ 1671#endif /* CONFIG_SECURITY_NETWORK */
1576 1672
1577#ifdef CONFIG_SECURITY_NETWORK_XFRM 1673#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1600,6 +1696,9 @@ struct security_operations {
1600 const struct cred *cred, 1696 const struct cred *cred,
1601 key_perm_t perm); 1697 key_perm_t perm);
1602 int (*key_getsecurity)(struct key *key, char **_buffer); 1698 int (*key_getsecurity)(struct key *key, char **_buffer);
1699 int (*key_session_to_parent)(const struct cred *cred,
1700 const struct cred *parent_cred,
1701 struct key *key);
1603#endif /* CONFIG_KEYS */ 1702#endif /* CONFIG_KEYS */
1604 1703
1605#ifdef CONFIG_AUDIT 1704#ifdef CONFIG_AUDIT
@@ -1617,7 +1716,7 @@ extern int security_module_enable(struct security_operations *ops);
1617extern int register_security(struct security_operations *ops); 1716extern int register_security(struct security_operations *ops);
1618 1717
1619/* Security operations */ 1718/* Security operations */
1620int security_ptrace_may_access(struct task_struct *child, unsigned int mode); 1719int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
1621int security_ptrace_traceme(struct task_struct *parent); 1720int security_ptrace_traceme(struct task_struct *parent);
1622int security_capget(struct task_struct *target, 1721int security_capget(struct task_struct *target,
1623 kernel_cap_t *effective, 1722 kernel_cap_t *effective,
@@ -1716,11 +1815,14 @@ int security_file_send_sigiotask(struct task_struct *tsk,
1716int security_file_receive(struct file *file); 1815int security_file_receive(struct file *file);
1717int security_dentry_open(struct file *file, const struct cred *cred); 1816int security_dentry_open(struct file *file, const struct cred *cred);
1718int security_task_create(unsigned long clone_flags); 1817int security_task_create(unsigned long clone_flags);
1818int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
1719void security_cred_free(struct cred *cred); 1819void security_cred_free(struct cred *cred);
1720int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); 1820int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
1721void security_commit_creds(struct cred *new, const struct cred *old); 1821void security_commit_creds(struct cred *new, const struct cred *old);
1822void security_transfer_creds(struct cred *new, const struct cred *old);
1722int security_kernel_act_as(struct cred *new, u32 secid); 1823int security_kernel_act_as(struct cred *new, u32 secid);
1723int security_kernel_create_files_as(struct cred *new, struct inode *inode); 1824int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1825int security_kernel_module_request(void);
1724int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); 1826int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
1725int security_task_fix_setuid(struct cred *new, const struct cred *old, 1827int security_task_fix_setuid(struct cred *new, const struct cred *old,
1726 int flags); 1828 int flags);
@@ -1776,6 +1878,9 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
1776int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); 1878int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
1777void security_release_secctx(char *secdata, u32 seclen); 1879void security_release_secctx(char *secdata, u32 seclen);
1778 1880
1881int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
1882int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
1883int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
1779#else /* CONFIG_SECURITY */ 1884#else /* CONFIG_SECURITY */
1780struct security_mnt_opts { 1885struct security_mnt_opts {
1781}; 1886};
@@ -1798,10 +1903,10 @@ static inline int security_init(void)
1798 return 0; 1903 return 0;
1799} 1904}
1800 1905
1801static inline int security_ptrace_may_access(struct task_struct *child, 1906static inline int security_ptrace_access_check(struct task_struct *child,
1802 unsigned int mode) 1907 unsigned int mode)
1803{ 1908{
1804 return cap_ptrace_may_access(child, mode); 1909 return cap_ptrace_access_check(child, mode);
1805} 1910}
1806 1911
1807static inline int security_ptrace_traceme(struct task_struct *parent) 1912static inline int security_ptrace_traceme(struct task_struct *parent)
@@ -2197,9 +2302,7 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot,
2197 unsigned long addr, 2302 unsigned long addr,
2198 unsigned long addr_only) 2303 unsigned long addr_only)
2199{ 2304{
2200 if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO)) 2305 return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
2201 return -EACCES;
2202 return 0;
2203} 2306}
2204 2307
2205static inline int security_file_mprotect(struct vm_area_struct *vma, 2308static inline int security_file_mprotect(struct vm_area_struct *vma,
@@ -2248,6 +2351,11 @@ static inline int security_task_create(unsigned long clone_flags)
2248 return 0; 2351 return 0;
2249} 2352}
2250 2353
2354static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
2355{
2356 return 0;
2357}
2358
2251static inline void security_cred_free(struct cred *cred) 2359static inline void security_cred_free(struct cred *cred)
2252{ } 2360{ }
2253 2361
@@ -2263,6 +2371,11 @@ static inline void security_commit_creds(struct cred *new,
2263{ 2371{
2264} 2372}
2265 2373
2374static inline void security_transfer_creds(struct cred *new,
2375 const struct cred *old)
2376{
2377}
2378
2266static inline int security_kernel_act_as(struct cred *cred, u32 secid) 2379static inline int security_kernel_act_as(struct cred *cred, u32 secid)
2267{ 2380{
2268 return 0; 2381 return 0;
@@ -2274,6 +2387,11 @@ static inline int security_kernel_create_files_as(struct cred *cred,
2274 return 0; 2387 return 0;
2275} 2388}
2276 2389
2390static inline int security_kernel_module_request(void)
2391{
2392 return 0;
2393}
2394
2277static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, 2395static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
2278 int flags) 2396 int flags)
2279{ 2397{
@@ -2519,6 +2637,19 @@ static inline int security_secctx_to_secid(const char *secdata,
2519static inline void security_release_secctx(char *secdata, u32 seclen) 2637static inline void security_release_secctx(char *secdata, u32 seclen)
2520{ 2638{
2521} 2639}
2640
2641static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
2642{
2643 return -EOPNOTSUPP;
2644}
2645static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
2646{
2647 return -EOPNOTSUPP;
2648}
2649static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
2650{
2651 return -EOPNOTSUPP;
2652}
2522#endif /* CONFIG_SECURITY */ 2653#endif /* CONFIG_SECURITY */
2523 2654
2524#ifdef CONFIG_SECURITY_NETWORK 2655#ifdef CONFIG_SECURITY_NETWORK
@@ -2557,6 +2688,9 @@ void security_inet_csk_clone(struct sock *newsk,
2557 const struct request_sock *req); 2688 const struct request_sock *req);
2558void security_inet_conn_established(struct sock *sk, 2689void security_inet_conn_established(struct sock *sk,
2559 struct sk_buff *skb); 2690 struct sk_buff *skb);
2691int security_tun_dev_create(void);
2692void security_tun_dev_post_create(struct sock *sk);
2693int security_tun_dev_attach(struct sock *sk);
2560 2694
2561#else /* CONFIG_SECURITY_NETWORK */ 2695#else /* CONFIG_SECURITY_NETWORK */
2562static inline int security_unix_stream_connect(struct socket *sock, 2696static inline int security_unix_stream_connect(struct socket *sock,
@@ -2707,6 +2841,20 @@ static inline void security_inet_conn_established(struct sock *sk,
2707 struct sk_buff *skb) 2841 struct sk_buff *skb)
2708{ 2842{
2709} 2843}
2844
2845static inline int security_tun_dev_create(void)
2846{
2847 return 0;
2848}
2849
2850static inline void security_tun_dev_post_create(struct sock *sk)
2851{
2852}
2853
2854static inline int security_tun_dev_attach(struct sock *sk)
2855{
2856 return 0;
2857}
2710#endif /* CONFIG_SECURITY_NETWORK */ 2858#endif /* CONFIG_SECURITY_NETWORK */
2711 2859
2712#ifdef CONFIG_SECURITY_NETWORK_XFRM 2860#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2863,6 +3011,9 @@ void security_key_free(struct key *key);
2863int security_key_permission(key_ref_t key_ref, 3011int security_key_permission(key_ref_t key_ref,
2864 const struct cred *cred, key_perm_t perm); 3012 const struct cred *cred, key_perm_t perm);
2865int security_key_getsecurity(struct key *key, char **_buffer); 3013int security_key_getsecurity(struct key *key, char **_buffer);
3014int security_key_session_to_parent(const struct cred *cred,
3015 const struct cred *parent_cred,
3016 struct key *key);
2866 3017
2867#else 3018#else
2868 3019
@@ -2890,6 +3041,13 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
2890 return 0; 3041 return 0;
2891} 3042}
2892 3043
3044static inline int security_key_session_to_parent(const struct cred *cred,
3045 const struct cred *parent_cred,
3046 struct key *key)
3047{
3048 return 0;
3049}
3050
2893#endif 3051#endif
2894#endif /* CONFIG_KEYS */ 3052#endif /* CONFIG_KEYS */
2895 3053
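A minimal usage sketch for the new TUN hooks declared above (only the security_tun_dev_*() calls come from this header; the driver-side function names and error handling are assumptions):

/* Sketch only: how a TUN-like driver could wire up the new LSM hooks. */
static int tun_example_create(struct sock *sk)
{
	int err;

	err = security_tun_dev_create();	/* may the caller create a tun device? */
	if (err)
		return err;
	/* ... allocate and register the device ... */
	security_tun_dev_post_create(sk);	/* let the LSM label the new socket */
	return 0;
}

static int tun_example_attach(struct sock *sk)
{
	/* re-attaching to a persistent device gets its own check */
	return security_tun_dev_attach(sk);
}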
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index abff6c9b413c..6d3f2f449ead 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -39,7 +39,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
39} 39}
40 40
41#ifdef CONFIG_TMPFS_POSIX_ACL 41#ifdef CONFIG_TMPFS_POSIX_ACL
42int shmem_permission(struct inode *, int); 42int shmem_check_acl(struct inode *, int);
43int shmem_acl_init(struct inode *, struct inode *); 43int shmem_acl_init(struct inode *, struct inode *);
44 44
45extern struct xattr_handler shmem_xattr_acl_access_handler; 45extern struct xattr_handler shmem_xattr_acl_access_handler;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b47b3f039d14..f2c69a2cca17 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1342,12 +1342,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
1342 * shifting the start of the packet by 2 bytes. Drivers should do this 1342 * shifting the start of the packet by 2 bytes. Drivers should do this
1343 * with: 1343 * with:
1344 * 1344 *
1345 * skb_reserve(NET_IP_ALIGN); 1345 * skb_reserve(skb, NET_IP_ALIGN);
1346 * 1346 *
1347 * The downside to this alignment of the IP header is that the DMA is now 1347 * The downside to this alignment of the IP header is that the DMA is now
1348 * unaligned. On some architectures the cost of an unaligned DMA is high 1348 * unaligned. On some architectures the cost of an unaligned DMA is high
1349 * and this cost outweighs the gains made by aligning the IP header. 1349 * and this cost outweighs the gains made by aligning the IP header.
1350 * 1350 *
1351 * Since this trade off varies between architectures, we allow NET_IP_ALIGN 1351 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
1352 * to be overridden. 1352 * to be overridden.
1353 */ 1353 */
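The corrected comment above describes the usual receive-buffer idiom; a short sketch, assuming a hypothetical driver context for dev and len:

/* Sketch of the rx alignment idiom; the two calls below are the point. */
static struct sk_buff *example_rx_alloc(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* IP header now lands 4-byte aligned */
	return skb;
}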
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..f0ca7a7a1757 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
143 */ 143 */
144#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 144#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
145 145
146/*
147 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
148 */
149#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
150# include <linux/spinlock_api_smp.h>
151#else
152# include <linux/spinlock_api_up.h>
153#endif
154
155#ifdef CONFIG_DEBUG_SPINLOCK 146#ifdef CONFIG_DEBUG_SPINLOCK
156 extern void _raw_spin_lock(spinlock_t *lock); 147 extern void _raw_spin_lock(spinlock_t *lock);
157#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 148#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
268 259
269#define spin_lock_irq(lock) _spin_lock_irq(lock) 260#define spin_lock_irq(lock) _spin_lock_irq(lock)
270#define spin_lock_bh(lock) _spin_lock_bh(lock) 261#define spin_lock_bh(lock) _spin_lock_bh(lock)
271
272#define read_lock_irq(lock) _read_lock_irq(lock) 262#define read_lock_irq(lock) _read_lock_irq(lock)
273#define read_lock_bh(lock) _read_lock_bh(lock) 263#define read_lock_bh(lock) _read_lock_bh(lock)
274
275#define write_lock_irq(lock) _write_lock_irq(lock) 264#define write_lock_irq(lock) _write_lock_irq(lock)
276#define write_lock_bh(lock) _write_lock_bh(lock) 265#define write_lock_bh(lock) _write_lock_bh(lock)
277 266#define spin_unlock(lock) _spin_unlock(lock)
278/* 267#define read_unlock(lock) _read_unlock(lock)
279 * We inline the unlock functions in the nondebug case: 268#define write_unlock(lock) _write_unlock(lock)
280 */ 269#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
281#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ 270#define read_unlock_irq(lock) _read_unlock_irq(lock)
282 !defined(CONFIG_SMP) 271#define write_unlock_irq(lock) _write_unlock_irq(lock)
283# define spin_unlock(lock) _spin_unlock(lock)
284# define read_unlock(lock) _read_unlock(lock)
285# define write_unlock(lock) _write_unlock(lock)
286# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
287# define read_unlock_irq(lock) _read_unlock_irq(lock)
288# define write_unlock_irq(lock) _write_unlock_irq(lock)
289#else
290# define spin_unlock(lock) \
291 do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
292# define read_unlock(lock) \
293 do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
294# define write_unlock(lock) \
295 do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
296# define spin_unlock_irq(lock) \
297do { \
298 __raw_spin_unlock(&(lock)->raw_lock); \
299 __release(lock); \
300 local_irq_enable(); \
301} while (0)
302# define read_unlock_irq(lock) \
303do { \
304 __raw_read_unlock(&(lock)->raw_lock); \
305 __release(lock); \
306 local_irq_enable(); \
307} while (0)
308# define write_unlock_irq(lock) \
309do { \
310 __raw_write_unlock(&(lock)->raw_lock); \
311 __release(lock); \
312 local_irq_enable(); \
313} while (0)
314#endif
315 272
316#define spin_unlock_irqrestore(lock, flags) \ 273#define spin_unlock_irqrestore(lock, flags) \
317 do { \ 274 do { \
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
380 */ 337 */
381#define spin_can_lock(lock) (!spin_is_locked(lock)) 338#define spin_can_lock(lock) (!spin_is_locked(lock))
382 339
340/*
341 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
342 */
343#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
344# include <linux/spinlock_api_smp.h>
345#else
346# include <linux/spinlock_api_up.h>
347#endif
348
383#endif /* __LINUX_SPINLOCK_H */ 349#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b5..7a7e18fc2415 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock); 61 __releases(lock);
62 62
63/*
64 * We inline the unlock functions in the nondebug case:
65 */
66#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
67#define __always_inline__spin_unlock
68#define __always_inline__read_unlock
69#define __always_inline__write_unlock
70#define __always_inline__spin_unlock_irq
71#define __always_inline__read_unlock_irq
72#define __always_inline__write_unlock_irq
73#endif
74
75#ifndef CONFIG_DEBUG_SPINLOCK
76#ifndef CONFIG_GENERIC_LOCKBREAK
77
78#ifdef __always_inline__spin_lock
79#define _spin_lock(lock) __spin_lock(lock)
80#endif
81
82#ifdef __always_inline__read_lock
83#define _read_lock(lock) __read_lock(lock)
84#endif
85
86#ifdef __always_inline__write_lock
87#define _write_lock(lock) __write_lock(lock)
88#endif
89
90#ifdef __always_inline__spin_lock_bh
91#define _spin_lock_bh(lock) __spin_lock_bh(lock)
92#endif
93
94#ifdef __always_inline__read_lock_bh
95#define _read_lock_bh(lock) __read_lock_bh(lock)
96#endif
97
98#ifdef __always_inline__write_lock_bh
99#define _write_lock_bh(lock) __write_lock_bh(lock)
100#endif
101
102#ifdef __always_inline__spin_lock_irq
103#define _spin_lock_irq(lock) __spin_lock_irq(lock)
104#endif
105
106#ifdef __always_inline__read_lock_irq
107#define _read_lock_irq(lock) __read_lock_irq(lock)
108#endif
109
110#ifdef __always_inline__write_lock_irq
111#define _write_lock_irq(lock) __write_lock_irq(lock)
112#endif
113
114#ifdef __always_inline__spin_lock_irqsave
115#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
116#endif
117
118#ifdef __always_inline__read_lock_irqsave
119#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
120#endif
121
122#ifdef __always_inline__write_lock_irqsave
123#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
124#endif
125
126#endif /* !CONFIG_GENERIC_LOCKBREAK */
127
128#ifdef __always_inline__spin_trylock
129#define _spin_trylock(lock) __spin_trylock(lock)
130#endif
131
132#ifdef __always_inline__read_trylock
133#define _read_trylock(lock) __read_trylock(lock)
134#endif
135
136#ifdef __always_inline__write_trylock
137#define _write_trylock(lock) __write_trylock(lock)
138#endif
139
140#ifdef __always_inline__spin_trylock_bh
141#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
142#endif
143
144#ifdef __always_inline__spin_unlock
145#define _spin_unlock(lock) __spin_unlock(lock)
146#endif
147
148#ifdef __always_inline__read_unlock
149#define _read_unlock(lock) __read_unlock(lock)
150#endif
151
152#ifdef __always_inline__write_unlock
153#define _write_unlock(lock) __write_unlock(lock)
154#endif
155
156#ifdef __always_inline__spin_unlock_bh
157#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
158#endif
159
160#ifdef __always_inline__read_unlock_bh
161#define _read_unlock_bh(lock) __read_unlock_bh(lock)
162#endif
163
164#ifdef __always_inline__write_unlock_bh
165#define _write_unlock_bh(lock) __write_unlock_bh(lock)
166#endif
167
168#ifdef __always_inline__spin_unlock_irq
169#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
170#endif
171
172#ifdef __always_inline__read_unlock_irq
173#define _read_unlock_irq(lock) __read_unlock_irq(lock)
174#endif
175
176#ifdef __always_inline__write_unlock_irq
177#define _write_unlock_irq(lock) __write_unlock_irq(lock)
178#endif
179
180#ifdef __always_inline__spin_unlock_irqrestore
181#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
182#endif
183
184#ifdef __always_inline__read_unlock_irqrestore
185#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
186#endif
187
188#ifdef __always_inline__write_unlock_irqrestore
189#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
190#endif
191
192#endif /* CONFIG_DEBUG_SPINLOCK */
193
194static inline int __spin_trylock(spinlock_t *lock)
195{
196 preempt_disable();
197 if (_raw_spin_trylock(lock)) {
198 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
199 return 1;
200 }
201 preempt_enable();
202 return 0;
203}
204
205static inline int __read_trylock(rwlock_t *lock)
206{
207 preempt_disable();
208 if (_raw_read_trylock(lock)) {
209 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
210 return 1;
211 }
212 preempt_enable();
213 return 0;
214}
215
216static inline int __write_trylock(rwlock_t *lock)
217{
218 preempt_disable();
219 if (_raw_write_trylock(lock)) {
220 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
221 return 1;
222 }
223 preempt_enable();
224 return 0;
225}
226
227/*
228 * If lockdep is enabled then we use the non-preemption spin-ops
229 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
230 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
231 */
232#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
233
234static inline void __read_lock(rwlock_t *lock)
235{
236 preempt_disable();
237 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
238 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
239}
240
241static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
242{
243 unsigned long flags;
244
245 local_irq_save(flags);
246 preempt_disable();
247 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
248 /*
249 * On lockdep we don't want the hand-coded irq-enable of
250 * _raw_spin_lock_flags() code, because lockdep assumes
251 * that interrupts are not re-enabled during lock-acquire:
252 */
253#ifdef CONFIG_LOCKDEP
254 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
255#else
256 _raw_spin_lock_flags(lock, &flags);
257#endif
258 return flags;
259}
260
261static inline void __spin_lock_irq(spinlock_t *lock)
262{
263 local_irq_disable();
264 preempt_disable();
265 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
266 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
267}
268
269static inline void __spin_lock_bh(spinlock_t *lock)
270{
271 local_bh_disable();
272 preempt_disable();
273 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
274 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
275}
276
277static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
278{
279 unsigned long flags;
280
281 local_irq_save(flags);
282 preempt_disable();
283 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
284 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
285 _raw_read_lock_flags, &flags);
286 return flags;
287}
288
289static inline void __read_lock_irq(rwlock_t *lock)
290{
291 local_irq_disable();
292 preempt_disable();
293 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
294 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
295}
296
297static inline void __read_lock_bh(rwlock_t *lock)
298{
299 local_bh_disable();
300 preempt_disable();
301 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
302 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
303}
304
305static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
306{
307 unsigned long flags;
308
309 local_irq_save(flags);
310 preempt_disable();
311 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
312 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
313 _raw_write_lock_flags, &flags);
314 return flags;
315}
316
317static inline void __write_lock_irq(rwlock_t *lock)
318{
319 local_irq_disable();
320 preempt_disable();
321 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
322 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
323}
324
325static inline void __write_lock_bh(rwlock_t *lock)
326{
327 local_bh_disable();
328 preempt_disable();
329 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
330 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
331}
332
333static inline void __spin_lock(spinlock_t *lock)
334{
335 preempt_disable();
336 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
337 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
338}
339
340static inline void __write_lock(rwlock_t *lock)
341{
342 preempt_disable();
343 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
344 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
345}
346
347#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
348
349static inline void __spin_unlock(spinlock_t *lock)
350{
351 spin_release(&lock->dep_map, 1, _RET_IP_);
352 _raw_spin_unlock(lock);
353 preempt_enable();
354}
355
356static inline void __write_unlock(rwlock_t *lock)
357{
358 rwlock_release(&lock->dep_map, 1, _RET_IP_);
359 _raw_write_unlock(lock);
360 preempt_enable();
361}
362
363static inline void __read_unlock(rwlock_t *lock)
364{
365 rwlock_release(&lock->dep_map, 1, _RET_IP_);
366 _raw_read_unlock(lock);
367 preempt_enable();
368}
369
370static inline void __spin_unlock_irqrestore(spinlock_t *lock,
371 unsigned long flags)
372{
373 spin_release(&lock->dep_map, 1, _RET_IP_);
374 _raw_spin_unlock(lock);
375 local_irq_restore(flags);
376 preempt_enable();
377}
378
379static inline void __spin_unlock_irq(spinlock_t *lock)
380{
381 spin_release(&lock->dep_map, 1, _RET_IP_);
382 _raw_spin_unlock(lock);
383 local_irq_enable();
384 preempt_enable();
385}
386
387static inline void __spin_unlock_bh(spinlock_t *lock)
388{
389 spin_release(&lock->dep_map, 1, _RET_IP_);
390 _raw_spin_unlock(lock);
391 preempt_enable_no_resched();
392 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
393}
394
395static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
396{
397 rwlock_release(&lock->dep_map, 1, _RET_IP_);
398 _raw_read_unlock(lock);
399 local_irq_restore(flags);
400 preempt_enable();
401}
402
403static inline void __read_unlock_irq(rwlock_t *lock)
404{
405 rwlock_release(&lock->dep_map, 1, _RET_IP_);
406 _raw_read_unlock(lock);
407 local_irq_enable();
408 preempt_enable();
409}
410
411static inline void __read_unlock_bh(rwlock_t *lock)
412{
413 rwlock_release(&lock->dep_map, 1, _RET_IP_);
414 _raw_read_unlock(lock);
415 preempt_enable_no_resched();
416 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
417}
418
419static inline void __write_unlock_irqrestore(rwlock_t *lock,
420 unsigned long flags)
421{
422 rwlock_release(&lock->dep_map, 1, _RET_IP_);
423 _raw_write_unlock(lock);
424 local_irq_restore(flags);
425 preempt_enable();
426}
427
428static inline void __write_unlock_irq(rwlock_t *lock)
429{
430 rwlock_release(&lock->dep_map, 1, _RET_IP_);
431 _raw_write_unlock(lock);
432 local_irq_enable();
433 preempt_enable();
434}
435
436static inline void __write_unlock_bh(rwlock_t *lock)
437{
438 rwlock_release(&lock->dep_map, 1, _RET_IP_);
439 _raw_write_unlock(lock);
440 preempt_enable_no_resched();
441 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
442}
443
444static inline int __spin_trylock_bh(spinlock_t *lock)
445{
446 local_bh_disable();
447 preempt_disable();
448 if (_raw_spin_trylock(lock)) {
449 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
450 return 1;
451 }
452 preempt_enable_no_resched();
453 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
454 return 0;
455}
456
63#endif /* __LINUX_SPINLOCK_API_SMP_H */ 457#endif /* __LINUX_SPINLOCK_API_SMP_H */
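The callers' view is unchanged by moving these inlines out of spinlock.h; a minimal sketch of the API they back (example_lock and the critical section are illustrative):

/* Sketch: spin_lock_irqsave() resolves to the _spin_lock_irqsave() family on SMP. */
static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data shared with an interrupt handler ... */
	spin_unlock_irqrestore(&example_lock, flags);
}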
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cb1a6631b8f4..73b1f1cec423 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -14,7 +14,6 @@ struct scatterlist;
14 */ 14 */
15#define IO_TLB_SEGSIZE 128 15#define IO_TLB_SEGSIZE 128
16 16
17
18/* 17/*
19 * log of the size of each IO TLB slab. The number of slabs is command line 18 * log of the size of each IO TLB slab. The number of slabs is command line
20 * controllable. 19 * controllable.
@@ -24,16 +23,6 @@ struct scatterlist;
24extern void 23extern void
25swiotlb_init(void); 24swiotlb_init(void);
26 25
27extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
28extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
29
30extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
31 phys_addr_t address);
32extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
33 dma_addr_t address);
34
35extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
36
37extern void 26extern void
38*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 27*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flags); 28 dma_addr_t *dma_handle, gfp_t flags);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1488d8c81aac..0d3974f59c53 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
23 */ 23 */
24#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ 24#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
25#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ 25#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
26#define NR_LDISCS 19 26#define NR_LDISCS 20
27 27
28/* line disciplines */ 28/* line disciplines */
29#define N_TTY 0 29#define N_TTY 0
@@ -47,6 +47,8 @@
47#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ 47#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
48#define N_PPS 18 /* Pulse per Second */ 48#define N_PPS 18 /* Pulse per Second */
49 49
50#define N_V253 19 /* Codec control over voice modem */
51
50/* 52/*
51 * This character is the same as _POSIX_VDISABLE: it cannot be used as 53 * This character is the same as _POSIX_VDISABLE: it cannot be used as
52 * a c_cc[] character, but indicates that a particular special character 54 * a c_cc[] character, but indicates that a particular special character
@@ -394,6 +396,7 @@ extern void __do_SAK(struct tty_struct *tty);
394extern void disassociate_ctty(int priv); 396extern void disassociate_ctty(int priv);
395extern void no_tty(void); 397extern void no_tty(void);
396extern void tty_flip_buffer_push(struct tty_struct *tty); 398extern void tty_flip_buffer_push(struct tty_struct *tty);
399extern void tty_flush_to_ldisc(struct tty_struct *tty);
397extern void tty_buffer_free_all(struct tty_struct *tty); 400extern void tty_buffer_free_all(struct tty_struct *tty);
398extern void tty_buffer_flush(struct tty_struct *tty); 401extern void tty_buffer_flush(struct tty_struct *tty);
399extern void tty_buffer_init(struct tty_struct *tty); 402extern void tty_buffer_init(struct tty_struct *tty);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 40f38d896777..0c4ee9b88f85 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -144,7 +144,7 @@ struct tty_ldisc_ops {
144 144
145struct tty_ldisc { 145struct tty_ldisc {
146 struct tty_ldisc_ops *ops; 146 struct tty_ldisc_ops *ops;
147 int refcount; 147 atomic_t users;
148}; 148};
149 149
150#define TTY_LDISC_MAGIC 0x5403 150#define TTY_LDISC_MAGIC 0x5403
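A short sketch of the reference counting the atomic_t conversion permits without holding a lock; the helper names are assumptions, not part of this header:

/* Sketch only: lock-free get/put on tty_ldisc->users. */
static void example_ldisc_get(struct tty_ldisc *ld)
{
	atomic_inc(&ld->users);
}

static int example_ldisc_put(struct tty_ldisc *ld)
{
	return atomic_dec_and_test(&ld->users);	/* non-zero once the last user is gone */
}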
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index ed889f4168f3..ae779bb8cc0f 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -73,6 +73,10 @@
73 73
74#define UCB_ADC_DATA 0x68 74#define UCB_ADC_DATA 0x68
75#define UCB_ADC_DAT_VALID (1 << 15) 75#define UCB_ADC_DAT_VALID (1 << 15)
76
77#define UCB_FCSR 0x6c
78#define UCB_FCSR_AVE (1 << 12)
79
76#define UCB_ADC_DAT_MASK 0x3ff 80#define UCB_ADC_DAT_MASK 0x3ff
77 81
78#define UCB_ID 0x7e 82#define UCB_ID 0x7e
diff --git a/include/linux/uio.h b/include/linux/uio.h
index b7fe13883bdb..98c114323a8b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -19,15 +19,6 @@ struct iovec
19 __kernel_size_t iov_len; /* Must be size_t (1003.1g) */ 19 __kernel_size_t iov_len; /* Must be size_t (1003.1g) */
20}; 20};
21 21
22#ifdef __KERNEL__
23
24struct kvec {
25 void *iov_base; /* and that should *never* hold a userland pointer */
26 size_t iov_len;
27};
28
29#endif
30
31/* 22/*
32 * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1) 23 * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
33 */ 24 */
@@ -35,6 +26,13 @@ struct kvec {
35#define UIO_FASTIOV 8 26#define UIO_FASTIOV 8
36#define UIO_MAXIOV 1024 27#define UIO_MAXIOV 1024
37 28
29#ifdef __KERNEL__
30
31struct kvec {
32 void *iov_base; /* and that should *never* hold a userland pointer */
33 size_t iov_len;
34};
35
38/* 36/*
39 * Total number of bytes covered by an iovec. 37 * Total number of bytes covered by an iovec.
40 * 38 *
@@ -53,5 +51,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
53} 51}
54 52
55unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); 53unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
54#endif
56 55
57#endif 56#endif
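A small sketch of the iov_length() helper kept above (the buffers and sizes are made up):

/* Sketch: summing the byte count of an iovec array. */
static size_t example_iov_total(void *buf0, void *buf1)
{
	struct iovec iov[2] = {
		{ .iov_base = buf0, .iov_len = 16 },
		{ .iov_base = buf1, .iov_len = 32 },
	};

	return iov_length(iov, 2);	/* 16 + 32 = 48 */
}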
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 95846d988011..74f16876f38d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -338,6 +338,7 @@ struct v4l2_pix_format {
338/* Vendor-specific formats */ 338/* Vendor-specific formats */
339#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ 339#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
340#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ 340#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
341#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
341#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ 342#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
342#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ 343#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
343#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ 344#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index be7d255fc7cf..8dab9f2b8832 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -20,8 +20,7 @@
20 20
21#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ 21#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
22 22
23struct virtio_blk_config 23struct virtio_blk_config {
24{
25 /* The capacity (in 512-byte sectors). */ 24 /* The capacity (in 512-byte sectors). */
26 __u64 capacity; 25 __u64 capacity;
27 /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ 26 /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
@@ -50,8 +49,7 @@ struct virtio_blk_config
50#define VIRTIO_BLK_T_BARRIER 0x80000000 49#define VIRTIO_BLK_T_BARRIER 0x80000000
51 50
52/* This is the first element of the read scatter-gather list. */ 51/* This is the first element of the read scatter-gather list. */
53struct virtio_blk_outhdr 52struct virtio_blk_outhdr {
54{
55 /* VIRTIO_BLK_T* */ 53 /* VIRTIO_BLK_T* */
56 __u32 type; 54 __u32 type;
57 /* io priority. */ 55 /* io priority. */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 99f514575f6a..e547e3c8ee9a 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,8 +79,7 @@
79 * the dev->feature bits if it wants. 79 * the dev->feature bits if it wants.
80 */ 80 */
81typedef void vq_callback_t(struct virtqueue *); 81typedef void vq_callback_t(struct virtqueue *);
82struct virtio_config_ops 82struct virtio_config_ops {
83{
84 void (*get)(struct virtio_device *vdev, unsigned offset, 83 void (*get)(struct virtio_device *vdev, unsigned offset,
85 void *buf, unsigned len); 84 void *buf, unsigned len);
86 void (*set)(struct virtio_device *vdev, unsigned offset, 85 void (*set)(struct virtio_device *vdev, unsigned offset,
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cec79adbe3ea..d8dd539c9f48 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -27,11 +27,11 @@
27#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ 27#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
28#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ 28#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
29#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ 29#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
30#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
30 31
31#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ 32#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
32 33
33struct virtio_net_config 34struct virtio_net_config {
34{
35 /* The config defining mac address (if VIRTIO_NET_F_MAC) */ 35 /* The config defining mac address (if VIRTIO_NET_F_MAC) */
36 __u8 mac[6]; 36 __u8 mac[6];
37 /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ 37 /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
@@ -40,8 +40,7 @@ struct virtio_net_config
40 40
41/* This is the first element of the scatter-gather list. If you don't 41/* This is the first element of the scatter-gather list. If you don't
42 * specify GSO or CSUM features, you can simply ignore the header. */ 42 * specify GSO or CSUM features, you can simply ignore the header. */
43struct virtio_net_hdr 43struct virtio_net_hdr {
44{
45#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset 44#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
46 __u8 flags; 45 __u8 flags;
47#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame 46#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
@@ -81,14 +80,19 @@ typedef __u8 virtio_net_ctrl_ack;
81#define VIRTIO_NET_ERR 1 80#define VIRTIO_NET_ERR 1
82 81
83/* 82/*
84 * Control the RX mode, i.e. promiscuous and allmulti. PROMISC and 83 * Control the RX mode, i.e. promiscuous, allmulti, etc...
85 * ALLMULTI commands require an "out" sg entry containing a 1 byte 84 * All commands require an "out" sg entry containing a 1 byte
86 * state value, zero = disable, non-zero = enable. These commands 85 * state value, zero = disable, non-zero = enable. Commands
87 * are supported with the VIRTIO_NET_F_CTRL_RX feature. 86 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
87 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
88 */ 88 */
89#define VIRTIO_NET_CTRL_RX 0 89#define VIRTIO_NET_CTRL_RX 0
90 #define VIRTIO_NET_CTRL_RX_PROMISC 0 90 #define VIRTIO_NET_CTRL_RX_PROMISC 0
91 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1 91 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
92 #define VIRTIO_NET_CTRL_RX_ALLUNI 2
93 #define VIRTIO_NET_CTRL_RX_NOMULTI 3
94 #define VIRTIO_NET_CTRL_RX_NOUNI 4
95 #define VIRTIO_NET_CTRL_RX_NOBCAST 5
92 96
93/* 97/*
94 * Control the MAC filter table. 98 * Control the MAC filter table.
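The updated comment above documents the command format: one class/command pair plus a single 1-byte "out" buffer. A hedged sketch of building such a command; virtnet_send_command() and struct virtnet_info live in the driver and are assumptions here:

/* Sketch only: toggling allmulti via the control-virtqueue format above. */
static int example_set_allmulti(struct virtnet_info *vi, bool on)
{
	u8 state = on ? 1 : 0;
	struct scatterlist sg;

	sg_init_one(&sg, &state, sizeof(state));
	return virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				    VIRTIO_NET_CTRL_RX_ALLMULTI, &sg);
}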
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 693e0ec5afa6..e4d144b132b5 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -30,8 +30,7 @@
30#define VIRTIO_RING_F_INDIRECT_DESC 28 30#define VIRTIO_RING_F_INDIRECT_DESC 28
31 31
32/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ 32/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
33struct vring_desc 33struct vring_desc {
34{
35 /* Address (guest-physical). */ 34 /* Address (guest-physical). */
36 __u64 addr; 35 __u64 addr;
37 /* Length. */ 36 /* Length. */
@@ -42,24 +41,21 @@ struct vring_desc
42 __u16 next; 41 __u16 next;
43}; 42};
44 43
45struct vring_avail 44struct vring_avail {
46{
47 __u16 flags; 45 __u16 flags;
48 __u16 idx; 46 __u16 idx;
49 __u16 ring[]; 47 __u16 ring[];
50}; 48};
51 49
52/* u32 is used here for ids for padding reasons. */ 50/* u32 is used here for ids for padding reasons. */
53struct vring_used_elem 51struct vring_used_elem {
54{
55 /* Index of start of used descriptor chain. */ 52 /* Index of start of used descriptor chain. */
56 __u32 id; 53 __u32 id;
57 /* Total length of the descriptor chain which was used (written to) */ 54 /* Total length of the descriptor chain which was used (written to) */
58 __u32 len; 55 __u32 len;
59}; 56};
60 57
61struct vring_used 58struct vring_used {
62{
63 __u16 flags; 59 __u16 flags;
64 __u16 idx; 60 __u16 idx;
65 struct vring_used_elem ring[]; 61 struct vring_used_elem ring[];
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6788e1a4d4ca..cf3c2f5dba51 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,7 +77,14 @@ struct task_struct;
77#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ 77#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
78 { .flags = word, .bit_nr = bit, } 78 { .flags = word, .bit_nr = bit, }
79 79
80extern void init_waitqueue_head(wait_queue_head_t *q); 80extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
81
82#define init_waitqueue_head(q) \
83 do { \
84 static struct lock_class_key __key; \
85 \
86 __init_waitqueue_head((q), &__key); \
87 } while (0)
81 88
82#ifdef CONFIG_LOCKDEP 89#ifdef CONFIG_LOCKDEP
83# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ 90# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
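The new macro gives every init_waitqueue_head() call site its own static lock_class_key, so lockdep can distinguish otherwise identical waitqueues. Illustrative use (example_wq is made up):

/* Sketch: each expansion of the macro above gets a distinct lock class. */
static wait_queue_head_t example_wq;

static void example_init(void)
{
	init_waitqueue_head(&example_wq);
}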
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 13e1adf55c4c..6273fa97b527 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
240 return ret; 240 return ret;
241} 241}
242 242
243/*
244 * Like above, but uses del_timer() instead of del_timer_sync(). This means
245 * that if it returns 0, the timer function may be running and the queueing is in
246 * progress.
247 */
248static inline int __cancel_delayed_work(struct delayed_work *work)
249{
250 int ret;
251
252 ret = del_timer(&work->timer);
253 if (ret)
254 work_clear_pending(&work->work);
255 return ret;
256}
257
243extern int cancel_delayed_work_sync(struct delayed_work *work); 258extern int cancel_delayed_work_sync(struct delayed_work *work);
244 259
245/* Obsolete. use cancel_delayed_work_sync() */ 260/* Obsolete. use cancel_delayed_work_sync() */
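The comment spells out the weaker guarantee of the del_timer() variant; the upside is that it never sleeps, so it can run in atomic context. A sketch under that assumption (the rearm policy is illustrative):

/* Sketch only: cancel-and-requeue from a context that must not sleep. */
static void example_rearm(struct delayed_work *dwork)
{
	if (__cancel_delayed_work(dwork))		/* pending timer removed, work cleared */
		schedule_delayed_work(dwork, HZ);	/* safe to queue again right away */
}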
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3224820c8514..78b1e4684cc9 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -14,17 +14,6 @@ extern struct list_head inode_in_use;
14extern struct list_head inode_unused; 14extern struct list_head inode_unused;
15 15
16/* 16/*
17 * Yes, writeback.h requires sched.h
18 * No, sched.h is not included from here.
19 */
20static inline int task_is_pdflush(struct task_struct *task)
21{
22 return task->flags & PF_FLUSHER;
23}
24
25#define current_is_pdflush() task_is_pdflush(current)
26
27/*
28 * fs/fs-writeback.c 17 * fs/fs-writeback.c
29 */ 18 */
30enum writeback_sync_modes { 19enum writeback_sync_modes {
@@ -40,6 +29,8 @@ enum writeback_sync_modes {
40struct writeback_control { 29struct writeback_control {
41 struct backing_dev_info *bdi; /* If !NULL, only write back this 30 struct backing_dev_info *bdi; /* If !NULL, only write back this
42 queue */ 31 queue */
32 struct super_block *sb; /* if !NULL, only write inodes from
33 this super_block */
43 enum writeback_sync_modes sync_mode; 34 enum writeback_sync_modes sync_mode;
44 unsigned long *older_than_this; /* If !NULL, only write back inodes 35 unsigned long *older_than_this; /* If !NULL, only write back inodes
45 older than this */ 36 older than this */
@@ -76,9 +67,13 @@ struct writeback_control {
76/* 67/*
77 * fs/fs-writeback.c 68 * fs/fs-writeback.c
78 */ 69 */
79void writeback_inodes(struct writeback_control *wbc); 70struct bdi_writeback;
80int inode_wait(void *); 71int inode_wait(void *);
81void sync_inodes_sb(struct super_block *, int wait); 72long writeback_inodes_sb(struct super_block *);
73long sync_inodes_sb(struct super_block *);
74void writeback_inodes_wbc(struct writeback_control *wbc);
75long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
76void wakeup_flusher_threads(long nr_pages);
82 77
83/* writeback.h requires fs.h; it, too, is not included from here. */ 78/* writeback.h requires fs.h; it, too, is not included from here. */
84static inline void wait_on_inode(struct inode *inode) 79static inline void wait_on_inode(struct inode *inode)
@@ -98,7 +93,6 @@ static inline void inode_sync_wait(struct inode *inode)
98/* 93/*
99 * mm/page-writeback.c 94 * mm/page-writeback.c
100 */ 95 */
101int wakeup_pdflush(long nr_pages);
102void laptop_io_completion(void); 96void laptop_io_completion(void);
103void laptop_sync_completion(void); 97void laptop_sync_completion(void);
104void throttle_vm_writeout(gfp_t gfp_mask); 98void throttle_vm_writeout(gfp_t gfp_mask);
@@ -150,7 +144,6 @@ balance_dirty_pages_ratelimited(struct address_space *mapping)
150typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, 144typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
151 void *data); 145 void *data);
152 146
153int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
154int generic_writepages(struct address_space *mapping, 147int generic_writepages(struct address_space *mapping,
155 struct writeback_control *wbc); 148 struct writeback_control *wbc);
156int write_cache_pages(struct address_space *mapping, 149int write_cache_pages(struct address_space *mapping,
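With the pdflush entry points removed above, per-superblock helpers take their place; a minimal sketch of the new calls (the wrapper is illustrative):

/* Sketch: async writeback vs. the synchronous, data-integrity variant. */
static void example_flush_sb(struct super_block *sb)
{
	writeback_inodes_sb(sb);	/* kick off background writeback for this sb */
	sync_inodes_sb(sb);		/* or: write out and wait on every inode */
}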
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d131e352cfe1..5c84af8c5f6f 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -49,6 +49,7 @@ struct xattr_handler {
49ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); 49ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
50ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); 50ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
51ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); 51ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
52int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
52int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); 53int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
53int vfs_removexattr(struct dentry *, const char *); 54int vfs_removexattr(struct dentry *, const char *);
54 55
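A hedged sketch of the new no-permission-check setxattr variant declared above, for callers that have already done their own access checks (the attribute name and wrapper are illustrative):

/* Sketch only: set a security xattr after the caller validated access itself. */
static int example_set_label(struct dentry *dentry, const void *ctx, size_t len)
{
	return __vfs_setxattr_noperm(dentry, "security.example", ctx, len, 0);
}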