Diffstat (limited to 'include/linux')
 include/linux/acpi.h              |   5
 include/linux/amba/mmci.h         |  23
 include/linux/bitops.h            |  30
 include/linux/clocksource.h       |  19
 include/linux/cpufreq.h           |  30
 include/linux/cpuset.h            |  16
 include/linux/debugobjects.h      |  11
 include/linux/device.h            |   4
 include/linux/elf.h               |   1
 include/linux/ftrace.h            |  61
 include/linux/ftrace_event.h      |   1
 include/linux/hrtimer.h           |   2
 include/linux/hw_breakpoint.h     |  25
 include/linux/init_task.h         |   1
 include/linux/interrupt.h         |  32
 include/linux/iommu.h             |  24
 include/linux/irq.h               |   1
 include/linux/kernel.h            |  11
 include/linux/ktime.h             |  10
 include/linux/mm.h                |   4
 include/linux/module.h            |   6
 include/linux/nfs_fs.h            |  14
 include/linux/nfs_fs_sb.h         |   1
 include/linux/nfs_xdr.h           |   7
 include/linux/nfsd/nfsfh.h        |   6
 include/linux/perf_event.h        |  83
 include/linux/ptrace.h            |  12
 include/linux/rbtree.h            |   5
 include/linux/rcupdate.h          |  50
 include/linux/rcutiny.h           |  31
 include/linux/rcutree.h           |   7
 include/linux/ring_buffer.h       |  10
 include/linux/sched.h             |  79
 include/linux/serial_sci.h        |   4
 include/linux/sh_clk.h            | 150
 include/linux/sh_dma.h            | 102
 include/linux/sh_intc.h           |  26
 include/linux/srcu.h              |   6
 include/linux/stop_machine.h      | 122
 include/linux/sunrpc/auth.h       |   1
 include/linux/sunrpc/auth_gss.h   |   1
 include/linux/sunrpc/gss_api.h    |   8
 include/linux/sunrpc/gss_krb5.h   | 184
 include/linux/sunrpc/metrics.h    |   7
 include/linux/sunrpc/sched.h      |  20
 include/linux/sunrpc/xdr.h        |   8
 include/linux/sunrpc/xprt.h       |  13
 include/linux/tick.h              |   5
 include/linux/time.h              |   1
 include/linux/timer.h             |  10
 include/linux/timex.h             |   5
 include/linux/tracepoint.h        | 114
 include/linux/wait.h              |  35
 53 files changed, 1107 insertions(+), 337 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b926afe8c03e..3da73f5f0ae9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,11 +116,12 @@ extern unsigned long acpi_realmode_flags;
 
 int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 6b4241748dda..7e466fe72025 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -6,8 +6,29 @@
 
 #include <linux/mmc/host.h>
 
+/**
+ * struct mmci_platform_data - platform configuration for the MMCI
+ * (also known as PL180) block.
+ * @f_max: the maximum operational frequency for this host in this
+ * platform configuration. When this is specified it takes precedence
+ * over the module parameter for the same frequency.
+ * @ocr_mask: available voltages on the 4 pins from the block, this
+ * is ignored if a regulator is used, see the MMC_VDD_* masks in
+ * mmc/host.h
+ * @translate_vdd: a callback function to translate a MMC_VDD_*
+ * mask into a value to be binary or:ed and written into the
+ * MMCIPWR register of the block
+ * @status: if no GPIO read function was given to the block in
+ * gpio_wp (below) this function will be called to determine
+ * whether a card is present in the MMC slot or not
+ * @gpio_wp: read this GPIO pin to see if the card is write protected
+ * @gpio_cd: read this GPIO pin to detect card insertion
+ * @capabilities: the capabilities of the block as implemented in
+ * this platform, signify anything MMC_CAP_* from mmc/host.h
+ */
 struct mmci_platform_data {
-	unsigned int ocr_mask;			/* available voltages */
+	unsigned int f_max;
+	unsigned int ocr_mask;
 	u32 (*translate_vdd)(struct device *, unsigned int);
 	unsigned int (*status)(struct device *);
 	int	gpio_wp;
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index b796eab5ca75..fc68053378ce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -10,6 +10,11 @@
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
@@ -44,31 +49,6 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
-/*
- * Clearly slow versions of the hweightN() functions, their benefit is
- * of course compile time evaluation of constant arguments.
- */
-#define HWEIGHT8(w)					\
-      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
-	(!!((w) & (1ULL << 0))) +			\
-	(!!((w) & (1ULL << 1))) +			\
-	(!!((w) & (1ULL << 2))) +			\
-	(!!((w) & (1ULL << 3))) +			\
-	(!!((w) & (1ULL << 4))) +			\
-	(!!((w) & (1ULL << 5))) +			\
-	(!!((w) & (1ULL << 6))) +			\
-	(!!((w) & (1ULL << 7))) )
-
-#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
-
-/*
- * Type invariant version that simply casts things to the
- * largest type.
- */
-#define HWEIGHT(w)   HWEIGHT64((u64)(w))
-
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 4bca8b60cdf7..5ea3c60c160c 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -273,7 +273,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 }
 
 
-/* used to install a new clocksource */
 extern int clocksource_register(struct clocksource*);
 extern void clocksource_unregister(struct clocksource*);
 extern void clocksource_touch_watchdog(void);
@@ -287,6 +286,24 @@ extern void clocksource_mark_unstable(struct clocksource *cs);
 extern void
 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
 
+/*
+ * Don't call __clocksource_register_scale directly, use
+ * clocksource_register_hz/khz
+ */
+extern int
+__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
+{
+	return __clocksource_register_scale(cs, 1, hz);
+}
+
+static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
+{
+	return __clocksource_register_scale(cs, 1000, khz);
+}
+
+
 static inline void
 clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
 {
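A usage sketch of the new registration helpers (not part of the patch): a driver hands the core its counter frequency and lets clocksource_register_hz() derive mult/shift. The "foo_" names and the 32 kHz rate are hypothetical.

#include <linux/clocksource.h>
#include <linux/io.h>

#define FOO_TIMER_RATE	32768	/* assumed input clock, in Hz */

static void __iomem *foo_timer_base;	/* mapped elsewhere by the driver */

static cycle_t foo_timer_read(struct clocksource *cs)
{
	return readl(foo_timer_base);	/* free-running 32-bit counter */
}

static struct clocksource foo_clocksource = {
	.name	= "foo-timer",
	.rating	= 200,
	.read	= foo_timer_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_clocksource_init(void)
{
	/* the core computes mult/shift from the frequency */
	return clocksource_register_hz(&foo_clocksource, FOO_TIMER_RATE);
}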
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4de02b10007f..9f15150ce8d6 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -278,6 +278,27 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };
 
+#define cpufreq_freq_attr_ro(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
+static struct freq_attr _name =			\
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+#define cpufreq_freq_attr_rw(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define cpufreq_freq_attr_rw_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+
 struct global_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj,
@@ -286,6 +307,15 @@ struct global_attr {
 			 const char *c, size_t count);
 };
 
+#define define_one_global_ro(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
 /*********************************************************************
  *                     CPUFREQ 2.6. INTERFACE                        *
 *********************************************************************/
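A minimal sketch of how a governor or driver could use the new attribute helpers (illustrative names; it assumes the per-policy show() prototype ssize_t show_<name>(struct cpufreq_policy *, char *) that struct freq_attr expects):

#include <linux/kernel.h>
#include <linux/cpufreq.h>

static ssize_t show_foo_limit(struct cpufreq_policy *policy, char *buf)
{
	/* expose some per-policy value read-only in sysfs */
	return sprintf(buf, "%u\n", policy->max);
}

/* expands to: static struct freq_attr foo_limit = __ATTR(foo_limit, 0444, ...) */
cpufreq_freq_attr_ro(foo_limit);

static struct freq_attr *foo_attrs[] = {
	&foo_limit,
	NULL,
};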
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8c243aaa86a7..597692f1fc8d 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -20,12 +20,14 @@ struct debug_obj_descr;
  * struct debug_obj - representaion of an tracked object
  * @node:	hlist node to link the object into the tracker list
  * @state:	tracked object state
+ * @astate:	current active state
  * @object:	pointer to the real object
  * @descr:	pointer to an object type specific debug description structure
  */
 struct debug_obj {
 	struct hlist_node	node;
 	enum debug_obj_state	state;
+	unsigned int		astate;
 	void			*object;
 	struct debug_obj_descr	*descr;
 };
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
 
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next);
+
 extern void debug_objects_early_init(void);
 extern void debug_objects_mem_init(void);
 #else
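A sketch (not from this patch) of how a subsystem could drive the new astate check: the object starts at 0, each transition declares the expected state and the next one, and it must be back at 0 before deactivation. The descriptor and state values here are hypothetical.

#include <linux/debugobjects.h>

enum { FOO_STATE_IDLE = 0, FOO_STATE_QUEUED = 1 };

static struct debug_obj_descr foo_debug_descr = {
	.name = "foo_object",
};

static void foo_queue(void *obj)
{
	/* expect astate 0 (idle), move it to "queued" */
	debug_object_active_state(obj, &foo_debug_descr,
				  FOO_STATE_IDLE, FOO_STATE_QUEUED);
}

static void foo_complete(void *obj)
{
	/* must be back at 0 before the object may be deactivated */
	debug_object_active_state(obj, &foo_debug_descr,
				  FOO_STATE_QUEUED, FOO_STATE_IDLE);
}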
diff --git a/include/linux/device.h b/include/linux/device.h
index 182192892d45..241b96bcd7ad 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -451,6 +451,10 @@ struct device {
 
 static inline const char *dev_name(const struct device *dev)
 {
+	/* Use the init name until the kobject becomes available */
+	if (dev->init_name)
+		return dev->init_name;
+
 	return kobject_name(&dev->kobj);
 }
 
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 597858418051..4d608014753a 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -394,6 +394,7 @@ typedef struct elf64_shdr {
 #define NT_S390_TODPREG	0x303		/* s390 TOD programmable register */
 #define NT_S390_CTRS	0x304		/* s390 control registers */
 #define NT_S390_PREFIX	0x305		/* s390 prefix register */
+#define NT_S390_LAST_BREAK	0x306	/* s390 breaking event address */
 
 
 /* Note header in a PT_NOTE section */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 01e6adea07ec..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)		do { } while (0)
-# define time_hardirqs_off(a0, a1)		do { } while (0)
+  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)		do { } while (0)
-# define trace_preempt_off(a0, a1)		do { } while (0)
+  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
 	int depth;
 };
 
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+					trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
@@ -504,18 +519,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);
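A minimal sketch of a graph-tracer client built on the typedefs this patch moves out of the CONFIG_FUNCTION_GRAPH_TRACER-only section; the callback bodies are illustrative only.

#include <linux/init.h>
#include <linux/ftrace.h>

static int foo_graph_entry(struct ftrace_graph_ent *trace)
{
	/* returning 0 tells the tracer to skip this function */
	return 1;
}

static void foo_graph_return(struct ftrace_graph_ret *trace)
{
	/* inspect trace->func, depth, timestamps, ... */
}

static int __init foo_graph_init(void)
{
	/* with FUNCTION_GRAPH_TRACER disabled, the new stub returns -1 */
	return register_ftrace_graph(foo_graph_return, foo_graph_entry);
}

static void __exit foo_graph_exit(void)
{
	unregister_ftrace_graph();
}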
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..39e71b0a3bfd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5d86fb2309d2..fd0c1b857d3d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -422,6 +422,8 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 
 extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
 			const enum hrtimer_mode mode);
+extern int schedule_hrtimeout_range_clock(ktime_t *expires,
+		unsigned long delta, const enum hrtimer_mode mode, int clock);
 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index c70d27af03f9..a2d6ea49ec56 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -9,9 +9,22 @@ enum {
 };
 
 enum {
-	HW_BREAKPOINT_R = 1,
-	HW_BREAKPOINT_W = 2,
-	HW_BREAKPOINT_X = 4,
+	HW_BREAKPOINT_EMPTY	= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+	HW_BREAKPOINT_X		= 4,
+	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
 };
 
 #ifdef __KERNEL__
@@ -34,6 +47,12 @@ static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 	attr->sample_period = 1;
 }
 
+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+	hw_breakpoint_init(attr);
+	attr->exclude_kernel = 1;
+}
+
 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
 {
 	return bp->attr.bp_addr;
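A hedged sketch of setting up a ptrace-style data watchpoint with the new ptrace_breakpoint_init() helper. It assumes register_user_hw_breakpoint(), the HW_BREAKPOINT_LEN_* constants and the perf overflow-handler prototype of this kernel series; "watch_me" and the foo_* names are illustrative.

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static unsigned long watch_me;

static void foo_bp_handler(struct perf_event *bp, int nmi,
			   struct perf_sample_data *data,
			   struct pt_regs *regs)
{
	/* called when the watched address is written */
}

static struct perf_event *foo_set_watchpoint(struct task_struct *tsk)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);	/* hw_breakpoint_init() + exclude_kernel */
	attr.bp_addr = (unsigned long)&watch_me;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, foo_bp_handler, tsk);
}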
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a8..7996fc2c9ba9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -49,7 +49,6 @@ extern struct group_info init_groups;
 		{ .first = &init_task.pids[PIDTYPE_PGID].node },	\
 		{ .first = &init_task.pids[PIDTYPE_SID].node },		\
 	},								\
-	.rcu		= RCU_HEAD_INIT,				\
 	.level		= 0,						\
 	.numbers	= { {						\
 		.nr		= 0,					\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 75f3f00ac1e5..5137db3317f9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -39,7 +39,8 @@
  * These flags used only by the kernel as part of the
  * irq handling routines.
  *
- * IRQF_DISABLED - keep irqs disabled when calling the action handler
+ * IRQF_DISABLED - keep irqs disabled when calling the action handler.
+ *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
  * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
  * IRQF_SHARED - allow sharing the irq among several devices
  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
@@ -77,6 +78,18 @@ enum {
 	IRQTF_AFFINITY,
 };
 
+/**
+ * These values can be returned by request_any_context_irq() and
+ * describe the context the interrupt will be run in.
+ *
+ * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
+ * IRQC_IS_NESTED - interrupt runs in a nested threaded context
+ */
+enum {
+	IRQC_IS_HARDIRQ	= 0,
+	IRQC_IS_NESTED,
+};
+
 typedef irqreturn_t (*irq_handler_t)(int, void *);
 
 /**
@@ -120,6 +133,10 @@ request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
 	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
 }
 
+extern int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			unsigned long flags, const char *name, void *dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
@@ -141,6 +158,13 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	return request_irq(irq, handler, flags, name, dev);
 }
 
+static inline int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			unsigned long flags, const char *name, void *dev_id)
+{
+	return request_irq(irq, handler, flags, name, dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
@@ -209,6 +233,7 @@ extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
+extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -223,6 +248,11 @@ static inline int irq_can_set_affinity(unsigned int irq)
 
 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 
+static inline int irq_set_affinity_hint(unsigned int irq,
+					const struct cpumask *m)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
 
 #ifdef CONFIG_GENERIC_HARDIRQS
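A usage sketch for the new request_any_context_irq(): on success its return value tells the driver whether the handler will run in hardirq or in a nested threaded context; a negative value is an error. Device names and the flag choice are illustrative.

#include <linux/kernel.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	/* may run in hardirq or in a nested thread, depending on the parent */
	return IRQ_HANDLED;
}

static int foo_probe_irq(unsigned int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, foo_irq_handler,
				      IRQF_SHARED, "foo", dev);
	if (ret < 0)
		return ret;		/* request failed */

	if (ret == IRQC_IS_NESTED)
		pr_info("foo: irq %u runs in a threaded context\n", irq);
	else				/* IRQC_IS_HARDIRQ */
		pr_info("foo: irq %u runs in hardirq context\n", irq);

	return 0;
}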
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3af4ffd591b9..be22ad83689c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -37,9 +37,9 @@ struct iommu_ops {
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot);
-	void (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		      size_t size);
+		   phys_addr_t paddr, int gfp_order, int prot);
+	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     int gfp_order);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
-extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size, int prot);
-extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-			      size_t size);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+		     phys_addr_t paddr, int gfp_order, int prot);
+extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       int gfp_order);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }
 
-static inline int iommu_map_range(struct iommu_domain *domain,
-				  unsigned long iova, phys_addr_t paddr,
-				  size_t size, int prot)
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, int gfp_order, int prot)
 {
 	return -ENODEV;
 }
 
-static inline void iommu_unmap_range(struct iommu_domain *domain,
-				     unsigned long iova, size_t size)
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			      int gfp_order)
 {
+	return -ENODEV;
 }
 
 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
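A sketch of the reworked iommu_map()/iommu_unmap() calls, which now take a page order instead of a byte size. Domain setup is abbreviated, the addresses are illustrative, and the no-argument iommu_domain_alloc() form of this series is assumed.

#include <linux/errno.h>
#include <linux/device.h>
#include <linux/iommu.h>

static int foo_map_one_page(struct device *dev, unsigned long iova,
			    phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* order 0 == a single page; prot flags are unchanged */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... use the mapping ... */

	iommu_unmap(domain, iova, 0);	/* returns the order actually unmapped */
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}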
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 707ab122e2e6..c03243ad84b4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -195,6 +195,7 @@ struct irq_desc {
 	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
+	const struct cpumask	*affinity_hint;
 	unsigned int		node;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9365227dbaf6..9fb1c1299032 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -490,6 +490,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -571,7 +578,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +599,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 	return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 
 /*
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index ce5983225be4..e1ceaa9b36bb 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -130,7 +130,7 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
 #define ktime_to_ns(kt)			((kt).tv64)
 
-#else
+#else	/* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
 
 /*
  * Helper macros/inlines to get the ktime_t math right in the timespec
@@ -275,7 +275,7 @@ static inline s64 ktime_to_ns(const ktime_t kt)
 	return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
 }
 
-#endif
+#endif	/* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
 
 /**
  * ktime_equal - Compares two ktime_t variables to see if they are equal
@@ -295,6 +295,12 @@ static inline s64 ktime_to_us(const ktime_t kt)
 	return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
 }
 
+static inline s64 ktime_to_ms(const ktime_t kt)
+{
+	struct timeval tv = ktime_to_timeval(kt);
+	return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
+}
+
 static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
 {
 	return ktime_to_us(ktime_sub(later, earlier));
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 462acaf36f3a..fb19bb92b809 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
diff --git a/include/linux/module.h b/include/linux/module.h
index 515d53ae6a79..6914fcad4673 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
 
 	if (likely(module_is_live(module))) {
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 	} else
 		ret = 0;
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 07ce4609fe50..77c2ae53431c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -356,6 +356,20 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
 extern u64 nfs_compat_user_ino64(u64 fileid);
 extern void nfs_fattr_init(struct nfs_fattr *fattr);
 
+extern struct nfs_fattr *nfs_alloc_fattr(void);
+
+static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
+{
+	kfree(fattr);
+}
+
+extern struct nfs_fh *nfs_alloc_fhandle(void);
+
+static inline void nfs_free_fhandle(const struct nfs_fh *fh)
+{
+	kfree(fh);
+}
+
 /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
 extern __be32 root_nfs_parse_addr(char *name); /*__init*/
 extern unsigned long nfs_inc_attr_generation_counter(void);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e82957acea56..d6e10a4c06e5 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -44,7 +44,6 @@ struct nfs_client {
 
 #ifdef CONFIG_NFS_V4
 	u64			cl_clientid;	/* constant */
-	nfs4_verifier		cl_confirm;
 	unsigned long		cl_state;
 
 	struct rb_root		cl_openowner_id;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 89b28812ec24..51914d7d6cc4 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -386,8 +386,8 @@ struct nfs_removeargs {
 
 struct nfs_removeres {
 	const struct nfs_server *server;
+	struct nfs_fattr	*dir_attr;
 	struct nfs4_change_info	cinfo;
-	struct nfs_fattr	dir_attr;
 	struct nfs4_sequence_res	seq_res;
 };
 
@@ -824,6 +824,11 @@ struct nfs4_setclientid {
 	u32			sc_cb_ident;
 };
 
+struct nfs4_setclientid_res {
+	u64				clientid;
+	nfs4_verifier			confirm;
+};
+
 struct nfs4_statfs_arg {
 	const struct nfs_fh *		fh;
 	const u32 *			bitmask;
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index 65e333afaee4..80d55bbc5365 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -40,12 +40,12 @@ struct nfs_fhbase_old {
  * This is the new flexible, extensible style NFSv2/v3 file handle.
  * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000
  *
- * The file handle is seens as a list of 4byte words.
- * The first word contains a version number (1) and four descriptor bytes
+ * The file handle starts with a sequence of four-byte words.
+ * The first word contains a version number (1) and three descriptor bytes
  * that tell how the remaining 3 variable length fields should be handled.
  * These three bytes are auth_type, fsid_type and fileid_type.
  *
- * All 4byte values are in host-byte-order.
+ * All four-byte values are in host-byte-order.
  *
  * The auth_type field specifies how the filehandle can be authenticated
  * This might allow a file to be confirmed to be in a writable part of a
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e375440403..3fd5c82e0e18 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,19 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
-
-				__reserved_1   : 49;
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
+
+				__reserved_1   : 47;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
@@ -439,6 +463,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
@@ -468,6 +498,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
@@ -506,6 +547,8 @@ struct hw_perf_event {
 
 struct perf_event;
 
+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +559,16 @@ struct pmu {
 	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * group events scheduling is treated as a transaction,
+	 * add group events as a whole and perform one schedulability test.
+	 * If test fails, roll back the whole group
+	 */
+
+	void (*start_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(const struct pmu *pmu);
 };
 
 /**
@@ -571,6 +624,14 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE = 0x1,
 };
 
+#define SWEVENT_HLIST_BITS	8
+#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head		rcu_head;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -579,6 +640,7 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
@@ -726,6 +788,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:
@@ -769,9 +834,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -902,6 +964,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
 		__perf_event_mmap(vma);
 }
 
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
@@ -971,6 +1037,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
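A sketch of how a hypervisor module could hook the new guest-info callbacks so perf can attribute samples to guest context (this mirrors the struct added above; the foo_* helpers stand in for hypervisor-specific state):

#include <linux/init.h>
#include <linux/perf_event.h>

static int foo_is_in_guest(void)
{
	return 0;	/* hypervisor-specific "are we in a guest?" test */
}

static int foo_is_user_mode(void)
{
	return 0;	/* guest privilege-level check */
}

static unsigned long foo_get_guest_ip(void)
{
	return 0;	/* guest instruction pointer */
}

static struct perf_guest_info_callbacks foo_guest_cbs = {
	.is_in_guest	= foo_is_in_guest,
	.is_user_mode	= foo_is_user_mode,
	.get_guest_ip	= foo_get_guest_ip,
};

static int __init foo_init(void)
{
	return perf_register_guest_info_callbacks(&foo_guest_cbs);
}

static void __exit foo_exit(void)
{
	perf_unregister_guest_info_callbacks(&foo_guest_cbs);
}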
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e1fb60729979..4272521e29e9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 5210a5c60877..fe1872e5b37e 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -110,6 +110,7 @@ struct rb_node
 struct rb_root
 {
 	struct rb_node *rb_node;
+	void (*augment_cb)(struct rb_node *node);
 };
 
 
@@ -129,7 +130,9 @@ static inline void rb_set_color(struct rb_node *rb, int color)
 	rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
 }
 
-#define RB_ROOT	(struct rb_root) { NULL, }
+#define RB_ROOT	(struct rb_root) { NULL, NULL, }
+#define RB_AUGMENT_ROOT(x)	(struct rb_root) { NULL, x}
+
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)
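A sketch of the new augmented-rbtree hook: a callback attached via RB_AUGMENT_ROOT() is invoked by the rbtree code for nodes touched during rebalancing, so per-node metadata can be recomputed. The node layout and the "max size in subtree" metric are illustrative.

#include <linux/kernel.h>
#include <linux/rbtree.h>

struct foo_node {
	struct rb_node	rb;
	unsigned long	size;		/* key */
	unsigned long	max_size;	/* augmented: max size in this subtree */
};

static void foo_augment_cb(struct rb_node *node)
{
	struct foo_node *fn = rb_entry(node, struct foo_node, rb);
	unsigned long m = fn->size;

	if (node->rb_left)
		m = max(m, rb_entry(node->rb_left, struct foo_node, rb)->max_size);
	if (node->rb_right)
		m = max(m, rb_entry(node->rb_right, struct foo_node, rb)->max_size);
	fn->max_size = m;
}

static struct rb_root foo_root = RB_AUGMENT_ROOT(&foo_augment_cb);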
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index db266bbed23f..b653b4aaa8a6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,8 +56,6 @@ struct rcu_head {
56}; 56};
57 57
58/* Exported common interfaces */ 58/* Exported common interfaces */
59extern void synchronize_rcu_bh(void);
60extern void synchronize_sched(void);
61extern void rcu_barrier(void); 59extern void rcu_barrier(void);
62extern void rcu_barrier_bh(void); 60extern void rcu_barrier_bh(void);
63extern void rcu_barrier_sched(void); 61extern void rcu_barrier_sched(void);
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);
66 64
67/* Internal to kernel */ 65/* Internal to kernel */
68extern void rcu_init(void); 66extern void rcu_init(void);
69extern int rcu_scheduler_active;
70extern void rcu_scheduler_starting(void);
71 67
72#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 68#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
73#include <linux/rcutree.h> 69#include <linux/rcutree.h>
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void);
83 (ptr)->next = NULL; (ptr)->func = NULL; \ 79 (ptr)->next = NULL; (ptr)->func = NULL; \
84} while (0) 80} while (0)
85 81
82static inline void init_rcu_head_on_stack(struct rcu_head *head)
83{
84}
85
86static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
87{
88}
89
86#ifdef CONFIG_DEBUG_LOCK_ALLOC 90#ifdef CONFIG_DEBUG_LOCK_ALLOC
87 91
88extern struct lockdep_map rcu_lock_map; 92extern struct lockdep_map rcu_lock_map;
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void);
106/** 110/**
107 * rcu_read_lock_held - might we be in RCU read-side critical section? 111 * rcu_read_lock_held - might we be in RCU read-side critical section?
108 * 112 *
109 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in 113 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
110 * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, 114 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
111 * this assumes we are in an RCU read-side critical section unless it can 115 * this assumes we are in an RCU read-side critical section unless it can
112 * prove otherwise. 116 * prove otherwise.
113 * 117 *
114 * Check rcu_scheduler_active to prevent false positives during boot. 118 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
119 * and while lockdep is disabled.
115 */ 120 */
116static inline int rcu_read_lock_held(void) 121static inline int rcu_read_lock_held(void)
117{ 122{
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void);
129/** 134/**
130 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? 135 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
131 * 136 *
132 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an 137 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
133 * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, 138 * RCU-sched read-side critical section. In absence of
134 * this assumes we are in an RCU-sched read-side critical section unless it 139 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
135 * can prove otherwise. Note that disabling of preemption (including 140 * critical section unless it can prove otherwise. Note that disabling
136 * disabling irqs) counts as an RCU-sched read-side critical section. 141 * of preemption (including disabling irqs) counts as an RCU-sched
142 * read-side critical section.
137 * 143 *
138 * Check rcu_scheduler_active to prevent false positives during boot. 144 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
145 * and while lockdep is disabled.
139 */ 146 */
140#ifdef CONFIG_PREEMPT 147#ifdef CONFIG_PREEMPT
141static inline int rcu_read_lock_sched_held(void) 148static inline int rcu_read_lock_sched_held(void)
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void)
177#ifdef CONFIG_PREEMPT 184#ifdef CONFIG_PREEMPT
178static inline int rcu_read_lock_sched_held(void) 185static inline int rcu_read_lock_sched_held(void)
179{ 186{
180 return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); 187 return preempt_count() != 0 || irqs_disabled();
181} 188}
182#else /* #ifdef CONFIG_PREEMPT */ 189#else /* #ifdef CONFIG_PREEMPT */
183static inline int rcu_read_lock_sched_held(void) 190static inline int rcu_read_lock_sched_held(void)
@@ -192,6 +199,15 @@ static inline int rcu_read_lock_sched_held(void)
192 199
193extern int rcu_my_thread_group_empty(void); 200extern int rcu_my_thread_group_empty(void);
194 201
202#define __do_rcu_dereference_check(c) \
203 do { \
204 static bool __warned; \
205 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
206 __warned = true; \
207 lockdep_rcu_dereference(__FILE__, __LINE__); \
208 } \
209 } while (0)
210
195/** 211/**
196 * rcu_dereference_check - rcu_dereference with debug checking 212 * rcu_dereference_check - rcu_dereference with debug checking
197 * @p: The pointer to read, prior to dereferencing 213 * @p: The pointer to read, prior to dereferencing
@@ -221,8 +237,7 @@ extern int rcu_my_thread_group_empty(void);
221 */ 237 */
222#define rcu_dereference_check(p, c) \ 238#define rcu_dereference_check(p, c) \
223 ({ \ 239 ({ \
224 if (debug_lockdep_rcu_enabled() && !(c)) \ 240 __do_rcu_dereference_check(c); \
225 lockdep_rcu_dereference(__FILE__, __LINE__); \
226 rcu_dereference_raw(p); \ 241 rcu_dereference_raw(p); \
227 }) 242 })
228 243
@@ -239,8 +254,7 @@ extern int rcu_my_thread_group_empty(void);
239 */ 254 */
240#define rcu_dereference_protected(p, c) \ 255#define rcu_dereference_protected(p, c) \
241 ({ \ 256 ({ \
242 if (debug_lockdep_rcu_enabled() && !(c)) \ 257 __do_rcu_dereference_check(c); \
243 lockdep_rcu_dereference(__FILE__, __LINE__); \
244 (p); \ 258 (p); \
245 }) 259 })
246 260
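
Illustrative usage of the checked dereference above, as a minimal sketch; struct foo, global_foo and foo_lock are hypothetical names introduced for the example, not part of this patch.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo {
        int data;
};

static struct foo *global_foo;          /* updated under foo_lock, read under RCU */
static DEFINE_SPINLOCK(foo_lock);

static int read_foo_data(void)
{
        struct foo *p;
        int val = -1;

        rcu_read_lock();
        /* Complains once per call site if neither protection is held. */
        p = rcu_dereference_check(global_foo,
                                  rcu_read_lock_held() ||
                                  lockdep_is_held(&foo_lock));
        if (p)
                val = p->data;
        rcu_read_unlock();
        return val;
}
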
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..e2e893144a84 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -29,6 +29,10 @@
29 29
30void rcu_sched_qs(int cpu); 30void rcu_sched_qs(int cpu);
31void rcu_bh_qs(int cpu); 31void rcu_bh_qs(int cpu);
32static inline void rcu_note_context_switch(int cpu)
33{
34 rcu_sched_qs(cpu);
35}
32 36
33#define __rcu_read_lock() preempt_disable() 37#define __rcu_read_lock() preempt_disable()
34#define __rcu_read_unlock() preempt_enable() 38#define __rcu_read_unlock() preempt_enable()
@@ -60,8 +64,6 @@ static inline long rcu_batches_completed_bh(void)
60 return 0; 64 return 0;
61} 65}
62 66
63extern int rcu_expedited_torture_stats(char *page);
64
65static inline void rcu_force_quiescent_state(void) 67static inline void rcu_force_quiescent_state(void)
66{ 68{
67} 69}
@@ -74,7 +76,17 @@ static inline void rcu_sched_force_quiescent_state(void)
74{ 76{
75} 77}
76 78
77#define synchronize_rcu synchronize_sched 79extern void synchronize_sched(void);
80
81static inline void synchronize_rcu(void)
82{
83 synchronize_sched();
84}
85
86static inline void synchronize_rcu_bh(void)
87{
88 synchronize_sched();
89}
78 90
79static inline void synchronize_rcu_expedited(void) 91static inline void synchronize_rcu_expedited(void)
80{ 92{
@@ -114,4 +126,17 @@ static inline int rcu_preempt_depth(void)
114 return 0; 126 return 0;
115} 127}
116 128
129#ifdef CONFIG_DEBUG_LOCK_ALLOC
130
131extern int rcu_scheduler_active __read_mostly;
132extern void rcu_scheduler_starting(void);
133
134#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
135
136static inline void rcu_scheduler_starting(void)
137{
138}
139
140#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
141
117#endif /* __LINUX_RCUTINY_H */ 142#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..c0ed1c056f29 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,8 +34,8 @@ struct notifier_block;
34 34
35extern void rcu_sched_qs(int cpu); 35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu); 36extern void rcu_bh_qs(int cpu);
37extern void rcu_note_context_switch(int cpu);
37extern int rcu_needs_cpu(int cpu); 38extern int rcu_needs_cpu(int cpu);
38extern int rcu_expedited_torture_stats(char *page);
39 39
40#ifdef CONFIG_TREE_PREEMPT_RCU 40#ifdef CONFIG_TREE_PREEMPT_RCU
41 41
@@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void)
86 86
87extern void call_rcu_sched(struct rcu_head *head, 87extern void call_rcu_sched(struct rcu_head *head,
88 void (*func)(struct rcu_head *rcu)); 88 void (*func)(struct rcu_head *rcu));
89extern void synchronize_rcu_bh(void);
90extern void synchronize_sched(void);
89extern void synchronize_rcu_expedited(void); 91extern void synchronize_rcu_expedited(void);
90 92
91static inline void synchronize_rcu_bh_expedited(void) 93static inline void synchronize_rcu_bh_expedited(void)
@@ -120,4 +122,7 @@ static inline int rcu_blocking_is_gp(void)
120 return num_online_cpus() == 1; 122 return num_online_cpus() == 1;
121} 123}
122 124
125extern void rcu_scheduler_starting(void);
126extern int rcu_scheduler_active __read_mostly;
127
123#endif /* __LINUX_RCUTREE_H */ 128#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 5fcc31ed5771..25b4f686d918 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer,
120 unsigned long length, void *data); 120 unsigned long length, void *data);
121 121
122struct ring_buffer_event * 122struct ring_buffer_event *
123ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); 123ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
124 unsigned long *lost_events);
124struct ring_buffer_event * 125struct ring_buffer_event *
125ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); 126ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
127 unsigned long *lost_events);
126 128
127struct ring_buffer_iter * 129struct ring_buffer_iter *
128ring_buffer_read_start(struct ring_buffer *buffer, int cpu); 130ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
131void ring_buffer_read_prepare_sync(void);
132void ring_buffer_read_start(struct ring_buffer_iter *iter);
129void ring_buffer_read_finish(struct ring_buffer_iter *iter); 133void ring_buffer_read_finish(struct ring_buffer_iter *iter);
130 134
131struct ring_buffer_event * 135struct ring_buffer_event *
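
A sketch of the new three-step iterator setup and of the lost_events reporting added above; it assumes ring_buffer_read() from the same header (not shown in this hunk), and "buffer" and "cpu" are supplied by the caller.

#include <linux/ring_buffer.h>

static void dump_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        unsigned long lost_events;
        u64 ts;

        /* 1. Allocate and prepare the iterator (may sleep). */
        iter = ring_buffer_read_prepare(buffer, cpu);
        if (!iter)
                return;

        /* 2. One synchronization point, shared by all prepared iterators. */
        ring_buffer_read_prepare_sync();

        /* 3. Start reading; the buffer is now safe to iterate. */
        ring_buffer_read_start(iter);
        while ((event = ring_buffer_read(iter, &ts)))
                ;       /* process "event" at timestamp "ts" here */
        ring_buffer_read_finish(iter);

        /* Consuming reads now also report how many events were dropped. */
        event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
        /* "event" and "lost_events" would be processed here */
}
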
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b7b81df78b3..b55e988988b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ struct futex_pi_state;
99struct robust_list_head; 99struct robust_list_head;
100struct bio_list; 100struct bio_list;
101struct fs_struct; 101struct fs_struct;
102struct bts_context;
103struct perf_event_context; 102struct perf_event_context;
104 103
105/* 104/*
@@ -275,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
275#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
276extern int select_nohz_load_balancer(int cpu); 275extern int select_nohz_load_balancer(int cpu);
277extern int get_nohz_load_balancer(void); 276extern int get_nohz_load_balancer(void);
277extern int nohz_ratelimit(int cpu);
278#else 278#else
279static inline int select_nohz_load_balancer(int cpu) 279static inline int select_nohz_load_balancer(int cpu)
280{ 280{
281 return 0; 281 return 0;
282} 282}
283
284static inline int nohz_ratelimit(int cpu)
285{
286 return 0;
287}
283#endif 288#endif
284 289
285/* 290/*
@@ -954,6 +959,7 @@ struct sched_domain {
954 char *name; 959 char *name;
955#endif 960#endif
956 961
962 unsigned int span_weight;
957 /* 963 /*
958 * Span of all CPUs in this domain. 964 * Span of all CPUs in this domain.
959 * 965 *
@@ -1026,12 +1032,17 @@ struct sched_domain;
1026#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ 1032#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
1027#define WF_FORK 0x02 /* child wakeup after fork */ 1033#define WF_FORK 0x02 /* child wakeup after fork */
1028 1034
1035#define ENQUEUE_WAKEUP 1
1036#define ENQUEUE_WAKING 2
1037#define ENQUEUE_HEAD 4
1038
1039#define DEQUEUE_SLEEP 1
1040
1029struct sched_class { 1041struct sched_class {
1030 const struct sched_class *next; 1042 const struct sched_class *next;
1031 1043
1032 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup, 1044 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1033 bool head); 1045 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1034 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
1035 void (*yield_task) (struct rq *rq); 1046 void (*yield_task) (struct rq *rq);
1036 1047
1037 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); 1048 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1040,7 +1051,8 @@ struct sched_class {
1040 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 1051 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1041 1052
1042#ifdef CONFIG_SMP 1053#ifdef CONFIG_SMP
1043 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); 1054 int (*select_task_rq)(struct rq *rq, struct task_struct *p,
1055 int sd_flag, int flags);
1044 1056
1045 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1057 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1046 void (*post_schedule) (struct rq *this_rq); 1058 void (*post_schedule) (struct rq *this_rq);
@@ -1077,36 +1089,8 @@ struct load_weight {
1077 unsigned long weight, inv_weight; 1089 unsigned long weight, inv_weight;
1078}; 1090};
1079 1091
1080/*
1081 * CFS stats for a schedulable entity (task, task-group etc)
1082 *
1083 * Current field usage histogram:
1084 *
1085 * 4 se->block_start
1086 * 4 se->run_node
1087 * 4 se->sleep_start
1088 * 6 se->load.weight
1089 */
1090struct sched_entity {
1091 struct load_weight load; /* for load-balancing */
1092 struct rb_node run_node;
1093 struct list_head group_node;
1094 unsigned int on_rq;
1095
1096 u64 exec_start;
1097 u64 sum_exec_runtime;
1098 u64 vruntime;
1099 u64 prev_sum_exec_runtime;
1100
1101 u64 last_wakeup;
1102 u64 avg_overlap;
1103
1104 u64 nr_migrations;
1105
1106 u64 start_runtime;
1107 u64 avg_wakeup;
1108
1109#ifdef CONFIG_SCHEDSTATS 1092#ifdef CONFIG_SCHEDSTATS
1093struct sched_statistics {
1110 u64 wait_start; 1094 u64 wait_start;
1111 u64 wait_max; 1095 u64 wait_max;
1112 u64 wait_count; 1096 u64 wait_count;
@@ -1138,6 +1122,24 @@ struct sched_entity {
1138 u64 nr_wakeups_affine_attempts; 1122 u64 nr_wakeups_affine_attempts;
1139 u64 nr_wakeups_passive; 1123 u64 nr_wakeups_passive;
1140 u64 nr_wakeups_idle; 1124 u64 nr_wakeups_idle;
1125};
1126#endif
1127
1128struct sched_entity {
1129 struct load_weight load; /* for load-balancing */
1130 struct rb_node run_node;
1131 struct list_head group_node;
1132 unsigned int on_rq;
1133
1134 u64 exec_start;
1135 u64 sum_exec_runtime;
1136 u64 vruntime;
1137 u64 prev_sum_exec_runtime;
1138
1139 u64 nr_migrations;
1140
1141#ifdef CONFIG_SCHEDSTATS
1142 struct sched_statistics statistics;
1141#endif 1143#endif
1142 1144
1143#ifdef CONFIG_FAIR_GROUP_SCHED 1145#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1272,12 +1274,6 @@ struct task_struct {
1272 struct list_head ptraced; 1274 struct list_head ptraced;
1273 struct list_head ptrace_entry; 1275 struct list_head ptrace_entry;
1274 1276
1275 /*
1276 * This is the tracer handle for the ptrace BTS extension.
1277 * This field actually belongs to the ptracer task.
1278 */
1279 struct bts_context *bts;
1280
1281 /* PID/PID hash table linkage. */ 1277 /* PID/PID hash table linkage. */
1282 struct pid_link pids[PIDTYPE_MAX]; 1278 struct pid_link pids[PIDTYPE_MAX];
1283 struct list_head thread_group; 1279 struct list_head thread_group;
@@ -1846,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void);
1846extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1842extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1847 1843
1848#ifdef CONFIG_HOTPLUG_CPU 1844#ifdef CONFIG_HOTPLUG_CPU
1845extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
1849extern void idle_task_exit(void); 1846extern void idle_task_exit(void);
1850#else 1847#else
1851static inline void idle_task_exit(void) {} 1848static inline void idle_task_exit(void) {}
@@ -2122,10 +2119,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
2122extern char *get_task_comm(char *to, struct task_struct *tsk); 2119extern char *get_task_comm(char *to, struct task_struct *tsk);
2123 2120
2124#ifdef CONFIG_SMP 2121#ifdef CONFIG_SMP
2125extern void wait_task_context_switch(struct task_struct *p);
2126extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2122extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2127#else 2123#else
2128static inline void wait_task_context_switch(struct task_struct *p) {}
2129static inline unsigned long wait_task_inactive(struct task_struct *p, 2124static inline unsigned long wait_task_inactive(struct task_struct *p,
2130 long match_state) 2125 long match_state)
2131{ 2126{
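
With the schedstat fields moved into the new sched_entity::statistics sub-struct, code that used to read e.g. p->se.wait_max now goes through the embedded struct. A minimal sketch; task_wait_max() is a hypothetical helper, not part of this patch.

#include <linux/sched.h>

#ifdef CONFIG_SCHEDSTATS
static u64 task_wait_max(struct task_struct *p)
{
        /* was p->se.wait_max before this change */
        return p->se.statistics.wait_max;
}
#endif /* CONFIG_SCHEDSTATS */
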
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 193d4bfe42ff..f5364a1de68b 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -33,8 +33,8 @@ struct plat_sci_port {
33 char *clk; /* clock string */ 33 char *clk; /* clock string */
34 struct device *dma_dev; 34 struct device *dma_dev;
35#ifdef CONFIG_SERIAL_SH_SCI_DMA 35#ifdef CONFIG_SERIAL_SH_SCI_DMA
36 enum sh_dmae_slave_chan_id dma_slave_tx; 36 unsigned int dma_slave_tx;
37 enum sh_dmae_slave_chan_id dma_slave_rx; 37 unsigned int dma_slave_rx;
38#endif 38#endif
39}; 39};
40 40
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
new file mode 100644
index 000000000000..1636d1e2a5f1
--- /dev/null
+++ b/include/linux/sh_clk.h
@@ -0,0 +1,150 @@
1#ifndef __SH_CLOCK_H
2#define __SH_CLOCK_H
3
4#include <linux/list.h>
5#include <linux/seq_file.h>
6#include <linux/cpufreq.h>
7#include <linux/clk.h>
8#include <linux/err.h>
9
10struct clk;
11
12struct clk_ops {
13 void (*init)(struct clk *clk);
14 int (*enable)(struct clk *clk);
15 void (*disable)(struct clk *clk);
16 unsigned long (*recalc)(struct clk *clk);
17 int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id);
18 int (*set_parent)(struct clk *clk, struct clk *parent);
19 long (*round_rate)(struct clk *clk, unsigned long rate);
20};
21
22struct clk {
23 struct list_head node;
24 const char *name;
25 int id;
26
27 struct clk *parent;
28 struct clk_ops *ops;
29
30 struct list_head children;
31 struct list_head sibling; /* node for children */
32
33 int usecount;
34
35 unsigned long rate;
36 unsigned long flags;
37
38 void __iomem *enable_reg;
39 unsigned int enable_bit;
40
41 unsigned long arch_flags;
42 void *priv;
43 struct dentry *dentry;
44 struct cpufreq_frequency_table *freq_table;
45};
46
47#define CLK_ENABLE_ON_INIT (1 << 0)
48
49/* drivers/sh/clk.c */
50unsigned long followparent_recalc(struct clk *);
51void recalculate_root_clocks(void);
52void propagate_rate(struct clk *);
53int clk_reparent(struct clk *child, struct clk *parent);
54int clk_register(struct clk *);
55void clk_unregister(struct clk *);
56void clk_enable_init_clocks(void);
57
58/**
59 * clk_set_rate_ex - set the clock rate for a clock source, with an additional parameter
60 * @clk: clock source
61 * @rate: desired clock rate in Hz
62 * @algo_id: algorithm id to be passed down to ops->set_rate
63 *
64 * Returns success (0) or negative errno.
65 */
66int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id);
67
68enum clk_sh_algo_id {
69 NO_CHANGE = 0,
70
71 IUS_N1_N1,
72 IUS_322,
73 IUS_522,
74 IUS_N11,
75
76 SB_N1,
77
78 SB3_N1,
79 SB3_32,
80 SB3_43,
81 SB3_54,
82
83 BP_N1,
84
85 IP_N1,
86};
87
88struct clk_div_mult_table {
89 unsigned int *divisors;
90 unsigned int nr_divisors;
91 unsigned int *multipliers;
92 unsigned int nr_multipliers;
93};
94
95struct cpufreq_frequency_table;
96void clk_rate_table_build(struct clk *clk,
97 struct cpufreq_frequency_table *freq_table,
98 int nr_freqs,
99 struct clk_div_mult_table *src_table,
100 unsigned long *bitmap);
101
102long clk_rate_table_round(struct clk *clk,
103 struct cpufreq_frequency_table *freq_table,
104 unsigned long rate);
105
106int clk_rate_table_find(struct clk *clk,
107 struct cpufreq_frequency_table *freq_table,
108 unsigned long rate);
109
110#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \
111{ \
112 .parent = _parent, \
113 .enable_reg = (void __iomem *)_enable_reg, \
114 .enable_bit = _enable_bit, \
115 .flags = _flags, \
116}
117
118int sh_clk_mstp32_register(struct clk *clks, int nr);
119
120#define SH_CLK_DIV4(_parent, _reg, _shift, _div_bitmap, _flags) \
121{ \
122 .parent = _parent, \
123 .enable_reg = (void __iomem *)_reg, \
124 .enable_bit = _shift, \
125 .arch_flags = _div_bitmap, \
126 .flags = _flags, \
127}
128
129struct clk_div4_table {
130 struct clk_div_mult_table *div_mult_table;
131 void (*kick)(struct clk *clk);
132};
133
134int sh_clk_div4_register(struct clk *clks, int nr,
135 struct clk_div4_table *table);
136int sh_clk_div4_enable_register(struct clk *clks, int nr,
137 struct clk_div4_table *table);
138int sh_clk_div4_reparent_register(struct clk *clks, int nr,
139 struct clk_div4_table *table);
140
141#define SH_CLK_DIV6(_parent, _reg, _flags) \
142{ \
143 .parent = _parent, \
144 .enable_reg = (void __iomem *)_reg, \
145 .flags = _flags, \
146}
147
148int sh_clk_div6_register(struct clk *clks, int nr);
149
150#endif /* __SH_CLOCK_H */
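
Illustrative board/SoC setup code using the helpers above; the register address, bit number and parent clock below are made-up placeholders, not values from a real datasheet.

#include <linux/init.h>
#include <linux/sh_clk.h>

#define MSTPCR2         0xa4150038      /* placeholder register address */

extern struct clk peripheral_clk;       /* assumed parent clock, defined in SoC code */

enum { MSTP007, MSTP_NR };

static struct clk mstp_clks[MSTP_NR] = {
        /* parent, enable register, bit, flags */
        [MSTP007] = SH_CLK_MSTP32(&peripheral_clk, MSTPCR2, 7, 0),
};

int __init board_clk_init(void)
{
        /* Registers the gate clocks; returns 0 or a negative errno. */
        return sh_clk_mstp32_register(mstp_clks, MSTP_NR);
}
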
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
new file mode 100644
index 000000000000..b08cd4efa15c
--- /dev/null
+++ b/include/linux/sh_dma.h
@@ -0,0 +1,102 @@
1/*
2 * Header for the new SH dmaengine driver
3 *
4 * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef SH_DMA_H
11#define SH_DMA_H
12
13#include <linux/list.h>
14#include <linux/dmaengine.h>
15
16/* Used by slave DMA clients to request DMA to/from a specific peripheral */
17struct sh_dmae_slave {
18 unsigned int slave_id; /* Set by the platform */
19 struct device *dma_dev; /* Set by the platform */
20 const struct sh_dmae_slave_config *config; /* Set by the driver */
21};
22
23struct sh_dmae_regs {
24 u32 sar; /* SAR / source address */
25 u32 dar; /* DAR / destination address */
26 u32 tcr; /* TCR / transfer count */
27};
28
29struct sh_desc {
30 struct sh_dmae_regs hw;
31 struct list_head node;
32 struct dma_async_tx_descriptor async_tx;
33 enum dma_data_direction direction;
34 dma_cookie_t cookie;
35 size_t partial;
36 int chunks;
37 int mark;
38};
39
40struct sh_dmae_slave_config {
41 unsigned int slave_id;
42 dma_addr_t addr;
43 u32 chcr;
44 char mid_rid;
45};
46
47struct sh_dmae_channel {
48 unsigned int offset;
49 unsigned int dmars;
50 unsigned int dmars_bit;
51};
52
53struct sh_dmae_pdata {
54 const struct sh_dmae_slave_config *slave;
55 int slave_num;
56 const struct sh_dmae_channel *channel;
57 int channel_num;
58 unsigned int ts_low_shift;
59 unsigned int ts_low_mask;
60 unsigned int ts_high_shift;
61 unsigned int ts_high_mask;
62 const unsigned int *ts_shift;
63 int ts_shift_num;
64 u16 dmaor_init;
65};
66
67/* DMA register */
68#define SAR 0x00
69#define DAR 0x04
70#define TCR 0x08
71#define CHCR 0x0C
72#define DMAOR 0x40
73
74/* DMAOR definitions */
75#define DMAOR_AE 0x00000004
76#define DMAOR_NMIF 0x00000002
77#define DMAOR_DME 0x00000001
78
79/* Definitions for the SuperH DMAC */
80#define REQ_L 0x00000000
81#define REQ_E 0x00080000
82#define RACK_H 0x00000000
83#define RACK_L 0x00040000
84#define ACK_R 0x00000000
85#define ACK_W 0x00020000
86#define ACK_H 0x00000000
87#define ACK_L 0x00010000
88#define DM_INC 0x00004000
89#define DM_DEC 0x00008000
90#define DM_FIX 0x0000c000
91#define SM_INC 0x00001000
92#define SM_DEC 0x00002000
93#define SM_FIX 0x00003000
94#define RS_IN 0x00000200
95#define RS_OUT 0x00000300
96#define TS_BLK 0x00000040
97#define TM_BUR 0x00000020
98#define CHCR_DE 0x00000001
99#define CHCR_TE 0x00000002
100#define CHCR_IE 0x00000004
101
102#endif
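
A sketch of SoC platform data built from these structures; the slave id, FIFO address, CHCR bits, MID/RID value and TS field layout are placeholders, not taken from a real datasheet.

#include <linux/kernel.h>
#include <linux/sh_dma.h>

static const struct sh_dmae_slave_config dmae_slaves[] = {
        {
                .slave_id       = 1,            /* matched against sh_dmae_slave.slave_id */
                .addr           = 0xffe00008,   /* peripheral FIFO address */
                .chcr           = DM_FIX | SM_INC | RS_OUT | TS_BLK | TM_BUR,
                .mid_rid        = 0x21,
        },
};

static const struct sh_dmae_channel dmae_channels[] = {
        { .offset = 0x00, .dmars = 0x00, .dmars_bit = 0 },
};

/* Passed as dev.platform_data of the DMAC platform device. */
static struct sh_dmae_pdata dmae_platform_data = {
        .slave          = dmae_slaves,
        .slave_num      = ARRAY_SIZE(dmae_slaves),
        .channel        = dmae_channels,
        .channel_num    = ARRAY_SIZE(dmae_channels),
        .ts_low_shift   = 3,                    /* placeholder TS field layout */
        .ts_low_mask    = 0x18,
        .ts_high_shift  = 20,
        .ts_high_mask   = 0x00300000,
        .dmaor_init     = DMAOR_DME,
};
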
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 51d288d8ac88..0d6cd38e673d 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -1,6 +1,8 @@
1#ifndef __SH_INTC_H 1#ifndef __SH_INTC_H
2#define __SH_INTC_H 2#define __SH_INTC_H
3 3
4#include <linux/ioport.h>
5
4typedef unsigned char intc_enum; 6typedef unsigned char intc_enum;
5 7
6struct intc_vect { 8struct intc_vect {
@@ -21,6 +23,9 @@ struct intc_group {
21struct intc_mask_reg { 23struct intc_mask_reg {
22 unsigned long set_reg, clr_reg, reg_width; 24 unsigned long set_reg, clr_reg, reg_width;
23 intc_enum enum_ids[32]; 25 intc_enum enum_ids[32];
26#ifdef CONFIG_INTC_BALANCING
27 unsigned long dist_reg;
28#endif
24#ifdef CONFIG_SMP 29#ifdef CONFIG_SMP
25 unsigned long smp; 30 unsigned long smp;
26#endif 31#endif
@@ -39,8 +44,14 @@ struct intc_sense_reg {
39 intc_enum enum_ids[16]; 44 intc_enum enum_ids[16];
40}; 45};
41 46
47#ifdef CONFIG_INTC_BALANCING
48#define INTC_SMP_BALANCING(reg) .dist_reg = (reg)
49#else
50#define INTC_SMP_BALANCING(reg)
51#endif
52
42#ifdef CONFIG_SMP 53#ifdef CONFIG_SMP
43#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8) 54#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
44#else 55#else
45#define INTC_SMP(stride, nr) 56#define INTC_SMP(stride, nr)
46#endif 57#endif
@@ -71,6 +82,8 @@ struct intc_hw_desc {
71 82
72struct intc_desc { 83struct intc_desc {
73 char *name; 84 char *name;
85 struct resource *resource;
86 unsigned int num_resources;
74 intc_enum force_enable; 87 intc_enum force_enable;
75 intc_enum force_disable; 88 intc_enum force_disable;
76 struct intc_hw_desc hw; 89 struct intc_hw_desc hw;
@@ -92,9 +105,18 @@ struct intc_desc symbol __initdata = { \
92 prio_regs, sense_regs, ack_regs), \ 105 prio_regs, sense_regs, ack_regs), \
93} 106}
94 107
95void __init register_intc_controller(struct intc_desc *desc); 108int __init register_intc_controller(struct intc_desc *desc);
96int intc_set_priority(unsigned int irq, unsigned int prio); 109int intc_set_priority(unsigned int irq, unsigned int prio);
97 110
111#ifdef CONFIG_INTC_USERIMASK
112int register_intc_userimask(unsigned long addr);
113#else
114static inline int register_intc_userimask(unsigned long addr)
115{
116 return 0;
117}
118#endif
119
98int reserve_irq_vector(unsigned int irq); 120int reserve_irq_vector(unsigned int irq);
99void reserve_irq_legacy(void); 121void reserve_irq_legacy(void);
100 122
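
Since register_intc_controller() now returns an error code instead of void, SoC setup code is expected to check it. A minimal sketch; soc_intc_desc is assumed to be a descriptor defined elsewhere.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sh_intc.h>

extern struct intc_desc soc_intc_desc;  /* interrupt controller descriptor, defined elsewhere */

void __init soc_init_irq(void)
{
        if (register_intc_controller(&soc_intc_desc))
                pr_err("Failed to register interrupt controller\n");
}
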
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5ecb222af9..4d5d2f546dbf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -27,6 +27,8 @@
27#ifndef _LINUX_SRCU_H 27#ifndef _LINUX_SRCU_H
28#define _LINUX_SRCU_H 28#define _LINUX_SRCU_H
29 29
30#include <linux/mutex.h>
31
30struct srcu_struct_array { 32struct srcu_struct_array {
31 int c[2]; 33 int c[2];
32}; 34};
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp);
84/** 86/**
85 * srcu_read_lock_held - might we be in SRCU read-side critical section? 87 * srcu_read_lock_held - might we be in SRCU read-side critical section?
86 * 88 *
87 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in 89 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
88 * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, 90 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
89 * this assumes we are in an SRCU read-side critical section unless it can 91 * this assumes we are in an SRCU read-side critical section unless it can
90 * prove otherwise. 92 * prove otherwise.
91 */ 93 */
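
srcu_read_lock_held() is intended for debug assertions and lockdep-style checks inside SRCU read-side sections. A minimal sketch; my_srcu is a hypothetical SRCU domain, assumed to be initialized elsewhere with init_srcu_struct().

#include <linux/kernel.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;      /* init_srcu_struct(&my_srcu) at setup time */

static void srcu_reader(void)
{
        int idx;

        idx = srcu_read_lock(&my_srcu);
        /* Debug builds can assert the protection this code relies on. */
        WARN_ON_ONCE(!srcu_read_lock_held(&my_srcu));
        /* ... read SRCU-protected data here ... */
        srcu_read_unlock(&my_srcu, idx);
}
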
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a814..6b524a0d02e4 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
1#ifndef _LINUX_STOP_MACHINE 1#ifndef _LINUX_STOP_MACHINE
2#define _LINUX_STOP_MACHINE 2#define _LINUX_STOP_MACHINE
3/* "Bogolock": stop the entire machine, disable interrupts. This is a 3
4 very heavy lock, which is equivalent to grabbing every spinlock
5 (and more). So the "read" side to such a lock is anything which
6 disables preempt. */
7#include <linux/cpu.h> 4#include <linux/cpu.h>
8#include <linux/cpumask.h> 5#include <linux/cpumask.h>
6#include <linux/list.h>
9#include <asm/system.h> 7#include <asm/system.h>
10 8
9/*
10 * stop_cpu[s]() is a simplistic per-cpu maximum-priority cpu
11 * monopolization mechanism. The caller can specify a non-sleeping
12 * function to be executed on a single or multiple cpus preempting all
13 * other processes and monopolizing those cpus until it finishes.
14 *
15 * Resources for this mechanism are preallocated when a cpu is brought
16 * up and requests are guaranteed to be served as long as the target
17 * cpus are online.
18 */
19typedef int (*cpu_stop_fn_t)(void *arg);
20
21#ifdef CONFIG_SMP
22
23struct cpu_stop_work {
24 struct list_head list; /* cpu_stopper->works */
25 cpu_stop_fn_t fn;
26 void *arg;
27 struct cpu_stop_done *done;
28};
29
30int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
31void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
32 struct cpu_stop_work *work_buf);
33int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
34int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
35
36#else /* CONFIG_SMP */
37
38#include <linux/workqueue.h>
39
40struct cpu_stop_work {
41 struct work_struct work;
42 cpu_stop_fn_t fn;
43 void *arg;
44};
45
46static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
47{
48 int ret = -ENOENT;
49 preempt_disable();
50 if (cpu == smp_processor_id())
51 ret = fn(arg);
52 preempt_enable();
53 return ret;
54}
55
56static void stop_one_cpu_nowait_workfn(struct work_struct *work)
57{
58 struct cpu_stop_work *stwork =
59 container_of(work, struct cpu_stop_work, work);
60 preempt_disable();
61 stwork->fn(stwork->arg);
62 preempt_enable();
63}
64
65static inline void stop_one_cpu_nowait(unsigned int cpu,
66 cpu_stop_fn_t fn, void *arg,
67 struct cpu_stop_work *work_buf)
68{
69 if (cpu == smp_processor_id()) {
70 INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
71 work_buf->fn = fn;
72 work_buf->arg = arg;
73 schedule_work(&work_buf->work);
74 }
75}
76
77static inline int stop_cpus(const struct cpumask *cpumask,
78 cpu_stop_fn_t fn, void *arg)
79{
80 if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
81 return stop_one_cpu(raw_smp_processor_id(), fn, arg);
82 return -ENOENT;
83}
84
85static inline int try_stop_cpus(const struct cpumask *cpumask,
86 cpu_stop_fn_t fn, void *arg)
87{
88 return stop_cpus(cpumask, fn, arg);
89}
90
91#endif /* CONFIG_SMP */
92
93/*
94 * stop_machine "Bogolock": stop the entire machine, disable
95 * interrupts. This is a very heavy lock, which is equivalent to
96 * grabbing every spinlock (and more). So the "read" side to such a
97 * lock is anything which disables preempt.
98 */
11#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) 99#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
12 100
13/** 101/**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
36 */ 124 */
37int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 125int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
38 126
39/** 127#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
40 * stop_machine_create: create all stop_machine threads
41 *
42 * Description: This causes all stop_machine threads to be created before
43 * stop_machine actually gets called. This can be used by subsystems that
44 * need a non failing stop_machine infrastructure.
45 */
46int stop_machine_create(void);
47
48/**
49 * stop_machine_destroy: destroy all stop_machine threads
50 *
51 * Description: This causes all stop_machine threads which were created with
52 * stop_machine_create to be destroyed again.
53 */
54void stop_machine_destroy(void);
55
56#else
57 128
58static inline int stop_machine(int (*fn)(void *), void *data, 129static inline int stop_machine(int (*fn)(void *), void *data,
59 const struct cpumask *cpus) 130 const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
65 return ret; 136 return ret;
66} 137}
67 138
68static inline int stop_machine_create(void) { return 0; } 139#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
69static inline void stop_machine_destroy(void) { } 140#endif /* _LINUX_STOP_MACHINE */
70
71#endif /* CONFIG_SMP */
72#endif /* _LINUX_STOP_MACHINE */
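
A minimal sketch of the new cpu_stop interface declared above; the callback, its argument and the target cpu are illustrative, not part of this patch.

#include <linux/errno.h>
#include <linux/stop_machine.h>

static int poke_hardware(void *arg)
{
        unsigned long val = (unsigned long)arg;

        /* Runs with maximum priority on the target CPU; must not sleep. */
        return val ? 0 : -EINVAL;
}

static int poke_cpu(unsigned int cpu)
{
        /* Blocks until poke_hardware() has completed on "cpu". */
        return stop_one_cpu(cpu, poke_hardware, (void *)1UL);
}
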
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 996df4dac7d4..87d7ec0bf779 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -54,6 +54,7 @@ struct rpc_cred {
54#define RPCAUTH_CRED_NEW 0 54#define RPCAUTH_CRED_NEW 0
55#define RPCAUTH_CRED_UPTODATE 1 55#define RPCAUTH_CRED_UPTODATE 1
56#define RPCAUTH_CRED_HASHED 2 56#define RPCAUTH_CRED_HASHED 2
57#define RPCAUTH_CRED_NEGATIVE 3
57 58
58#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 59#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0
59 60
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index d48d4e605f74..671538d25bc1 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -82,6 +82,7 @@ struct gss_cred {
82 enum rpc_gss_svc gc_service; 82 enum rpc_gss_svc gc_service;
83 struct gss_cl_ctx *gc_ctx; 83 struct gss_cl_ctx *gc_ctx;
84 struct gss_upcall_msg *gc_upcall; 84 struct gss_upcall_msg *gc_upcall;
85 unsigned long gc_upcall_timestamp;
85 unsigned char gc_machine_cred : 1; 86 unsigned char gc_machine_cred : 1;
86}; 87};
87 88
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 03f33330ece2..5d8048beb051 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -35,7 +35,8 @@ int gss_import_sec_context(
35 const void* input_token, 35 const void* input_token,
36 size_t bufsize, 36 size_t bufsize,
37 struct gss_api_mech *mech, 37 struct gss_api_mech *mech,
38 struct gss_ctx **ctx_id); 38 struct gss_ctx **ctx_id,
39 gfp_t gfp_mask);
39u32 gss_get_mic( 40u32 gss_get_mic(
40 struct gss_ctx *ctx_id, 41 struct gss_ctx *ctx_id,
41 struct xdr_buf *message, 42 struct xdr_buf *message,
@@ -80,6 +81,8 @@ struct gss_api_mech {
80 /* pseudoflavors supported by this mechanism: */ 81 /* pseudoflavors supported by this mechanism: */
81 int gm_pf_num; 82 int gm_pf_num;
82 struct pf_desc * gm_pfs; 83 struct pf_desc * gm_pfs;
84 /* Should the following be a callback operation instead? */
85 const char *gm_upcall_enctypes;
83}; 86};
84 87
85/* and must provide the following operations: */ 88/* and must provide the following operations: */
@@ -87,7 +90,8 @@ struct gss_api_ops {
87 int (*gss_import_sec_context)( 90 int (*gss_import_sec_context)(
88 const void *input_token, 91 const void *input_token,
89 size_t bufsize, 92 size_t bufsize,
90 struct gss_ctx *ctx_id); 93 struct gss_ctx *ctx_id,
94 gfp_t gfp_mask);
91 u32 (*gss_get_mic)( 95 u32 (*gss_get_mic)(
92 struct gss_ctx *ctx_id, 96 struct gss_ctx *ctx_id,
93 struct xdr_buf *message, 97 struct xdr_buf *message,
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index e7bbdba474d5..5af2931cf58d 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -4,7 +4,7 @@
4 * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, 4 * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
5 * lib/gssapi/krb5/gssapiP_krb5.h, and others 5 * lib/gssapi/krb5/gssapiP_krb5.h, and others
6 * 6 *
7 * Copyright (c) 2000 The Regents of the University of Michigan. 7 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
8 * All rights reserved. 8 * All rights reserved.
9 * 9 *
10 * Andy Adamson <andros@umich.edu> 10 * Andy Adamson <andros@umich.edu>
@@ -36,17 +36,86 @@
36 * 36 *
37 */ 37 */
38 38
39#include <linux/crypto.h>
39#include <linux/sunrpc/auth_gss.h> 40#include <linux/sunrpc/auth_gss.h>
40#include <linux/sunrpc/gss_err.h> 41#include <linux/sunrpc/gss_err.h>
41#include <linux/sunrpc/gss_asn1.h> 42#include <linux/sunrpc/gss_asn1.h>
42 43
44/* Length of constant used in key derivation */
45#define GSS_KRB5_K5CLENGTH (5)
46
47/* Maximum key length (in bytes) for the supported crypto algorithms */
48#define GSS_KRB5_MAX_KEYLEN (32)
49
50/* Maximum checksum function output for the supported crypto algorithms */
51#define GSS_KRB5_MAX_CKSUM_LEN (20)
52
53/* Maximum blocksize for the supported crypto algorithms */
54#define GSS_KRB5_MAX_BLOCKSIZE (16)
55
56struct krb5_ctx;
57
58struct gss_krb5_enctype {
59 const u32 etype; /* encryption (key) type */
60 const u32 ctype; /* checksum type */
61 const char *name; /* "friendly" name */
62 const char *encrypt_name; /* crypto encrypt name */
63 const char *cksum_name; /* crypto checksum name */
64 const u16 signalg; /* signing algorithm */
65 const u16 sealalg; /* sealing algorithm */
66 const u32 blocksize; /* encryption blocksize */
67 const u32 conflen; /* confounder length
68 (normally the same as
69 the blocksize) */
70 const u32 cksumlength; /* checksum length */
71 const u32 keyed_cksum; /* is it a keyed cksum? */
72 const u32 keybytes; /* raw key len, in bytes */
73 const u32 keylength; /* final key len, in bytes */
74 u32 (*encrypt) (struct crypto_blkcipher *tfm,
75 void *iv, void *in, void *out,
76 int length); /* encryption function */
77 u32 (*decrypt) (struct crypto_blkcipher *tfm,
78 void *iv, void *in, void *out,
79 int length); /* decryption function */
80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
81 struct xdr_netobj *in,
82 struct xdr_netobj *out); /* complete key generation */
83 u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
84 struct xdr_buf *buf, int ec,
85 struct page **pages); /* v2 encryption function */
86 u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
87 struct xdr_buf *buf, u32 *headskip,
88 u32 *tailskip); /* v2 decryption function */
89};
90
91/* krb5_ctx flags definitions */
92#define KRB5_CTX_FLAG_INITIATOR 0x00000001
93#define KRB5_CTX_FLAG_CFX 0x00000002
94#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
95
43struct krb5_ctx { 96struct krb5_ctx {
44 int initiate; /* 1 = initiating, 0 = accepting */ 97 int initiate; /* 1 = initiating, 0 = accepting */
98 u32 enctype;
99 u32 flags;
100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
45 struct crypto_blkcipher *enc; 101 struct crypto_blkcipher *enc;
46 struct crypto_blkcipher *seq; 102 struct crypto_blkcipher *seq;
103 struct crypto_blkcipher *acceptor_enc;
104 struct crypto_blkcipher *initiator_enc;
105 struct crypto_blkcipher *acceptor_enc_aux;
106 struct crypto_blkcipher *initiator_enc_aux;
107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
108 u8 cksum[GSS_KRB5_MAX_KEYLEN];
47 s32 endtime; 109 s32 endtime;
48 u32 seq_send; 110 u32 seq_send;
111 u64 seq_send64;
49 struct xdr_netobj mech_used; 112 struct xdr_netobj mech_used;
113 u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
114 u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
115 u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
116 u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
117 u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
118 u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
50}; 119};
51 120
52extern spinlock_t krb5_seq_lock; 121extern spinlock_t krb5_seq_lock;
@@ -57,6 +126,18 @@ extern spinlock_t krb5_seq_lock;
57#define KG_TOK_MIC_MSG 0x0101 126#define KG_TOK_MIC_MSG 0x0101
58#define KG_TOK_WRAP_MSG 0x0201 127#define KG_TOK_WRAP_MSG 0x0201
59 128
129#define KG2_TOK_INITIAL 0x0101
130#define KG2_TOK_RESPONSE 0x0202
131#define KG2_TOK_MIC 0x0404
132#define KG2_TOK_WRAP 0x0504
133
134#define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01
135#define KG2_TOKEN_FLAG_SEALED 0x02
136#define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04
137
138#define KG2_RESP_FLAG_ERROR 0x0001
139#define KG2_RESP_FLAG_DELEG_OK 0x0002
140
60enum sgn_alg { 141enum sgn_alg {
61 SGN_ALG_DES_MAC_MD5 = 0x0000, 142 SGN_ALG_DES_MAC_MD5 = 0x0000,
62 SGN_ALG_MD2_5 = 0x0001, 143 SGN_ALG_MD2_5 = 0x0001,
@@ -81,6 +162,9 @@ enum seal_alg {
81#define CKSUMTYPE_RSA_MD5_DES 0x0008 162#define CKSUMTYPE_RSA_MD5_DES 0x0008
82#define CKSUMTYPE_NIST_SHA 0x0009 163#define CKSUMTYPE_NIST_SHA 0x0009
83#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c 164#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
165#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
166#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
167#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
84 168
85/* from gssapi_err_krb5.h */ 169/* from gssapi_err_krb5.h */
86#define KG_CCACHE_NOMATCH (39756032L) 170#define KG_CCACHE_NOMATCH (39756032L)
@@ -111,11 +195,56 @@ enum seal_alg {
111#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ 195#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
112#define ENCTYPE_DES_HMAC_SHA1 0x0008 196#define ENCTYPE_DES_HMAC_SHA1 0x0008
113#define ENCTYPE_DES3_CBC_SHA1 0x0010 197#define ENCTYPE_DES3_CBC_SHA1 0x0010
198#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
199#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
200#define ENCTYPE_ARCFOUR_HMAC 0x0017
201#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
114#define ENCTYPE_UNKNOWN 0x01ff 202#define ENCTYPE_UNKNOWN 0x01ff
115 203
116s32 204/*
117make_checksum(char *, char *header, int hdrlen, struct xdr_buf *body, 205 * Constants used for key derivation
118 int body_offset, struct xdr_netobj *cksum); 206 */
207/* for 3DES */
208#define KG_USAGE_SEAL (22)
209#define KG_USAGE_SIGN (23)
210#define KG_USAGE_SEQ (24)
211
212/* from rfc3961 */
213#define KEY_USAGE_SEED_CHECKSUM (0x99)
214#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
215#define KEY_USAGE_SEED_INTEGRITY (0x55)
216
217/* from rfc4121 */
218#define KG_USAGE_ACCEPTOR_SEAL (22)
219#define KG_USAGE_ACCEPTOR_SIGN (23)
220#define KG_USAGE_INITIATOR_SEAL (24)
221#define KG_USAGE_INITIATOR_SIGN (25)
222
223/*
224 * This compile-time check verifies that we will not exceed the
225 * slack space allotted by the client and server auth_gss code
226 * before they call gss_wrap().
227 */
228#define GSS_KRB5_MAX_SLACK_NEEDED \
229 (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
230 + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
231 + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
232 + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
233 + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
234 + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
235 + 4 + 4 /* RPC verifier */ \
236 + GSS_KRB5_TOK_HDR_LEN \
237 + GSS_KRB5_MAX_CKSUM_LEN)
238
239u32
240make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
241 struct xdr_buf *body, int body_offset, u8 *cksumkey,
242 unsigned int usage, struct xdr_netobj *cksumout);
243
244u32
245make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
246 struct xdr_buf *body, int body_offset, u8 *key,
247 unsigned int usage, struct xdr_netobj *cksum);
119 248
120u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *, 249u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
121 struct xdr_netobj *); 250 struct xdr_netobj *);
@@ -149,11 +278,54 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
149 int offset); 278 int offset);
150 279
151s32 280s32
152krb5_make_seq_num(struct crypto_blkcipher *key, 281krb5_make_seq_num(struct krb5_ctx *kctx,
282 struct crypto_blkcipher *key,
153 int direction, 283 int direction,
154 u32 seqnum, unsigned char *cksum, unsigned char *buf); 284 u32 seqnum, unsigned char *cksum, unsigned char *buf);
155 285
156s32 286s32
157krb5_get_seq_num(struct crypto_blkcipher *key, 287krb5_get_seq_num(struct krb5_ctx *kctx,
158 unsigned char *cksum, 288 unsigned char *cksum,
159 unsigned char *buf, int *direction, u32 *seqnum); 289 unsigned char *buf, int *direction, u32 *seqnum);
290
291int
292xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
293
294u32
295krb5_derive_key(const struct gss_krb5_enctype *gk5e,
296 const struct xdr_netobj *inkey,
297 struct xdr_netobj *outkey,
298 const struct xdr_netobj *in_constant,
299 gfp_t gfp_mask);
300
301u32
302gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
303 struct xdr_netobj *randombits,
304 struct xdr_netobj *key);
305
306u32
307gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
308 struct xdr_netobj *randombits,
309 struct xdr_netobj *key);
310
311u32
312gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
313 struct xdr_buf *buf, int ec,
314 struct page **pages);
315
316u32
317gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
318 struct xdr_buf *buf, u32 *plainoffset,
319 u32 *plainlen);
320
321int
322krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
323 struct crypto_blkcipher *cipher,
324 unsigned char *cksum);
325
326int
327krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
328 struct crypto_blkcipher *cipher,
329 s32 seqnum);
330void
331gss_krb5_make_confounder(char *p, u32 conflen);
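
A sketch of a key-derivation call against the declarations above. The usage value, the 5-byte constant layout (big-endian usage number followed by one KEY_USAGE_SEED_* byte, following the RFC 3961/4121 convention these defines come from) and the output handling are illustrative, not taken from this patch.

#include <linux/gfp.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/gss_krb5.h>

static u32 derive_seal_key(struct krb5_ctx *kctx, struct xdr_netobj *out)
{
        u8 cdata[GSS_KRB5_K5CLENGTH];
        struct xdr_netobj c = { .len = GSS_KRB5_K5CLENGTH, .data = cdata };
        struct xdr_netobj sess = {
                .len  = kctx->gk5e->keylength,
                .data = kctx->Ksess,
        };
        u32 usage = KG_USAGE_ACCEPTOR_SEAL;

        /* 4-byte big-endian usage number plus the encryption seed byte. */
        cdata[0] = (usage >> 24) & 0xff;
        cdata[1] = (usage >> 16) & 0xff;
        cdata[2] = (usage >> 8) & 0xff;
        cdata[3] = usage & 0xff;
        cdata[4] = KEY_USAGE_SEED_ENCRYPTION;

        /* "out" must point at a caller-provided buffer of gk5e->keylength bytes. */
        return krb5_derive_key(kctx->gk5e, &sess, out, &c, GFP_KERNEL);
}
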
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 77f78e56c481..b6edbc0ea83d 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -26,6 +26,7 @@
26#define _LINUX_SUNRPC_METRICS_H 26#define _LINUX_SUNRPC_METRICS_H
27 27
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/ktime.h>
29 30
30#define RPC_IOSTATS_VERS "1.0" 31#define RPC_IOSTATS_VERS "1.0"
31 32
@@ -58,9 +59,9 @@ struct rpc_iostats {
58 * and the total time the request spent from init to release 59 * and the total time the request spent from init to release
59 * are measured. 60 * are measured.
60 */ 61 */
61 unsigned long long om_queue, /* jiffies queued for xmit */ 62 ktime_t om_queue, /* queued for xmit */
62 om_rtt, /* jiffies for RPC RTT */ 63 om_rtt, /* RPC RTT */
63 om_execute; /* jiffies for RPC execution */ 64 om_execute; /* RPC execution */
64} ____cacheline_aligned; 65} ____cacheline_aligned;
65 66
66struct rpc_task; 67struct rpc_task;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 7bc7fd5291ce..7be4f3a6d246 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -10,6 +10,7 @@
10#define _LINUX_SUNRPC_SCHED_H_ 10#define _LINUX_SUNRPC_SCHED_H_
11 11
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/ktime.h>
13#include <linux/sunrpc/types.h> 14#include <linux/sunrpc/types.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
15#include <linux/wait.h> 16#include <linux/wait.h>
@@ -40,21 +41,15 @@ struct rpc_wait {
40 * This is the RPC task struct 41 * This is the RPC task struct
41 */ 42 */
42struct rpc_task { 43struct rpc_task {
43#ifdef RPC_DEBUG
44 unsigned long tk_magic; /* 0xf00baa */
45#endif
46 atomic_t tk_count; /* Reference count */ 44 atomic_t tk_count; /* Reference count */
47 struct list_head tk_task; /* global list of tasks */ 45 struct list_head tk_task; /* global list of tasks */
48 struct rpc_clnt * tk_client; /* RPC client */ 46 struct rpc_clnt * tk_client; /* RPC client */
49 struct rpc_rqst * tk_rqstp; /* RPC request */ 47 struct rpc_rqst * tk_rqstp; /* RPC request */
50 int tk_status; /* result of last operation */
51 48
52 /* 49 /*
53 * RPC call state 50 * RPC call state
54 */ 51 */
55 struct rpc_message tk_msg; /* RPC call info */ 52 struct rpc_message tk_msg; /* RPC call info */
56 __u8 tk_garb_retry;
57 __u8 tk_cred_retry;
58 53
59 /* 54 /*
60 * callback to be executed after waking up 55 * callback to be executed after waking up
@@ -67,7 +62,6 @@ struct rpc_task {
67 void * tk_calldata; 62 void * tk_calldata;
68 63
69 unsigned long tk_timeout; /* timeout for rpc_sleep() */ 64 unsigned long tk_timeout; /* timeout for rpc_sleep() */
70 unsigned short tk_flags; /* misc flags */
71 unsigned long tk_runstate; /* Task run status */ 65 unsigned long tk_runstate; /* Task run status */
72 struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could 66 struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
73 * be any workqueue 67 * be any workqueue
@@ -78,17 +72,19 @@ struct rpc_task {
78 struct rpc_wait tk_wait; /* RPC wait */ 72 struct rpc_wait tk_wait; /* RPC wait */
79 } u; 73 } u;
80 74
81 unsigned short tk_timeouts; /* maj timeouts */ 75 ktime_t tk_start; /* RPC task init timestamp */
82 size_t tk_bytes_sent; /* total bytes sent */
83 unsigned long tk_start; /* RPC task init timestamp */
84 long tk_rtt; /* round-trip time (jiffies) */
85 76
86 pid_t tk_owner; /* Process id for batching tasks */ 77 pid_t tk_owner; /* Process id for batching tasks */
87 unsigned char tk_priority : 2;/* Task priority */ 78 int tk_status; /* result of last operation */
79 unsigned short tk_flags; /* misc flags */
80 unsigned short tk_timeouts; /* maj timeouts */
88 81
89#ifdef RPC_DEBUG 82#ifdef RPC_DEBUG
90 unsigned short tk_pid; /* debugging aid */ 83 unsigned short tk_pid; /* debugging aid */
91#endif 84#endif
85 unsigned char tk_priority : 2,/* Task priority */
86 tk_garb_retry : 2,
87 tk_cred_retry : 2;
92}; 88};
93#define tk_xprt tk_client->cl_xprt 89#define tk_xprt tk_client->cl_xprt
94 90
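
With tk_start converted from jiffies to ktime_t, elapsed-time calculations use the ktime helpers; a one-line illustrative helper (not part of this patch):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sunrpc/sched.h>

static ktime_t rpc_task_elapsed(const struct rpc_task *task)
{
        /* time since the task was initialized */
        return ktime_sub(ktime_get(), task->tk_start);
}
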
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index f5cc0898bc53..35cf2e8cd7c6 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -1,7 +1,10 @@
1/* 1/*
2 * include/linux/sunrpc/xdr.h 2 * XDR standard data types and function declarations
3 * 3 *
4 * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
5 *
6 * Based on:
7 * RFC 4506 "XDR: External Data Representation Standard", May 2006
5 */ 8 */
6 9
7#ifndef _SUNRPC_XDR_H_ 10#ifndef _SUNRPC_XDR_H_
@@ -62,7 +65,6 @@ struct xdr_buf {
62 65
63 unsigned int buflen, /* Total length of storage buffer */ 66 unsigned int buflen, /* Total length of storage buffer */
64 len; /* Length of XDR encoded message */ 67 len; /* Length of XDR encoded message */
65
66}; 68};
67 69
68/* 70/*
@@ -178,7 +180,7 @@ struct xdr_array2_desc {
178}; 180};
179 181
180extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, 182extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
181 struct xdr_array2_desc *desc); 183 struct xdr_array2_desc *desc);
182extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, 184extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
183 struct xdr_array2_desc *desc); 185 struct xdr_array2_desc *desc);
184 186
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 6f9457a75b8f..b51470302399 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -13,6 +13,7 @@
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/in.h> 14#include <linux/in.h>
15#include <linux/kref.h> 15#include <linux/kref.h>
16#include <linux/ktime.h>
16#include <linux/sunrpc/sched.h> 17#include <linux/sunrpc/sched.h>
17#include <linux/sunrpc/xdr.h> 18#include <linux/sunrpc/xdr.h>
18#include <linux/sunrpc/msg_prot.h> 19#include <linux/sunrpc/msg_prot.h>
@@ -65,8 +66,6 @@ struct rpc_rqst {
65 struct rpc_task * rq_task; /* RPC task data */ 66 struct rpc_task * rq_task; /* RPC task data */
66 __be32 rq_xid; /* request XID */ 67 __be32 rq_xid; /* request XID */
67 int rq_cong; /* has incremented xprt->cong */ 68 int rq_cong; /* has incremented xprt->cong */
68 int rq_reply_bytes_recvd; /* number of reply */
69 /* bytes received */
70 u32 rq_seqno; /* gss seq no. used on req. */ 69 u32 rq_seqno; /* gss seq no. used on req. */
71 int rq_enc_pages_num; 70 int rq_enc_pages_num;
72 struct page **rq_enc_pages; /* scratch pages for use by 71 struct page **rq_enc_pages; /* scratch pages for use by
@@ -77,12 +76,16 @@ struct rpc_rqst {
77 __u32 * rq_buffer; /* XDR encode buffer */ 76 __u32 * rq_buffer; /* XDR encode buffer */
78 size_t rq_callsize, 77 size_t rq_callsize,
79 rq_rcvsize; 78 rq_rcvsize;
79 size_t rq_xmit_bytes_sent; /* total bytes sent */
80 size_t rq_reply_bytes_recvd; /* total reply bytes */
81 /* received */
80 82
81 struct xdr_buf rq_private_buf; /* The receive buffer 83 struct xdr_buf rq_private_buf; /* The receive buffer
82 * used in the softirq. 84 * used in the softirq.
83 */ 85 */
84 unsigned long rq_majortimeo; /* major timeout alarm */ 86 unsigned long rq_majortimeo; /* major timeout alarm */
85 unsigned long rq_timeout; /* Current timeout value */ 87 unsigned long rq_timeout; /* Current timeout value */
88 ktime_t rq_rtt; /* round-trip time */
86 unsigned int rq_retries; /* # of retries */ 89 unsigned int rq_retries; /* # of retries */
87 unsigned int rq_connect_cookie; 90 unsigned int rq_connect_cookie;
88 /* A cookie used to track the 91 /* A cookie used to track the
@@ -94,7 +97,7 @@ struct rpc_rqst {
94 */ 97 */
95 u32 rq_bytes_sent; /* Bytes we have sent */ 98 u32 rq_bytes_sent; /* Bytes we have sent */
96 99
97 unsigned long rq_xtime; /* when transmitted */ 100 ktime_t rq_xtime; /* transmit time stamp */
98 int rq_ntrans; 101 int rq_ntrans;
99 102
100#if defined(CONFIG_NFS_V4_1) 103#if defined(CONFIG_NFS_V4_1)
@@ -174,8 +177,7 @@ struct rpc_xprt {
174 /* 177 /*
175 * Connection of transports 178 * Connection of transports
176 */ 179 */
177 unsigned long connect_timeout, 180 unsigned long bind_timeout,
178 bind_timeout,
179 reestablish_timeout; 181 reestablish_timeout;
180 unsigned int connect_cookie; /* A cookie that gets bumped 182 unsigned int connect_cookie; /* A cookie that gets bumped
181 every time the transport 183 every time the transport
@@ -294,7 +296,6 @@ void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
294void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); 296void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
295void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); 297void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
296void xprt_write_space(struct rpc_xprt *xprt); 298void xprt_write_space(struct rpc_xprt *xprt);
297void xprt_update_rtt(struct rpc_task *task);
298void xprt_adjust_cwnd(struct rpc_task *task, int result); 299void xprt_adjust_cwnd(struct rpc_task *task, int result);
299struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); 300struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
300void xprt_complete_rqst(struct rpc_task *task, int copied); 301void xprt_complete_rqst(struct rpc_task *task, int copied);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..b232ccc0ee29 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
42 * @idle_waketime: Time when the idle was interrupted 42 * @idle_waketime: Time when the idle was interrupted
43 * @idle_exittime: Time when the idle state was left 43 * @idle_exittime: Time when the idle state was left
44 * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped 44 * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
45 * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
45 * @sleep_length: Duration of the current idle sleep 46 * @sleep_length: Duration of the current idle sleep
46 * @do_timer_lst: CPU was the last one doing do_timer before going idle 47 * @do_timer_lst: CPU was the last one doing do_timer before going idle
47 */ 48 */
@@ -60,7 +61,7 @@ struct tick_sched {
60 ktime_t idle_waketime; 61 ktime_t idle_waketime;
61 ktime_t idle_exittime; 62 ktime_t idle_exittime;
62 ktime_t idle_sleeptime; 63 ktime_t idle_sleeptime;
63 ktime_t idle_lastupdate; 64 ktime_t iowait_sleeptime;
64 ktime_t sleep_length; 65 ktime_t sleep_length;
65 unsigned long last_jiffies; 66 unsigned long last_jiffies;
66 unsigned long next_jiffies; 67 unsigned long next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
124extern void tick_nohz_restart_sched_tick(void); 125extern void tick_nohz_restart_sched_tick(void);
125extern ktime_t tick_nohz_get_sleep_length(void); 126extern ktime_t tick_nohz_get_sleep_length(void);
126extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 127extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
128extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
127# else 129# else
128static inline void tick_nohz_stop_sched_tick(int inidle) { } 130static inline void tick_nohz_stop_sched_tick(int inidle) { }
129static inline void tick_nohz_restart_sched_tick(void) { } 131static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
134 return len; 136 return len;
135} 137}
136static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } 138static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
139static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
137# endif /* !NO_HZ */ 140# endif /* !NO_HZ */
138 141
139#endif 142#endif
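
A sketch of reading the new per-cpu iowait-while-idle accounting, e.g. from a cpufreq governor; the printing is illustrative.

#include <linux/kernel.h>
#include <linux/tick.h>

static void show_idle_iowait(int cpu)
{
        u64 now_us, idle_us, iowait_us;

        idle_us   = get_cpu_idle_time_us(cpu, &now_us);
        iowait_us = get_cpu_iowait_time_us(cpu, &now_us);

        /* Both return -1 when NO_HZ idle accounting is unavailable. */
        pr_info("cpu%d: idle %llu us, iowait %llu us\n", cpu,
                (unsigned long long)idle_us, (unsigned long long)iowait_us);
}
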
diff --git a/include/linux/time.h b/include/linux/time.h
index 6e026e45a179..ea3559f0b3f2 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -150,7 +150,6 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
150extern int timekeeping_valid_for_hres(void); 150extern int timekeeping_valid_for_hres(void);
151extern u64 timekeeping_max_deferment(void); 151extern u64 timekeeping_max_deferment(void);
152extern void update_wall_time(void); 152extern void update_wall_time(void);
153extern void update_xtime_cache(u64 nsec);
154extern void timekeeping_leap_insert(int leapsecond); 153extern void timekeeping_leap_insert(int leapsecond);
155 154
156struct tms; 155struct tms;
diff --git a/include/linux/timer.h b/include/linux/timer.h
index a2d1eb6cb3f0..ea965b857a50 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -10,13 +10,19 @@
10struct tvec_base; 10struct tvec_base;
11 11
12struct timer_list { 12struct timer_list {
13 /*
14 * All fields that change during normal runtime grouped to the
15 * same cacheline
16 */
13 struct list_head entry; 17 struct list_head entry;
14 unsigned long expires; 18 unsigned long expires;
19 struct tvec_base *base;
15 20
16 void (*function)(unsigned long); 21 void (*function)(unsigned long);
17 unsigned long data; 22 unsigned long data;
18 23
19 struct tvec_base *base; 24 int slack;
25
20#ifdef CONFIG_TIMER_STATS 26#ifdef CONFIG_TIMER_STATS
21 void *start_site; 27 void *start_site;
22 char start_comm[16]; 28 char start_comm[16];
@@ -165,6 +171,8 @@ extern int mod_timer(struct timer_list *timer, unsigned long expires);
165extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); 171extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
166extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); 172extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
167 173
174extern void set_timer_slack(struct timer_list *time, int slack_hz);
175
168#define TIMER_NOT_PINNED 0 176#define TIMER_NOT_PINNED 0
169#define TIMER_PINNED 1 177#define TIMER_PINNED 1
170/* 178/*
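
A sketch of using the new set_timer_slack() to let a coarse timeout be coalesced with other timers; the timer, handler and HZ/10 slack value are illustrative.

#include <linux/jiffies.h>
#include <linux/timer.h>

static void my_timeout(unsigned long data)
{
        /* timer handler */
}

static struct timer_list my_timer;

static void arm_coarse_timer(void)
{
        setup_timer(&my_timer, my_timeout, 0);
        /* Let the expiry slip by up to HZ/10 jiffies to batch wakeups. */
        set_timer_slack(&my_timer, HZ / 10);
        mod_timer(&my_timer, jiffies + 5 * HZ);
}
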
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 7a082b32d8e1..32d852f8cbe4 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -232,13 +232,11 @@ struct timex {
232 */ 232 */
233extern unsigned long tick_usec; /* USER_HZ period (usec) */ 233extern unsigned long tick_usec; /* USER_HZ period (usec) */
234extern unsigned long tick_nsec; /* ACTHZ period (nsec) */ 234extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
235extern int tickadj; /* amount of adjustment per tick */
236 235
237/* 236/*
238 * phase-lock loop variables 237 * phase-lock loop variables
239 */ 238 */
240extern int time_status; /* clock synchronization status bits */ 239extern int time_status; /* clock synchronization status bits */
241extern long time_adjust; /* The amount of adjtime left */
242 240
243extern void ntp_init(void); 241extern void ntp_init(void);
244extern void ntp_clear(void); 242extern void ntp_clear(void);
@@ -271,9 +269,6 @@ extern void second_overflow(void);
271extern void update_ntp_one_tick(void); 269extern void update_ntp_one_tick(void);
272extern int do_adjtimex(struct timex *); 270extern int do_adjtimex(struct timex *);
273 271
274/* Don't use! Compatibility define for existing users. */
275#define tickadj (500/HZ ? : 1)
276
277int read_current_timer(unsigned long *timer_val); 272int read_current_timer(unsigned long *timer_val);
278 273
279/* The clock frequency of the i8253/i8254 PIT */ 274/* The clock frequency of the i8253/i8254 PIT */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be496..1d85f9a6a199 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,6 +33,65 @@ struct tracepoint {
33 * Keep in sync with vmlinux.lds.h. 33 * Keep in sync with vmlinux.lds.h.
34 */ 34 */
35 35
36/*
37 * Connect a probe to a tracepoint.
38 * Internal API, should not be used directly.
39 */
40extern int tracepoint_probe_register(const char *name, void *probe);
41
42/*
43 * Disconnect a probe from a tracepoint.
44 * Internal API, should not be used directly.
45 */
46extern int tracepoint_probe_unregister(const char *name, void *probe);
47
48extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
49extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
50extern void tracepoint_probe_update_all(void);
51
52struct tracepoint_iter {
53 struct module *module;
54 struct tracepoint *tracepoint;
55};
56
57extern void tracepoint_iter_start(struct tracepoint_iter *iter);
58extern void tracepoint_iter_next(struct tracepoint_iter *iter);
59extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
60extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
61extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
62 struct tracepoint *begin, struct tracepoint *end);
63
64/*
65 * tracepoint_synchronize_unregister must be called between the last tracepoint
66 * probe unregistration and the end of module exit to make sure there is no
67 * caller executing a probe when it is freed.
68 */
69static inline void tracepoint_synchronize_unregister(void)
70{
71 synchronize_sched();
72}
73
74#define PARAMS(args...) args
75
76#ifdef CONFIG_TRACEPOINTS
77extern void tracepoint_update_probe_range(struct tracepoint *begin,
78 struct tracepoint *end);
79#else
80static inline void tracepoint_update_probe_range(struct tracepoint *begin,
81 struct tracepoint *end)
82{ }
83#endif /* CONFIG_TRACEPOINTS */
84
85#endif /* _LINUX_TRACEPOINT_H */
86
87/*
88 * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
89 * file ifdef protection.
90 * This is due to the way trace events work. If a file includes two
91 * trace event headers under one "CREATE_TRACE_POINTS" the first include
92 * will override the TRACE_EVENT and break the second include.
93 */
94
36#ifndef DECLARE_TRACE 95#ifndef DECLARE_TRACE
37 96
38#define TP_PROTO(args...) args 97#define TP_PROTO(args...) args
@@ -96,9 +155,6 @@ struct tracepoint {
96#define EXPORT_TRACEPOINT_SYMBOL(name) \ 155#define EXPORT_TRACEPOINT_SYMBOL(name) \
97 EXPORT_SYMBOL(__tracepoint_##name) 156 EXPORT_SYMBOL(__tracepoint_##name)
98 157
99extern void tracepoint_update_probe_range(struct tracepoint *begin,
100 struct tracepoint *end);
101
102#else /* !CONFIG_TRACEPOINTS */ 158#else /* !CONFIG_TRACEPOINTS */
103#define DECLARE_TRACE(name, proto, args) \ 159#define DECLARE_TRACE(name, proto, args) \
104 static inline void _do_trace_##name(struct tracepoint *tp, proto) \ 160 static inline void _do_trace_##name(struct tracepoint *tp, proto) \
@@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
119#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) 175#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
120#define EXPORT_TRACEPOINT_SYMBOL(name) 176#define EXPORT_TRACEPOINT_SYMBOL(name)
121 177
122static inline void tracepoint_update_probe_range(struct tracepoint *begin,
123 struct tracepoint *end)
124{ }
125#endif /* CONFIG_TRACEPOINTS */ 178#endif /* CONFIG_TRACEPOINTS */
126#endif /* DECLARE_TRACE */ 179#endif /* DECLARE_TRACE */
127 180
128/*
129 * Connect a probe to a tracepoint.
130 * Internal API, should not be used directly.
131 */
132extern int tracepoint_probe_register(const char *name, void *probe);
133
134/*
135 * Disconnect a probe from a tracepoint.
136 * Internal API, should not be used directly.
137 */
138extern int tracepoint_probe_unregister(const char *name, void *probe);
139
140extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
141extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
142extern void tracepoint_probe_update_all(void);
143
144struct tracepoint_iter {
145 struct module *module;
146 struct tracepoint *tracepoint;
147};
148
149extern void tracepoint_iter_start(struct tracepoint_iter *iter);
150extern void tracepoint_iter_next(struct tracepoint_iter *iter);
151extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
152extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
153extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
154 struct tracepoint *begin, struct tracepoint *end);
155
156/*
157 * tracepoint_synchronize_unregister must be called between the last tracepoint
158 * probe unregistration and the end of module exit to make sure there is no
159 * caller executing a probe when it is freed.
160 */
161static inline void tracepoint_synchronize_unregister(void)
162{
163 synchronize_sched();
164}
165
166#define PARAMS(args...) args
167
168#endif /* _LINUX_TRACEPOINT_H */
169
170/*
171 * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
172 * This is due to the way trace events work. If a file includes two
173 * trace event headers under one "CREATE_TRACE_POINTS" the first include
174 * will override the TRACE_EVENT and break the second include.
175 */
176
177#ifndef TRACE_EVENT 181#ifndef TRACE_EVENT
178/* 182/*
179 * For use with the TRACE_EVENT macro: 183 * For use with the TRACE_EVENT macro:
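The tracepoint.h hunks are a reorganisation rather than an API change: the probe register/unregister prototypes, the iterator API and tracepoint_synchronize_unregister() move inside the _LINUX_TRACEPOINT_H guard, while DECLARE_TRACE and TRACE_EVENT now sit after it so they can be redefined when the header is re-included for trace-event generation. For context, a hedged sketch of the consumer side as it looked in this era, using a hypothetical block_commit tracepoint; the generated register_trace_*()/unregister_trace_*() wrappers end up in the tracepoint_probe_register()/unregister() path shown above:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/tracepoint.h>

/* Hypothetical tracepoint; DECLARE_TRACE normally lives in a shared
 * header and DEFINE_TRACE in exactly one .c file. */
DECLARE_TRACE(block_commit,
	TP_PROTO(int nr_blocks),
	TP_ARGS(nr_blocks));
DEFINE_TRACE(block_commit);

static void probe_block_commit(int nr_blocks)
{
	pr_info("committed %d blocks\n", nr_blocks);
}

static int __init probe_init(void)
{
	/* Generated wrapper around tracepoint_probe_register("block_commit", ...). */
	return register_trace_block_commit(probe_block_commit);
}

static void __exit probe_exit(void)
{
	unregister_trace_block_commit(probe_block_commit);
	/* Make sure no CPU is still executing the probe before the
	 * module text is freed (see the comment in the hunk above). */
	tracepoint_synchronize_unregister();
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");

A call site would fire the event with trace_block_commit(n), which compiles to a no-op when CONFIG_TRACEPOINTS is off.
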
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..76d96d035ea0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
127/* 127/*
128 * Used for wake-one threads: 128 * Used for wake-one threads:
129 */ 129 */
130static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
131 wait_queue_t *wait)
132{
133 wait->flags |= WQ_FLAG_EXCLUSIVE;
134 __add_wait_queue(q, wait);
135}
136
130static inline void __add_wait_queue_tail(wait_queue_head_t *head, 137static inline void __add_wait_queue_tail(wait_queue_head_t *head,
131 wait_queue_t *new) 138 wait_queue_t *new)
132{ 139{
133 list_add_tail(&new->task_list, &head->task_list); 140 list_add_tail(&new->task_list, &head->task_list);
134} 141}
135 142
143static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
144 wait_queue_t *wait)
145{
146 wait->flags |= WQ_FLAG_EXCLUSIVE;
147 __add_wait_queue_tail(q, wait);
148}
149
136static inline void __remove_wait_queue(wait_queue_head_t *head, 150static inline void __remove_wait_queue(wait_queue_head_t *head,
137 wait_queue_t *old) 151 wait_queue_t *old)
138{ 152{
@@ -404,25 +418,6 @@ do { \
404}) 418})
405 419
406/* 420/*
407 * Must be called with the spinlock in the wait_queue_head_t held.
408 */
409static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
410 wait_queue_t * wait)
411{
412 wait->flags |= WQ_FLAG_EXCLUSIVE;
413 __add_wait_queue_tail(q, wait);
414}
415
416/*
417 * Must be called with the spinlock in the wait_queue_head_t held.
418 */
419static inline void remove_wait_queue_locked(wait_queue_head_t *q,
420 wait_queue_t * wait)
421{
422 __remove_wait_queue(q, wait);
423}
424
425/*
426 * These are the old interfaces to sleep waiting for an event. 421 * These are the old interfaces to sleep waiting for an event.
427 * They are racy. DO NOT use them, use the wait_event* interfaces above. 422 * They are racy. DO NOT use them, use the wait_event* interfaces above.
428 * We plan to remove these interfaces. 423 * We plan to remove these interfaces.
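The wait.h hunk replaces add_wait_queue_exclusive_locked() and remove_wait_queue_locked() with __add_wait_queue_exclusive() and __add_wait_queue_tail_exclusive(), keeping the convention that double-underscore helpers expect the caller to already hold the waitqueue lock. A hedged sketch of the wake-one pattern using a hypothetical sleep_exclusive() helper; this is essentially what prepare_to_wait_exclusive() plus schedule() does:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static void sleep_exclusive(wait_queue_head_t *q)
{
	DEFINE_WAIT(wait);		/* autoremove_wake_function, current task */
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail_exclusive(q, &wait);	/* sets WQ_FLAG_EXCLUSIVE */
	set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irqrestore(&q->lock, flags);

	schedule();			/* woken by an exclusive wake-up */
	finish_wait(q, &wait);		/* sets TASK_RUNNING, dequeues if needed */
}

Callers of the removed remove_wait_queue_locked() can call __remove_wait_queue() directly, which is unchanged by this diff.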