Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/smp.h | 1
-rw-r--r--  include/asm-alpha/thread_info.h | 8
-rw-r--r--  include/asm-arm/arch-at91/cpu.h | 6
-rw-r--r--  include/asm-avr32/arch-at32ap/cpu.h | 33
-rw-r--r--  include/asm-avr32/setup.h | 2
-rw-r--r--  include/asm-avr32/unistd.h | 4
-rw-r--r--  include/asm-blackfin/processor.h | 6
-rw-r--r--  include/asm-blackfin/system.h | 4
-rw-r--r--  include/asm-frv/tlb.h | 4
-rw-r--r--  include/asm-i386/mmzone.h | 6
-rw-r--r--  include/asm-i386/msr.h | 56
-rw-r--r--  include/asm-i386/paravirt.h | 5
-rw-r--r--  include/asm-i386/smp.h | 37
-rw-r--r--  include/asm-i386/thread_info.h | 2
-rw-r--r--  include/asm-ia64/smp.h | 6
-rw-r--r--  include/asm-ia64/thread_info.h | 2
-rw-r--r--  include/asm-m32r/smp.h | 6
-rw-r--r--  include/asm-m68k/thread_info.h | 6
-rw-r--r--  include/asm-mips/system.h | 2
-rw-r--r--  include/asm-parisc/compat.h | 2
-rw-r--r--  include/asm-powerpc/smp.h | 1
-rw-r--r--  include/asm-s390/smp.h | 1
-rw-r--r--  include/asm-sh/cpu-sh3/dma.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/dma-sh7780.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/dma.h | 2
-rw-r--r--  include/asm-sparc/smp.h | 1
-rw-r--r--  include/asm-sparc64/smp.h | 1
-rw-r--r--  include/asm-um/required-features.h | 9
-rw-r--r--  include/asm-um/smp.h | 4
-rw-r--r--  include/asm-x86_64/smp.h | 14
-rw-r--r--  include/asm-x86_64/system.h | 2
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/linux/aio.h | 3
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/clocksource.h | 3
-rw-r--r--  include/linux/compat.h | 3
-rw-r--r--  include/linux/compiler-gcc.h | 1
-rw-r--r--  include/linux/compiler-gcc3.h | 6
-rw-r--r--  include/linux/compiler-gcc4.h | 3
-rw-r--r--  include/linux/compiler.h | 21
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/futex.h | 42
-rw-r--r--  include/linux/genhd.h | 1
-rw-r--r--  include/linux/gfp.h | 6
-rw-r--r--  include/linux/highmem.h | 27
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/kthread.h | 3
-rw-r--r--  include/linux/ktime.h | 6
-rw-r--r--  include/linux/mca.h | 2
-rw-r--r--  include/linux/mmzone.h | 3
-rw-r--r--  include/linux/module.h | 3
-rw-r--r--  include/linux/mutex.h | 5
-rw-r--r--  include/linux/nfs4_acl.h | 1
-rw-r--r--  include/linux/notifier.h | 66
-rw-r--r--  include/linux/pm.h | 31
-rw-r--r--  include/linux/raid/md_k.h | 1
-rw-r--r--  include/linux/relay.h | 3
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/linux/signal.h | 125
-rw-r--r--  include/linux/smp.h | 1
-rw-r--r--  include/linux/sunrpc/svc.h | 19
-rw-r--r--  include/linux/sunrpc/svcsock.h | 3
-rw-r--r--  include/linux/suspend.h | 24
-rw-r--r--  include/linux/svga.h | 2
-rw-r--r--  include/linux/syscalls.h | 2
-rw-r--r--  include/linux/vmstat.h | 3
-rw-r--r--  include/linux/workqueue.h | 95
67 files changed, 515 insertions(+), 253 deletions(-)
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index a1a1eca6be4..286e1d844f6 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -51,6 +51,7 @@ int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, in
 
 #else /* CONFIG_SMP */
 
+#define hard_smp_processor_id()	0
 #define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
 
 #endif /* CONFIG_SMP */
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index eeb3bef91e1..f4defc2bd3f 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 			 1 << TIF_UAC_SIGBUS)
 
 #define SET_UNALIGN_CTL(task,value)	({				     \
-	(task)->thread_info->flags = (((task)->thread_info->flags &	     \
+	task_thread_info(task)->flags = ((task_thread_info(task)->flags &   \
 		~ALPHA_UAC_MASK)					     \
 		| (((value) << ALPHA_UAC_SHIFT)       & (1<<TIF_UAC_NOPRINT))\
 		| (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \
@@ -105,11 +105,11 @@ register struct thread_info *__current_thread_info __asm__("$8");
 	0; })
 
 #define GET_UNALIGN_CTL(task,value)	({				\
-	put_user(((task)->thread_info->flags & (1 << TIF_UAC_NOPRINT))	\
+	put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\
 		  >> ALPHA_UAC_SHIFT					\
-		 | ((task)->thread_info->flags & (1 << TIF_UAC_SIGBUS))\
+		 | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\
 		   >> (ALPHA_UAC_SHIFT + 1)				\
-		 | ((task)->thread_info->flags & (1 << TIF_UAC_NOFIX))	\
+		 | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\
 		   >> (ALPHA_UAC_SHIFT - 1),				\
 		 (int __user *)(value));				\
 	})
diff --git a/include/asm-arm/arch-at91/cpu.h b/include/asm-arm/arch-at91/cpu.h
index d464ca58cdb..7ef4eebe9f8 100644
--- a/include/asm-arm/arch-at91/cpu.h
+++ b/include/asm-arm/arch-at91/cpu.h
@@ -68,4 +68,10 @@ static inline unsigned long at91_arch_identify(void)
 #define cpu_is_at91sam9263()	(0)
 #endif
 
+/*
+ * Since this is ARM, we will never run on any AVR32 CPU. But these
+ * definitions may reduce clutter in common drivers.
+ */
+#define cpu_is_at32ap7000()	(0)
+
 #endif
diff --git a/include/asm-avr32/arch-at32ap/cpu.h b/include/asm-avr32/arch-at32ap/cpu.h
new file mode 100644
index 00000000000..2bdc5bd6f79
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/cpu.h
@@ -0,0 +1,33 @@
+/*
+ * AVR32 and (fake) AT91 CPU identification
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARCH_CPU_H
+#define __ASM_ARCH_CPU_H
+
+/*
+ * Only AT32AP7000 is defined for now. We can identify the specific
+ * chip at runtime, but I'm not sure if it's really worth it.
+ */
+#ifdef CONFIG_CPU_AT32AP7000
+# define cpu_is_at32ap7000()	(1)
+#else
+# define cpu_is_at32ap7000()	(0)
+#endif
+
+/*
+ * Since this is AVR32, we will never run on any AT91 CPU. But these
+ * definitions may reduce clutter in common drivers.
+ */
+#define cpu_is_at91rm9200()	(0)
+#define cpu_is_at91sam9xe()	(0)
+#define cpu_is_at91sam9260()	(0)
+#define cpu_is_at91sam9261()	(0)
+#define cpu_is_at91sam9263()	(0)
+
+#endif /* __ASM_ARCH_CPU_H */
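The point of these stubbed cpu_is_*() macros is that a driver shared between AT91 and AT32AP boards can test the CPU type without wrapping the test in #ifdefs. A minimal sketch of that pattern, with a made-up function name and divisor values used purely for illustration:

static unsigned long example_pick_clock_divisor(void)
{
	if (cpu_is_at32ap7000())
		return 4;	/* AVR32 variant of the peripheral */
	if (cpu_is_at91sam9260())
		return 2;	/* AT91 variant */
	return 1;		/* safe default */
}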
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 1ff1a217015..b0828d43e11 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -110,7 +110,7 @@ struct tagtable {
 	int	(*parse)(struct tag *);
 };
 
-#define __tag __attribute_used__ __attribute__((__section__(".taglist")))
+#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
 #define __tagtable(tag, fn)						\
 	static struct tagtable __tagtable_##fn __tag = { tag, fn }
 
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index 8f512047181..2418cce624c 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -295,8 +295,10 @@
 #define __NR_shmdt		276
 #define __NR_shmctl		277
 
+#define __NR_utimensat		278
+
 #ifdef __KERNEL__
-#define NR_syscalls		278
+#define NR_syscalls		279
 
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h
index 997465c93e8..0336ff132c1 100644
--- a/include/asm-blackfin/processor.h
+++ b/include/asm-blackfin/processor.h
@@ -58,10 +58,10 @@ do { \
 	(_regs)->pc = (_pc);						\
 	if (current->mm)						\
 		(_regs)->p5 = current->mm->start_data;			\
-	current->thread_info->l1_task_info.stack_start			\
+	task_thread_info(current)->l1_task_info.stack_start		\
 		= (void *)current->mm->context.stack_start;		\
-	current->thread_info->l1_task_info.lowest_sp = (void *)(_usp);	\
-	memcpy(L1_SCRATCH_TASK_INFO, &current->thread_info->l1_task_info, \
+	task_thread_info(current)->l1_task_info.lowest_sp = (void *)(_usp); \
+	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info, \
 	       sizeof(*L1_SCRATCH_TASK_INFO));				\
 	wrusp(_usp);							\
 } while(0)
diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h
index b5bf6e7cb5e..5e5f1a0566c 100644
--- a/include/asm-blackfin/system.h
+++ b/include/asm-blackfin/system.h
@@ -239,9 +239,9 @@ asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_stru
 
 #define switch_to(prev,next,last) \
 do {	\
-	memcpy (&prev->thread_info->l1_task_info, L1_SCRATCH_TASK_INFO, \
+	memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
 		sizeof *L1_SCRATCH_TASK_INFO); \
-	memcpy (L1_SCRATCH_TASK_INFO, &next->thread_info->l1_task_info, \
+	memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
 		sizeof *L1_SCRATCH_TASK_INFO); \
 	(last) = resume (prev, next); \
 } while (0)
diff --git a/include/asm-frv/tlb.h b/include/asm-frv/tlb.h
index f94fe5cb9b3..cd458eb6d75 100644
--- a/include/asm-frv/tlb.h
+++ b/include/asm-frv/tlb.h
@@ -3,7 +3,11 @@
 
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MMU
+extern void check_pgt_cache(void);
+#else
 #define check_pgt_cache() do {} while(0)
+#endif
 
 /*
  * we don't need any special per-pte or per-vma handling...
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 3503ad66945..118e9812778 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -122,21 +122,21 @@ static inline int pfn_valid(int pfn)
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x)					\
 ({									\
-	struct pglist_data __attribute__ ((unused))			\
+	struct pglist_data __maybe_unused				\
 				*__alloc_bootmem_node__pgdat = (pgdat);	\
 	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,	\
 						__pa(MAX_DMA_ADDRESS));	\
 })
 #define alloc_bootmem_pages_node(pgdat, x)				\
 ({									\
-	struct pglist_data __attribute__ ((unused))			\
+	struct pglist_data __maybe_unused				\
 				*__alloc_bootmem_node__pgdat = (pgdat);	\
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,		\
 						__pa(MAX_DMA_ADDRESS))	\
 })
 #define alloc_bootmem_low_pages_node(pgdat, x)				\
 ({									\
-	struct pglist_data __attribute__ ((unused))			\
+	struct pglist_data __maybe_unused				\
 				*__alloc_bootmem_node__pgdat = (pgdat);	\
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);		\
 })
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 26861df52cc..df21ea04936 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -86,62 +86,50 @@ static inline unsigned long long native_read_pmc(void)
 
 #define rdmsr(msr,val1,val2)						\
 	do {								\
-		unsigned long long __val = native_read_msr(msr);	\
-		val1 = __val;						\
-		val2 = __val >> 32;					\
+		u64 __val = native_read_msr(msr);			\
+		(val1) = (u32)__val;					\
+		(val2) = (u32)(__val >> 32);				\
 	} while(0)
 
-#define wrmsr(msr,val1,val2)						\
-	native_write_msr(msr, ((unsigned long long)val2 << 32) | val1)
-
-#define rdmsrl(msr,val)							\
-	do {								\
-		(val) = native_read_msr(msr);				\
-	} while(0)
-
-static inline void wrmsrl (unsigned long msr, unsigned long long val)
+static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
 {
-	unsigned long lo, hi;
-	lo = (unsigned long) val;
-	hi = val >> 32;
-	wrmsr (msr, lo, hi);
+	native_write_msr(__msr, ((u64)__high << 32) | __low);
 }
 
+#define rdmsrl(msr,val)							\
+	((val) = native_read_msr(msr))
+
+#define wrmsrl(msr,val) native_write_msr(msr, val)
+
 /* wrmsr with exception handling */
-#define wrmsr_safe(msr,val1,val2)					\
-	(native_write_msr_safe(msr, ((unsigned long long)val2 << 32) | val1))
+static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
+{
+	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
+}
 
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr,p1,p2)						\
 	({								\
 		int __err;						\
-		unsigned long long __val = native_read_msr_safe(msr, &__err);\
-		(*p1) = __val;						\
-		(*p2) = __val >> 32;					\
+		u64 __val = native_read_msr_safe(msr, &__err);		\
+		(*p1) = (u32)__val;					\
+		(*p2) = (u32)(__val >> 32);				\
 		__err;							\
 	})
 
-#define rdtsc(low,high)						\
-	do {							\
-		u64 _l = native_read_tsc();			\
-		(low) = (u32)_l;				\
-		(high) = _l >> 32;				\
-	} while(0)
-
 #define rdtscl(low)						\
-	do {							\
-		(low) = native_read_tsc();			\
-	} while(0)
+	((low) = (u32)native_read_tsc())
 
-#define rdtscll(val) ((val) = native_read_tsc())
+#define rdtscll(val)						\
+	((val) = native_read_tsc())
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 
 #define rdpmc(counter,low,high)					\
 	do {							\
 		u64 _l = native_read_pmc();			\
-		low = (u32)_l;					\
-		high = _l >> 32;				\
+		(low) = (u32)_l;				\
+		(high) = (u32)(_l >> 32);			\
 	} while(0)
 #endif /* !CONFIG_PARAVIRT */
 
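To see what the reworked interfaces above give callers, here is a small sketch of a read/modify/write cycle; the MSR number 0x1a0 and the function name are only illustrative values, not something this patch introduces:

static void example_msr_roundtrip(void)
{
	u32 lo, hi;
	u64 val;

	rdmsr(0x1a0, lo, hi);		/* 64-bit MSR split into two u32 halves */
	rdmsrl(0x1a0, val);		/* or read it as a single u64 */
	wrmsr(0x1a0, lo, hi);		/* wrmsr() is now a typed inline, not a macro */
	if (wrmsr_safe(0x1a0, lo, hi))	/* non-zero return means the write faulted */
		/* handle the faulting-MSR case here */;
}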
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index e2e7f98723c..bc5c12c1358 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -560,11 +560,6 @@ static inline u64 paravirt_read_tsc(void)
 {
 	return PVOP_CALL0(u64, read_tsc);
 }
-#define rdtsc(low,high) do {		\
-	u64 _l = paravirt_read_tsc();	\
-	low = (u32)_l;			\
-	high = _l >> 32;		\
-} while(0)
 
 #define rdtscl(low) do {		\
 	u64 _l = paravirt_read_tsc();	\
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 090abc1da32..0c713278706 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -124,20 +124,6 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#ifdef APIC_DEFINITION
-extern int hard_smp_processor_id(void);
-#else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
-}
-#endif
-#endif
-
 extern int safe_smp_processor_id(void);
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -152,10 +138,31 @@ extern unsigned int num_processors;
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 
-#endif
+#endif /* CONFIG_SMP */
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+#else
+#include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+}
+#endif /* APIC_DEFINITION */
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+#ifndef CONFIG_SMP
+#define hard_smp_processor_id()		0
+#endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
 extern u8 apicid_2_node[];
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index bf01d4b342b..4cb0f91ae64 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -172,7 +172,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
 #define TS_POLLING		0x0002	/* True if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 60fd4ae014f..c60024989eb 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,8 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
+#define hard_smp_processor_id()		ia64_get_lid()
+
 #ifdef CONFIG_SMP
 
 #define XTP_OFFSET		0x1e0008
@@ -110,8 +112,6 @@ max_xtp (void)
 	writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
 }
 
-#define hard_smp_processor_id()		ia64_get_lid()
-
 /* Upping and downing of CPUs */
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
@@ -128,7 +128,7 @@ extern void unlock_ipi_calllock(void);
 extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);
 
-#else
+#else /* CONFIG_SMP */
 
 #define cpu_logical_id(i)		0
 #define cpu_physical_id(i)		ia64_get_lid()
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 91698599f91..d2814750658 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -110,6 +110,6 @@ struct thread_info {
 
 #define TS_POLLING		1	/* true if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index abd937ac523..078e1a51a04 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -108,6 +108,10 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 #define IPI_SHIFT	(0)
 #define NR_IPIS		(8)
 
-#endif	/* CONFIG_SMP */
+#else	/* CONFIG_SMP */
+
+#define hard_smp_processor_id()		0
+
+#endif /* CONFIG_SMP */
 
 #endif	/* _ASM_M32R_SMP_H */
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index c4d622a57df..d635a375248 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,17 +37,17 @@ struct thread_info {
 #define init_stack		(init_thread_union.stack)
 
 #define task_thread_info(tsk)	(&(tsk)->thread.info)
-#define task_stack_page(tsk)	((void *)(tsk)->thread_info)
+#define task_stack_page(tsk)	((tsk)->stack)
 #define current_thread_info()	task_thread_info(current)
 
 #define __HAVE_THREAD_FUNCTIONS
 
 #define setup_thread_stack(p, org) ({			\
-	*(struct task_struct **)(p)->thread_info = (p);	\
+	*(struct task_struct **)(p)->stack = (p);	\
 	task_thread_info(p)->task = (p);		\
 })
 
-#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1)
+#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
 
 /* entry.S relies on these definitions!
  * bits 0-7 are tested at every exception exit
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 30f23a2b46c..3713d256d36 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -55,7 +55,7 @@ do { \
 	if (cpu_has_dsp)						\
 		__save_dsp(prev);					\
 	next->thread.emulated_fp = 0;					\
-	(last) = resume(prev, next, next->thread_info);			\
+	(last) = resume(prev, next, task_thread_info(next));		\
 	if (cpu_has_dsp)						\
 		__restore_dsp(current);					\
 } while(0)
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index fe857902353..11f4222597a 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
 
 static inline int __is_compat_task(struct task_struct *t)
 {
-	return test_ti_thread_flag(t->thread_info, TIF_32BIT);
+	return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
 }
 
 static inline int is_compat_task(void)
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 01717f266dc..d037f50580e 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -83,6 +83,7 @@ extern void __cpu_die(unsigned int cpu);
 
 #else
 /* for UP */
+#define hard_smp_processor_id()		0
 #define smp_setup_cpu_maps()
 
 #endif /* CONFIG_SMP */
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 0a28e6d6ef4..76e424f718c 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -110,6 +110,7 @@ static inline void smp_send_stop(void)
 	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 }
 
+#define hard_smp_processor_id()		0
 #define smp_cpu_not_running(cpu)	1
 #define smp_setup_cpu_possible_map()	do { } while (0)
 #endif
diff --git a/include/asm-sh/cpu-sh3/dma.h b/include/asm-sh/cpu-sh3/dma.h
index 954801b4602..3a66dc45802 100644
--- a/include/asm-sh/cpu-sh3/dma.h
+++ b/include/asm-sh/cpu-sh3/dma.h
@@ -26,7 +26,7 @@ enum {
 	XMIT_SZ_128BIT,
 };
 
-static unsigned int ts_shift[] __attribute__ ((used)) = {
+static unsigned int ts_shift[] __maybe_unused = {
 	[XMIT_SZ_8BIT]		= 0,
 	[XMIT_SZ_16BIT]		= 1,
 	[XMIT_SZ_32BIT]		= 2,
diff --git a/include/asm-sh/cpu-sh4/dma-sh7780.h b/include/asm-sh/cpu-sh4/dma-sh7780.h
index 6c90d28331b..71b426a6e48 100644
--- a/include/asm-sh/cpu-sh4/dma-sh7780.h
+++ b/include/asm-sh/cpu-sh4/dma-sh7780.h
@@ -28,7 +28,7 @@ enum {
 /*
  * The DMA count is defined as the number of bytes to transfer.
  */
-static unsigned int __attribute__ ((used)) ts_shift[] = {
+static unsigned int ts_shift[] __maybe_unused = {
 	[XMIT_SZ_8BIT]		= 0,
 	[XMIT_SZ_16BIT]		= 1,
 	[XMIT_SZ_32BIT]		= 2,
diff --git a/include/asm-sh/cpu-sh4/dma.h b/include/asm-sh/cpu-sh4/dma.h
index c135e9cebd9..36e26a96476 100644
--- a/include/asm-sh/cpu-sh4/dma.h
+++ b/include/asm-sh/cpu-sh4/dma.h
@@ -53,7 +53,7 @@ enum {
 /*
  * The DMA count is defined as the number of bytes to transfer.
  */
-static unsigned int ts_shift[] __attribute__ ((used)) = {
+static unsigned int ts_shift[] __maybe_unused = {
 	[XMIT_SZ_64BIT]		= 3,
 	[XMIT_SZ_8BIT]		= 0,
 	[XMIT_SZ_16BIT]		= 1,
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index b9da9a600e3..b3f492208fd 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -165,6 +165,7 @@ void smp_setup_cpu_possible_map(void);
 
 #else /* SMP */
 
+#define hard_smp_processor_id()		0
 #define smp_setup_cpu_possible_map() do { } while (0)
 
 #endif /* !(SMP) */
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index cca54804b72..869d16fb907 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -48,6 +48,7 @@ extern unsigned char boot_cpu_id;
 
 #else
 
+#define hard_smp_processor_id()		0
 #define smp_setup_cpu_possible_map() do { } while (0)
 #define boot_cpu_id	(0)
 
diff --git a/include/asm-um/required-features.h b/include/asm-um/required-features.h
new file mode 100644
index 00000000000..dfb967b2d2f
--- /dev/null
+++ b/include/asm-um/required-features.h
@@ -0,0 +1,9 @@
+#ifndef __UM_REQUIRED_FEATURES_H
+#define __UM_REQUIRED_FEATURES_H
+
+/*
+ * Nothing to see, just need something for the i386 and x86_64 asm
+ * headers to include.
+ */
+
+#endif
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index ca552261ed1..84f8cf29324 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -24,6 +24,10 @@ extern inline void smp_cpus_done(unsigned int maxcpus)
 
 extern struct task_struct *idle_threads[NR_CPUS];
 
+#else
+
+#define hard_smp_processor_id()		0
+
 #endif
 
 #endif
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d5704421456..3f303d2365e 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -57,12 +57,6 @@ static inline int num_booting_cpus(void)
 
 #define raw_smp_processor_id() read_pda(cpunumber)
 
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
-
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern void prefill_possible_map(void);
@@ -71,7 +65,13 @@ extern unsigned __cpuinitdata disabled_cpus;
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 
-#endif
+#endif /* CONFIG_SMP */
+
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
 
 /*
  * Some lowlevel functions might want to know about
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index b7b8021e8c4..ead9f9a5623 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -39,7 +39,7 @@
 	  [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)),	  \
 	  [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	  [tif_fork] "i" (TIF_FORK),					  \
-	  [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+	  [thread_info] "i" (offsetof(struct task_struct, stack)),	  \
 	  [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))	  \
 	  : "memory", "cc" __EXTRA_CLOBBER)
 
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 74a6c74397f..10bb5a8ed68 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -162,7 +162,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_COMPAT		0x0002	/* 32bit syscall active */
 #define TS_POLLING		0x0004	/* true if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
diff --git a/include/linux/aio.h b/include/linux/aio.h
index a30ef13c9e6..43dc2ebfaa0 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -226,7 +226,8 @@ int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	__put_ioctx(kioctx);						\
 } while (0)
 
-#define in_aio() !is_sync_wait(current->io_wait)
+#define in_aio() (unlikely(!is_sync_wait(current->io_wait)))
+
 /* may be used for debugging */
 #define warn_if_async()							\
 do {									\
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a686eabe22d..db5b00a792f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -854,7 +854,7 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
-void kblockd_flush(void);
+void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 2665ca04cf8..bf297b03a4e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct clocksource;
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
+ * @resume:		resume function for the clocksource, if necessary
  * @cycle_interval:	Used internally by timekeeping core, please ignore.
  * @xtime_interval:	Used internally by timekeeping core, please ignore.
  */
@@ -65,6 +66,7 @@ struct clocksource {
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
+	void (*resume)(void);
 
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
@@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 extern int clocksource_register(struct clocksource*);
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_resume(void);
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ccd863dd77f..70a157a130b 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -253,5 +253,8 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
 				const compat_sigset_t __user *sigmask,
 				compat_size_t sigsetsize);
 
+asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
+				struct compat_timespec __user *t, int flags);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a9f794716a8..03ec2311fb2 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -40,3 +40,4 @@
 #define  noinline		__attribute__((noinline))
 #define __attribute_pure__	__attribute__((pure))
 #define __attribute_const__	__attribute__((__const__))
+#define __maybe_unused		__attribute__((unused))
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index ecd621fd27d..a9e2863c2db 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -4,9 +4,11 @@
 #include <linux/compiler-gcc.h>
 
 #if __GNUC_MINOR__ >= 3
-# define __attribute_used__	__attribute__((__used__))
+# define __used			__attribute__((__used__))
+# define __attribute_used__	__used		/* deprecated */
 #else
-# define __attribute_used__	__attribute__((__unused__))
+# define __used			__attribute__((__unused__))
+# define __attribute_used__	__used		/* deprecated */
 #endif
 
 #if __GNUC_MINOR__ >= 4
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index fd0cc7c4a63..a03e9398a6c 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -12,7 +12,8 @@
 # define __inline	__inline	__attribute__((always_inline))
 #endif
 
-#define __attribute_used__	__attribute__((__used__))
+#define __used			__attribute__((__used__))
+#define __attribute_used__	__used			/* deprecated */
 #define __must_check		__attribute__((warn_unused_result))
 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
 #define __always_inline		inline __attribute__((always_inline))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 3b6949b4174..498c3592076 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -108,15 +108,30 @@ extern void __chk_io_ptr(const void __iomem *);
  * Allow us to avoid 'defined but not used' warnings on functions and data,
  * as well as force them to be emitted to the assembly file.
  *
- * As of gcc 3.3, static functions that are not marked with attribute((used))
- * may be elided from the assembly file.  As of gcc 3.3, static data not so
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file.  As of gcc 3.4, static data not so
  * marked will not be elided, but this may change in a future gcc version.
  *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
  * In prior versions of gcc, such functions and data would be emitted, but
  * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
  */
 #ifndef __attribute_used__
-# define __attribute_used__	/* unimplemented */
+# define __attribute_used__	/* deprecated */
+#endif
+
+#ifndef __used
+# define __used			/* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused		/* unimplemented */
 #endif
 
 /*
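A short sketch of how the two new annotations are meant to be applied; the helper and table names below are illustrative, not taken from this patch:

/* referenced only from inline assembly elsewhere: keep it in the object file */
static void __used helper_called_from_asm(void)
{
	/* body intentionally trivial for the example */
}

/* may go unreferenced in some configurations: suppress the warning, allow elision */
static unsigned int example_table[] __maybe_unused = { 1, 2, 3 };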
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dff7a728948..c654d0e9ce3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -868,7 +868,7 @@ struct fb_info {
 #define fb_writeq sbus_writeq
 #define fb_memset sbus_memset_io
 
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__)
 
 #define fb_readb __raw_readb
 #define fb_readw __raw_readw
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 820125c628c..899fc7f20ed 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -3,6 +3,8 @@
 
 #include <linux/sched.h>
 
+union ktime;
+
 /* Second argument to futex syscall */
 
 
@@ -15,6 +17,19 @@
 #define FUTEX_LOCK_PI		6
 #define FUTEX_UNLOCK_PI		7
 #define FUTEX_TRYLOCK_PI	8
+#define FUTEX_CMP_REQUEUE_PI	9
+
+#define FUTEX_PRIVATE_FLAG	128
+#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+
+#define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_REQUEUE_PRIVATE	(FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_OP_PRIVATE	(FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI_PRIVATE	(FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_UNLOCK_PI_PRIVATE	(FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
 
 /*
  * Support for robust futexes: the kernel cleans up held futexes at
@@ -83,9 +98,14 @@ struct robust_list_head {
 #define FUTEX_OWNER_DIED	0x40000000
 
 /*
+ * Some processes have been requeued on this PI-futex
+ */
+#define FUTEX_WAITER_REQUEUED	0x20000000
+
+/*
  * The rest of the robust-futex field is for the TID:
  */
-#define FUTEX_TID_MASK		0x3fffffff
+#define FUTEX_TID_MASK		0x0fffffff
 
 /*
  * This limit protects against a deliberately circular list.
@@ -94,7 +114,7 @@ struct robust_list_head {
 #define ROBUST_LIST_LIMIT	2048
 
 #ifdef __KERNEL__
-long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
+long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
 	      u32 __user *uaddr2, u32 val2, u32 val3);
 
 extern int
@@ -106,9 +126,20 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
  * Don't rearrange members without looking at hash_futex().
  *
  * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
- * We set bit 0 to indicate if it's an inode-based key.
- */
+ * We use the two low order bits of offset to tell what is the kind of key :
+ *  00 : Private process futex (PTHREAD_PROCESS_PRIVATE)
+ *       (no reference on an inode or mm)
+ *  01 : Shared futex (PTHREAD_PROCESS_SHARED)
+ *       mapped on a file (reference on the underlying inode)
+ *  10 : Shared futex (PTHREAD_PROCESS_SHARED)
+ *       (but private mapping on an mm, and reference taken on it)
+*/
+
+#define FUT_OFF_INODE    1 /* We set bit 0 if key has a reference on inode */
+#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
+
 union futex_key {
+	u32 __user *uaddr;
 	struct {
 		unsigned long pgoff;
 		struct inode *inode;
@@ -125,7 +156,8 @@ union futex_key {
 		int offset;
 	} both;
 };
-int get_futex_key(u32 __user *uaddr, union futex_key *key);
+int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
+		  union futex_key *key);
 void get_futex_key_refs(union futex_key *key);
 void drop_futex_key_refs(union futex_key *key);
 
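A compressed sketch of how the new private-flag encoding is intended to be consumed by code that dispatches on the futex opcode; the function and variable names are illustrative:

static void example_decode_futex_op(int op)
{
	int cmd = op & FUTEX_CMD_MASK;		/* e.g. FUTEX_WAIT_PRIVATE -> FUTEX_WAIT */
	int shared = !(op & FUTEX_PRIVATE_FLAG);/* private ops can skip the shared-key work */

	(void)cmd;
	(void)shared;
}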
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 2c65da7cabb..f589559cf07 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -413,6 +413,7 @@ char *disk_name (struct gendisk *hd, int part, char *buf);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
 extern void add_partition(struct gendisk *, int, sector_t, sector_t, int);
 extern void delete_partition(struct gendisk *, int);
+extern void printk_all_partitions(void);
 
 extern struct gendisk *alloc_disk_node(int minors, int node_id);
 extern struct gendisk *alloc_disk(int minors);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 97a36c3d96e..0d2ef0b082a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -176,10 +176,6 @@ extern void FASTCALL(free_cold_page(struct page *page));
 #define free_page(addr) free_pages((addr),0)
 
 void page_alloc_init(void);
-#ifdef CONFIG_NUMA
-void drain_node_pages(int node);
-#else
-static inline void drain_node_pages(int node) { };
-#endif
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 
 #endif /* __LINUX_GFP_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index a515eb0afdf..98e2cce996a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -94,17 +94,26 @@ static inline void clear_highpage(struct page *page)
 
 /*
  * Same but also flushes aliased cache contents to RAM.
+ *
+ * This must be a macro because KM_USER0 and friends aren't defined if
+ * !CONFIG_HIGHMEM
  */
-static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
+#define zero_user_page(page, offset, size, km_type)		\
+	do {							\
+		void *kaddr;					\
+								\
+		BUG_ON((offset) + (size) > PAGE_SIZE);		\
+								\
+		kaddr = kmap_atomic(page, km_type);		\
+		memset((char *)kaddr + (offset), 0, (size));	\
+		flush_dcache_page(page);			\
+		kunmap_atomic(kaddr, (km_type));		\
+	} while (0)
+
+static inline void __deprecated memclear_highpage_flush(struct page *page,
+			unsigned int offset, unsigned int size)
 {
-	void *kaddr;
-
-	BUG_ON(offset + size > PAGE_SIZE);
-
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset((char *)kaddr + offset, 0, size);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, size, KM_USER0);
 }
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
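For context, a small sketch of the kind of caller the new macro is aimed at, zeroing the tail of a partially written page; the wrapper name is illustrative and the page/offset are assumed to come from the caller:

static void example_zero_page_tail(struct page *page, unsigned int offset)
{
	/* clear everything from 'offset' to the end of the page */
	zero_user_page(page, offset, PAGE_SIZE - offset, KM_USER0);
}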
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 795102309bf..45170b2fa25 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -95,7 +95,7 @@ extern struct group_info init_groups;
 #define INIT_TASK(tsk)	\
 {									\
 	.state		= 0,						\
-	.thread_info	= &init_thread_info,				\
+	.stack		= &init_thread_info,				\
 	.usage		= ATOMIC_INIT(2),				\
 	.flags		= 0,						\
 	.lock_depth	= -1,						\
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 1c65e7a9f18..00dd957e245 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -30,4 +30,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
 
+int kthreadd(void *unused);
+extern struct task_struct *kthreadd_task;
+
 #endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 81bb9c7a4eb..c762954bda1 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -43,7 +43,7 @@
  * plain scalar nanosecond based representation can be selected by the
  * config switch CONFIG_KTIME_SCALAR.
  */
-typedef union {
+union ktime {
 	s64	tv64;
 #if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
 	struct {
@@ -54,7 +54,9 @@ typedef union {
 # endif
 	} tv;
 #endif
-} ktime_t;
+};
+
+typedef union ktime ktime_t;		/* Kill this */
 
 #define KTIME_MAX			((s64)~((u64)1 << 63))
 #if (BITS_PER_LONG == 64)
diff --git a/include/linux/mca.h b/include/linux/mca.h
index 5cff2923092..37972704617 100644
--- a/include/linux/mca.h
+++ b/include/linux/mca.h
@@ -94,6 +94,7 @@ struct mca_bus {
 struct mca_driver {
 	const short		*id_table;
 	void			*driver_data;
+	int			integrated_id;
 	struct device_driver	driver;
 };
 #define to_mca_driver(mdriver) container_of(mdriver, struct mca_driver, driver)
@@ -125,6 +126,7 @@ extern enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev);
 extern struct bus_type mca_bus_type;
 
 extern int mca_register_driver(struct mca_driver *drv);
+extern int mca_register_driver_integrated(struct mca_driver *, int);
 extern void mca_unregister_driver(struct mca_driver *drv);
 
 /* WARNING: only called by the boot time device setup */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2f1544e8304..d09b1345a3a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -83,6 +83,9 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
+#ifdef CONFIG_NUMA
+	s8 expire;
+#endif
 #ifdef CONFIG_SMP
 	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
diff --git a/include/linux/module.h b/include/linux/module.h
index 6d3dc9c4ff9..792d483c9af 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -356,6 +356,9 @@ struct module
 	   keeping pointers to this stuff */
 	char *args;
 };
+#ifndef MODULE_ARCH_INIT
+#define MODULE_ARCH_INIT {}
+#endif
 
 /* FIXME: It'd be nice to isolate modules during init, too, so they
    aren't used before they (may) fail. But presently too much code
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b81bc2adaef..0d50ea3df68 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -121,11 +121,12 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
  * Also see Documentation/mutex-design.txt.
  */
 extern void fastcall mutex_lock(struct mutex *lock);
-extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
-extern int mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass);
+extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
+					unsigned int subclass);
 #else
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
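With __must_check in place, callers are expected to propagate the result of mutex_lock_interruptible() rather than drop it; a minimal sketch, with an illustrative function name:

static int example_do_locked_work(struct mutex *lock)
{
	int ret = mutex_lock_interruptible(lock);

	if (ret)
		return ret;	/* typically -EINTR: interrupted by a signal */
	/* ... critical section ... */
	mutex_unlock(lock);
	return 0;
}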
diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h
index 409b6e02f33..c9c05a78e9b 100644
--- a/include/linux/nfs4_acl.h
+++ b/include/linux/nfs4_acl.h
@@ -44,7 +44,6 @@
 #define NFS4_ACL_MAX 170
 
 struct nfs4_acl *nfs4_acl_new(int);
-void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
 int nfs4_acl_get_whotype(char *, u32);
 int nfs4_acl_write_who(int who, char *p);
 int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 10a43ed0527..9431101bf87 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -112,32 +112,40 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
 
 #ifdef __KERNEL__
 
-extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
-		struct notifier_block *);
-extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
-		struct notifier_block *);
-extern int raw_notifier_chain_register(struct raw_notifier_head *,
-		struct notifier_block *);
-extern int srcu_notifier_chain_register(struct srcu_notifier_head *,
-		struct notifier_block *);
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+		struct notifier_block *nb);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+		struct notifier_block *nb);
+extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
+		struct notifier_block *nb);
+extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+		struct notifier_block *nb);
 
-extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
-		struct notifier_block *);
-extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
-		struct notifier_block *);
-extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
-		struct notifier_block *);
-extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *,
-		struct notifier_block *);
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+		struct notifier_block *nb);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+		struct notifier_block *nb);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+		struct notifier_block *nb);
+extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+		struct notifier_block *nb);
 
-extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
 		unsigned long val, void *v);
-extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
+extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
 		unsigned long val, void *v);
-extern int raw_notifier_call_chain(struct raw_notifier_head *,
+extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
 		unsigned long val, void *v);
-extern int srcu_notifier_call_chain(struct srcu_notifier_head *,
+extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
 		unsigned long val, void *v);
+extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+	unsigned long val, void *v, int nr_to_call, int *nr_calls);
 
 #define NOTIFY_DONE		0x0000		/* Don't care */
 #define NOTIFY_OK		0x0001		/* Suits me */
@@ -186,6 +194,20 @@ extern int srcu_notifier_call_chain(struct srcu_notifier_head *,
186#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ 194#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
187#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 195#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
188#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 196#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
197#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */
198#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */
199
 200/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
201 * operation in progress
202 */
203#define CPU_TASKS_FROZEN 0x0010
204
205#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
206#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
207#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
208#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
209#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
210#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
189 211
190#endif /* __KERNEL__ */ 212#endif /* __KERNEL__ */
191#endif /* _LINUX_NOTIFIER_H */ 213#endif /* _LINUX_NOTIFIER_H */
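
The notifier.h hunk above gives the prototypes named parameters, adds the __*_notifier_call_chain() variants that report how many callbacks ran (nr_to_call/nr_calls), and introduces CPU_TASKS_FROZEN with the *_FROZEN aliases for hotplug events raised while userspace is frozen. A minimal sketch of a hotplug notifier that treats the frozen and non-frozen forms identically; the my_* names are illustrative and not part of the patch.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:		/* same handling during suspend/resume */
		printk(KERN_INFO "cpu %u is up\n", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		printk(KERN_INFO "cpu %u is gone\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_callback,
};

/* registered early, e.g. with register_cpu_notifier(&my_cpu_nb) */
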
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 6e8fa3049e5..87545e0f0b5 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -107,26 +107,11 @@ typedef int __bitwise suspend_state_t;
107#define PM_SUSPEND_ON ((__force suspend_state_t) 0) 107#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
108#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1) 108#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
109#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) 109#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
110#define PM_SUSPEND_DISK ((__force suspend_state_t) 4) 110#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
111#define PM_SUSPEND_MAX ((__force suspend_state_t) 5)
112
113typedef int __bitwise suspend_disk_method_t;
114
115/* invalid must be 0 so struct pm_ops initialisers can leave it out */
116#define PM_DISK_INVALID ((__force suspend_disk_method_t) 0)
117#define PM_DISK_PLATFORM ((__force suspend_disk_method_t) 1)
118#define PM_DISK_SHUTDOWN ((__force suspend_disk_method_t) 2)
119#define PM_DISK_REBOOT ((__force suspend_disk_method_t) 3)
120#define PM_DISK_TEST ((__force suspend_disk_method_t) 4)
121#define PM_DISK_TESTPROC ((__force suspend_disk_method_t) 5)
122#define PM_DISK_MAX ((__force suspend_disk_method_t) 6)
123 111
124/** 112/**
125 * struct pm_ops - Callbacks for managing platform dependent suspend states. 113 * struct pm_ops - Callbacks for managing platform dependent suspend states.
126 * @valid: Callback to determine whether the given state can be entered. 114 * @valid: Callback to determine whether the given state can be entered.
127 * If %CONFIG_SOFTWARE_SUSPEND is set then %PM_SUSPEND_DISK is
128 * always valid and never passed to this call. If not assigned,
129 * no suspend states are valid.
130 * Valid states are advertised in /sys/power/state but can still 115 * Valid states are advertised in /sys/power/state but can still
131 * be rejected by prepare or enter if the conditions aren't right. 116 * be rejected by prepare or enter if the conditions aren't right.
132 * There is a %pm_valid_only_mem function available that can be assigned 117 * There is a %pm_valid_only_mem function available that can be assigned
@@ -140,24 +125,12 @@ typedef int __bitwise suspend_disk_method_t;
140 * 125 *
141 * @finish: Called when the system has left the given state and all devices 126 * @finish: Called when the system has left the given state and all devices
142 * are resumed. The return value is ignored. 127 * are resumed. The return value is ignored.
143 *
144 * @pm_disk_mode: The generic code always allows one of the shutdown methods
145 * %PM_DISK_SHUTDOWN, %PM_DISK_REBOOT, %PM_DISK_TEST and
146 * %PM_DISK_TESTPROC. If this variable is set, the mode it is set
147 * to is allowed in addition to those modes and is also made default.
148 * When this mode is sent selected, the @prepare call will be called
149 * before suspending to disk (if present), the @enter call should be
150 * present and will be called after all state has been saved and the
151 * machine is ready to be powered off; the @finish callback is called
152 * after state has been restored. All these calls are called with
153 * %PM_SUSPEND_DISK as the state.
154 */ 128 */
155struct pm_ops { 129struct pm_ops {
156 int (*valid)(suspend_state_t state); 130 int (*valid)(suspend_state_t state);
157 int (*prepare)(suspend_state_t state); 131 int (*prepare)(suspend_state_t state);
158 int (*enter)(suspend_state_t state); 132 int (*enter)(suspend_state_t state);
159 int (*finish)(suspend_state_t state); 133 int (*finish)(suspend_state_t state);
160 suspend_disk_method_t pm_disk_mode;
161}; 134};
162 135
163/** 136/**
@@ -276,8 +249,6 @@ extern void device_power_up(void);
276extern void device_resume(void); 249extern void device_resume(void);
277 250
278#ifdef CONFIG_PM 251#ifdef CONFIG_PM
279extern suspend_disk_method_t pm_disk_mode;
280
281extern int device_suspend(pm_message_t state); 252extern int device_suspend(pm_message_t state);
282extern int device_prepare_suspend(pm_message_t state); 253extern int device_prepare_suspend(pm_message_t state);
283 254
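
With PM_SUSPEND_DISK, suspend_disk_method_t and the pm_disk_mode member gone, a platform's struct pm_ops is reduced to the four suspend callbacks. A hedged sketch of a minimal registration, using the pm_valid_only_mem helper mentioned in the comment above; the myboard_* names and pm_set_ops() registration point are assumptions for illustration.

#include <linux/pm.h>

static int myboard_pm_prepare(suspend_state_t state)
{
	return 0;			/* nothing to set up in this sketch */
}

static int myboard_pm_enter(suspend_state_t state)
{
	/* put the SoC into its suspend-to-RAM state here */
	return 0;
}

static struct pm_ops myboard_pm_ops = {
	.valid   = pm_valid_only_mem,	/* only PM_SUSPEND_MEM is supported */
	.prepare = myboard_pm_prepare,
	.enter   = myboard_pm_enter,
};

/* installed at platform init with pm_set_ops(&myboard_pm_ops) */
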
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49747c..a121f36f443 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -201,7 +201,6 @@ struct mddev_s
201 struct mutex reconfig_mutex; 201 struct mutex reconfig_mutex;
202 atomic_t active; 202 atomic_t active;
203 203
204 int changed; /* true if we might need to reread partition info */
205 int degraded; /* whether md should consider 204 int degraded; /* whether md should consider
206 * adding a spare 205 * adding a spare
207 */ 206 */
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 759a0f97bec..6cd8c4425fc 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/timer.h>
15#include <linux/wait.h> 16#include <linux/wait.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/fs.h> 18#include <linux/fs.h>
@@ -38,7 +39,7 @@ struct rchan_buf
38 size_t subbufs_consumed; /* count of sub-buffers consumed */ 39 size_t subbufs_consumed; /* count of sub-buffers consumed */
39 struct rchan *chan; /* associated channel */ 40 struct rchan *chan; /* associated channel */
40 wait_queue_head_t read_wait; /* reader wait queue */ 41 wait_queue_head_t read_wait; /* reader wait queue */
41 struct delayed_work wake_readers; /* reader wake-up work struct */ 42 struct timer_list timer; /* reader wake-up timer */
42 struct dentry *dentry; /* channel file dentry */ 43 struct dentry *dentry; /* channel file dentry */
43 struct kref kref; /* channel buffer refcount */ 44 struct kref kref; /* channel buffer refcount */
44 struct page **page_array; /* array of current buffer pages */ 45 struct page **page_array; /* array of current buffer pages */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d95c480f58..17b72d88c4c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -817,7 +817,7 @@ struct prio_array;
817 817
818struct task_struct { 818struct task_struct {
819 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 819 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
820 struct thread_info *thread_info; 820 void *stack;
821 atomic_t usage; 821 atomic_t usage;
822 unsigned int flags; /* per process flags, defined below */ 822 unsigned int flags; /* per process flags, defined below */
823 unsigned int ptrace; 823 unsigned int ptrace;
@@ -1317,6 +1317,7 @@ extern int in_egroup_p(gid_t);
1317 1317
1318extern void proc_caches_init(void); 1318extern void proc_caches_init(void);
1319extern void flush_signals(struct task_struct *); 1319extern void flush_signals(struct task_struct *);
1320extern void ignore_signals(struct task_struct *);
1320extern void flush_signal_handlers(struct task_struct *, int force_default); 1321extern void flush_signal_handlers(struct task_struct *, int force_default);
1321extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 1322extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
1322 1323
@@ -1512,8 +1513,8 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
1512 1513
1513#ifndef __HAVE_THREAD_FUNCTIONS 1514#ifndef __HAVE_THREAD_FUNCTIONS
1514 1515
1515#define task_thread_info(task) (task)->thread_info 1516#define task_thread_info(task) ((struct thread_info *)(task)->stack)
1516#define task_stack_page(task) ((void*)((task)->thread_info)) 1517#define task_stack_page(task) ((task)->stack)
1517 1518
1518static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 1519static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1519{ 1520{
@@ -1523,7 +1524,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
1523 1524
1524static inline unsigned long *end_of_stack(struct task_struct *p) 1525static inline unsigned long *end_of_stack(struct task_struct *p)
1525{ 1526{
1526 return (unsigned long *)(p->thread_info + 1); 1527 return (unsigned long *)(task_thread_info(p) + 1);
1527} 1528}
1528 1529
1529#endif 1530#endif
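
The sched.h hunk replaces the thread_info pointer in task_struct with a generic ->stack pointer; task_thread_info() and task_stack_page() now both resolve through it. A small, purely illustrative sketch of how the default (!__HAVE_THREAD_FUNCTIONS) accessors relate after this change.

#include <linux/kernel.h>
#include <linux/sched.h>

static void show_stack_layout(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);	/* (struct thread_info *)p->stack */
	void *page = task_stack_page(p);		/* same address, untyped */
	unsigned long *low = end_of_stack(p);		/* first word past thread_info */

	printk(KERN_DEBUG "ti=%p stack page=%p lowest stack word=%p\n",
	       ti, page, low);
}
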
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 14749056dd6..3fa0fab4a04 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -243,6 +243,131 @@ extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
243 243
244extern struct kmem_cache *sighand_cachep; 244extern struct kmem_cache *sighand_cachep;
245 245
246/*
247 * In POSIX a signal is sent either to a specific thread (Linux task)
248 * or to the process as a whole (Linux thread group). How the signal
249 * is sent determines whether it's to one thread or the whole group,
250 * which determines which signal mask(s) are involved in blocking it
251 * from being delivered until later. When the signal is delivered,
252 * either it's caught or ignored by a user handler or it has a default
253 * effect that applies to the whole thread group (POSIX process).
254 *
255 * The possible effects an unblocked signal set to SIG_DFL can have are:
256 * ignore - Nothing Happens
257 * terminate - kill the process, i.e. all threads in the group,
258 * similar to exit_group. The group leader (only) reports
259 * WIFSIGNALED status to its parent.
260 * coredump - write a core dump file describing all threads using
261 * the same mm and then kill all those threads
262 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
263 *
264 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 265 * Other signals when not blocked and set to SIG_DFL behave as follows.
266 * The job control signals also have other special effects.
267 *
268 * +--------------------+------------------+
269 * | POSIX signal | default action |
270 * +--------------------+------------------+
271 * | SIGHUP | terminate |
272 * | SIGINT | terminate |
273 * | SIGQUIT | coredump |
274 * | SIGILL | coredump |
275 * | SIGTRAP | coredump |
276 * | SIGABRT/SIGIOT | coredump |
277 * | SIGBUS | coredump |
278 * | SIGFPE | coredump |
279 * | SIGKILL | terminate(+) |
280 * | SIGUSR1 | terminate |
281 * | SIGSEGV | coredump |
282 * | SIGUSR2 | terminate |
283 * | SIGPIPE | terminate |
284 * | SIGALRM | terminate |
285 * | SIGTERM | terminate |
286 * | SIGCHLD | ignore |
287 * | SIGCONT | ignore(*) |
288 * | SIGSTOP | stop(*)(+) |
289 * | SIGTSTP | stop(*) |
290 * | SIGTTIN | stop(*) |
291 * | SIGTTOU | stop(*) |
292 * | SIGURG | ignore |
293 * | SIGXCPU | coredump |
294 * | SIGXFSZ | coredump |
295 * | SIGVTALRM | terminate |
296 * | SIGPROF | terminate |
297 * | SIGPOLL/SIGIO | terminate |
298 * | SIGSYS/SIGUNUSED | coredump |
299 * | SIGSTKFLT | terminate |
300 * | SIGWINCH | ignore |
301 * | SIGPWR | terminate |
302 * | SIGRTMIN-SIGRTMAX | terminate |
303 * +--------------------+------------------+
304 * | non-POSIX signal | default action |
305 * +--------------------+------------------+
306 * | SIGEMT | coredump |
307 * +--------------------+------------------+
308 *
309 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
310 * (*) Special job control effects:
311 * When SIGCONT is sent, it resumes the process (all threads in the group)
312 * from TASK_STOPPED state and also clears any pending/queued stop signals
313 * (any of those marked with "stop(*)"). This happens regardless of blocking,
314 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
315 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 316 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
317 * default action of stopping the process may happen later or never.
318 */
319
320#ifdef SIGEMT
321#define SIGEMT_MASK rt_sigmask(SIGEMT)
322#else
323#define SIGEMT_MASK 0
324#endif
325
326#if SIGRTMIN > BITS_PER_LONG
327#define rt_sigmask(sig) (1ULL << ((sig)-1))
328#else
329#define rt_sigmask(sig) sigmask(sig)
330#endif
331#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
332
333#define SIG_KERNEL_ONLY_MASK (\
334 rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
335
336#define SIG_KERNEL_STOP_MASK (\
337 rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \
338 rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) )
339
340#define SIG_KERNEL_COREDUMP_MASK (\
341 rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \
342 rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \
343 rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \
344 rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \
345 rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \
346 SIGEMT_MASK )
347
348#define SIG_KERNEL_IGNORE_MASK (\
349 rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
350 rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
351
352#define sig_kernel_only(sig) \
353 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
354#define sig_kernel_coredump(sig) \
355 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
356#define sig_kernel_ignore(sig) \
357 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
358#define sig_kernel_stop(sig) \
359 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
360
361#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
362
363#define sig_user_defined(t, signr) \
364 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
365 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
366
367#define sig_fatal(t, signr) \
368 (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
369 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
370
246#endif /* __KERNEL__ */ 371#endif /* __KERNEL__ */
247 372
248#endif /* _LINUX_SIGNAL_H */ 373#endif /* _LINUX_SIGNAL_H */
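
The table and masks added to signal.h encode the default actions listed above so the kernel can classify a signal number with a couple of bit tests. A sketch of how the helpers combine; classify() is illustrative only and assumes t->sighand is valid.

#include <linux/sched.h>
#include <linux/signal.h>

static const char *classify(struct task_struct *t, int sig)
{
	if (sig_kernel_stop(sig))
		return "stop";
	if (sig_kernel_coredump(sig))
		return "coredump";
	if (sig_kernel_ignore(sig))
		return "ignore";
	if (sig_fatal(t, sig))
		return "terminate";		/* SIG_DFL, not an ignore/stop signal */
	return "caught or ignored by userspace";
}
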
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ba23ec8211..3f70149eabb 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -83,7 +83,6 @@ void smp_prepare_boot_cpu(void);
83 * These macros fold the SMP functionality into a single CPU system 83 * These macros fold the SMP functionality into a single CPU system
84 */ 84 */
85#define raw_smp_processor_id() 0 85#define raw_smp_processor_id() 0
86#define hard_smp_processor_id() 0
87static inline int up_smp_call_function(void) 86static inline int up_smp_call_function(void)
88{ 87{
89 return 0; 88 return 0;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 35fa4d5aadd..4a7ae8ab6eb 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -396,4 +396,23 @@ char * svc_print_addr(struct svc_rqst *, char *, size_t);
396 396
397#define RPC_MAX_ADDRBUFLEN (63U) 397#define RPC_MAX_ADDRBUFLEN (63U)
398 398
399/*
400 * When we want to reduce the size of the reserved space in the response
401 * buffer, we need to take into account the size of any checksum data that
402 * may be at the end of the packet. This is difficult to determine exactly
403 * for all cases without actually generating the checksum, so we just use a
404 * static value.
405 */
406static inline void
407svc_reserve_auth(struct svc_rqst *rqstp, int space)
408{
409 int added_space = 0;
410
411 switch(rqstp->rq_authop->flavour) {
412 case RPC_AUTH_GSS:
413 added_space = RPC_MAX_AUTH_SIZE;
414 }
415 return svc_reserve(rqstp, space + added_space);
416}
417
399#endif /* SUNRPC_SVC_H */ 418#endif /* SUNRPC_SVC_H */
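
svc_reserve_auth(), added above, wraps svc_reserve() and pads the requested space by RPC_MAX_AUTH_SIZE when the request was authenticated with RPCSEC_GSS, since the reply checksum size cannot be known before it is generated. A hedged usage sketch; the reply size is made up.

#include <linux/sunrpc/svc.h>

/* Illustrative only: once a small reply is known to fit, shrink the
 * reservation but keep room for a possible GSS verifier. */
static void my_trim_reply_space(struct svc_rqst *rqstp)
{
	svc_reserve_auth(rqstp, 3 * sizeof(__be32));	/* hypothetical reply size */
}
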
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7909687557b..e21dd93ac4b 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -37,7 +37,8 @@ struct svc_sock {
37 37
38 atomic_t sk_reserved; /* space on outq that is reserved */ 38 atomic_t sk_reserved; /* space on outq that is reserved */
39 39
40 spinlock_t sk_defer_lock; /* protects sk_deferred */ 40 spinlock_t sk_lock; /* protects sk_deferred and
41 * sk_info_authunix */
41 struct list_head sk_deferred; /* deferred requests that need to 42 struct list_head sk_deferred; /* deferred requests that need to
 42 * be revisited */ 43 * be revisited */
43 struct mutex sk_mutex; /* to serialize sending data */ 44 struct mutex sk_mutex; /* to serialize sending data */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 9d2aa1a12aa..d74da9122b6 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -32,6 +32,24 @@ static inline int pm_prepare_console(void) { return 0; }
32static inline void pm_restore_console(void) {} 32static inline void pm_restore_console(void) {}
33#endif 33#endif
34 34
35/**
36 * struct hibernation_ops - hibernation platform support
37 *
38 * The methods in this structure allow a platform to override the default
39 * mechanism of shutting down the machine during a hibernation transition.
40 *
41 * All three methods must be assigned.
42 *
43 * @prepare: prepare system for hibernation
44 * @enter: shut down system after state has been saved to disk
45 * @finish: finish/clean up after state has been reloaded
46 */
47struct hibernation_ops {
48 int (*prepare)(void);
49 int (*enter)(void);
50 void (*finish)(void);
51};
52
35#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) 53#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
36/* kernel/power/snapshot.c */ 54/* kernel/power/snapshot.c */
37extern void __init register_nosave_region(unsigned long, unsigned long); 55extern void __init register_nosave_region(unsigned long, unsigned long);
@@ -39,11 +57,17 @@ extern int swsusp_page_is_forbidden(struct page *);
39extern void swsusp_set_page_free(struct page *); 57extern void swsusp_set_page_free(struct page *);
40extern void swsusp_unset_page_free(struct page *); 58extern void swsusp_unset_page_free(struct page *);
41extern unsigned long get_safe_page(gfp_t gfp_mask); 59extern unsigned long get_safe_page(gfp_t gfp_mask);
60
61extern void hibernation_set_ops(struct hibernation_ops *ops);
62extern int hibernate(void);
42#else 63#else
43static inline void register_nosave_region(unsigned long b, unsigned long e) {} 64static inline void register_nosave_region(unsigned long b, unsigned long e) {}
44static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 65static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
45static inline void swsusp_set_page_free(struct page *p) {} 66static inline void swsusp_set_page_free(struct page *p) {}
46static inline void swsusp_unset_page_free(struct page *p) {} 67static inline void swsusp_unset_page_free(struct page *p) {}
68
69static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
70static inline int hibernate(void) { return -ENOSYS; }
47#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */ 71#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */
48 72
49void save_processor_state(void); 73void save_processor_state(void);
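
struct hibernation_ops, documented above, lets a platform take over the final power-off step of hibernation; all three callbacks must be provided and registration goes through hibernation_set_ops(). A sketch with hypothetical my_* callbacks:

#include <linux/suspend.h>

static int my_hibernate_prepare(void)
{
	return 0;			/* e.g. tell firmware that S4 is coming */
}

static int my_hibernate_enter(void)
{
	/* power the machine down after the image has been written */
	return 0;
}

static void my_hibernate_finish(void)
{
	/* undo whatever prepare() did */
}

static struct hibernation_ops my_hibernation_ops = {
	.prepare = my_hibernate_prepare,
	.enter   = my_hibernate_enter,
	.finish  = my_hibernate_finish,	/* all three must be assigned */
};

/* hibernation_set_ops(&my_hibernation_ops) at platform init */
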
diff --git a/include/linux/svga.h b/include/linux/svga.h
index e1cc552e04f..13ad0b82ac2 100644
--- a/include/linux/svga.h
+++ b/include/linux/svga.h
@@ -113,6 +113,8 @@ void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect);
113void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit); 113void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit);
114void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor); 114void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor);
115int svga_get_tilemax(struct fb_info *info); 115int svga_get_tilemax(struct fb_info *info);
116void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
117 struct fb_var_screeninfo *var);
116 118
117int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); 119int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
118int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node); 120int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 1912c6cbef5..3139f441229 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -576,6 +576,8 @@ asmlinkage long sys_fstatat64(int dfd, char __user *filename,
576 struct stat64 __user *statbuf, int flag); 576 struct stat64 __user *statbuf, int flag);
577asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, 577asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
578 int bufsiz); 578 int bufsiz);
579asmlinkage long sys_utimensat(int dfd, char __user *filename,
580 struct timespec __user *utimes, int flags);
579asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, 581asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
580 struct compat_timeval __user *t); 582 struct compat_timeval __user *t);
581asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename, 583asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index acb1f105870..d9325cf8a13 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -212,8 +212,6 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
212extern void __dec_zone_state(struct zone *, enum zone_stat_item); 212extern void __dec_zone_state(struct zone *, enum zone_stat_item);
213 213
214void refresh_cpu_vm_stats(int); 214void refresh_cpu_vm_stats(int);
215void refresh_vm_stats(void);
216
217#else /* CONFIG_SMP */ 215#else /* CONFIG_SMP */
218 216
219/* 217/*
@@ -260,7 +258,6 @@ static inline void __dec_zone_page_state(struct page *page,
260#define mod_zone_page_state __mod_zone_page_state 258#define mod_zone_page_state __mod_zone_page_state
261 259
262static inline void refresh_cpu_vm_stats(int cpu) { } 260static inline void refresh_cpu_vm_stats(int cpu) { }
263static inline void refresh_vm_stats(void) { }
264#endif 261#endif
265 262
266#endif /* _LINUX_VMSTAT_H */ 263#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f16ba1e0687..d555f31c074 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -24,15 +24,13 @@ typedef void (*work_func_t)(struct work_struct *work);
24struct work_struct { 24struct work_struct {
25 atomic_long_t data; 25 atomic_long_t data;
26#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ 26#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
27#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
28#define WORK_STRUCT_FLAG_MASK (3UL) 27#define WORK_STRUCT_FLAG_MASK (3UL)
29#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) 28#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
30 struct list_head entry; 29 struct list_head entry;
31 work_func_t func; 30 work_func_t func;
32}; 31};
33 32
34#define WORK_DATA_INIT(autorelease) \ 33#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0)
35 ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
36 34
37struct delayed_work { 35struct delayed_work {
38 struct work_struct work; 36 struct work_struct work;
@@ -44,14 +42,8 @@ struct execute_work {
44}; 42};
45 43
46#define __WORK_INITIALIZER(n, f) { \ 44#define __WORK_INITIALIZER(n, f) { \
47 .data = WORK_DATA_INIT(0), \ 45 .data = WORK_DATA_INIT(), \
48 .entry = { &(n).entry, &(n).entry }, \ 46 .entry = { &(n).entry, &(n).entry }, \
49 .func = (f), \
50 }
51
52#define __WORK_INITIALIZER_NAR(n, f) { \
53 .data = WORK_DATA_INIT(1), \
54 .entry = { &(n).entry, &(n).entry }, \
55 .func = (f), \ 47 .func = (f), \
56 } 48 }
57 49
@@ -60,23 +52,12 @@ struct execute_work {
60 .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 52 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
61 } 53 }
62 54
63#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
64 .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
65 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
66 }
67
68#define DECLARE_WORK(n, f) \ 55#define DECLARE_WORK(n, f) \
69 struct work_struct n = __WORK_INITIALIZER(n, f) 56 struct work_struct n = __WORK_INITIALIZER(n, f)
70 57
71#define DECLARE_WORK_NAR(n, f) \
72 struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
73
74#define DECLARE_DELAYED_WORK(n, f) \ 58#define DECLARE_DELAYED_WORK(n, f) \
75 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) 59 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
76 60
77#define DECLARE_DELAYED_WORK_NAR(n, f) \
78 struct dwork_struct n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
79
80/* 61/*
81 * initialize a work item's function pointer 62 * initialize a work item's function pointer
82 */ 63 */
@@ -95,16 +76,9 @@ struct execute_work {
95 * assignment of the work data initializer allows the compiler 76 * assignment of the work data initializer allows the compiler
96 * to generate better code. 77 * to generate better code.
97 */ 78 */
98#define INIT_WORK(_work, _func) \ 79#define INIT_WORK(_work, _func) \
99 do { \
100 (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \
101 INIT_LIST_HEAD(&(_work)->entry); \
102 PREPARE_WORK((_work), (_func)); \
103 } while (0)
104
105#define INIT_WORK_NAR(_work, _func) \
106 do { \ 80 do { \
107 (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \ 81 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
108 INIT_LIST_HEAD(&(_work)->entry); \ 82 INIT_LIST_HEAD(&(_work)->entry); \
109 PREPARE_WORK((_work), (_func)); \ 83 PREPARE_WORK((_work), (_func)); \
110 } while (0) 84 } while (0)
@@ -115,12 +89,6 @@ struct execute_work {
115 init_timer(&(_work)->timer); \ 89 init_timer(&(_work)->timer); \
116 } while (0) 90 } while (0)
117 91
118#define INIT_DELAYED_WORK_NAR(_work, _func) \
119 do { \
120 INIT_WORK_NAR(&(_work)->work, (_func)); \
121 init_timer(&(_work)->timer); \
122 } while (0)
123
124#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ 92#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \
125 do { \ 93 do { \
126 INIT_WORK(&(_work)->work, (_func)); \ 94 INIT_WORK(&(_work)->work, (_func)); \
@@ -143,24 +111,10 @@ struct execute_work {
143 work_pending(&(w)->work) 111 work_pending(&(w)->work)
144 112
145/** 113/**
146 * work_release - Release a work item under execution 114 * work_clear_pending - for internal use only, mark a work item as not pending
147 * @work: The work item to release 115 * @work: The work item in question
148 *
149 * This is used to release a work item that has been initialised with automatic
150 * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
151 * function the opportunity to grab auxiliary data from the container of the
152 * work_struct before clearing the pending bit as the work_struct may be
153 * subject to deallocation the moment the pending bit is cleared.
154 *
155 * In such a case, this should be called in the work function after it has
156 * fetched any data it may require from the containter of the work_struct.
157 * After this function has been called, the work_struct may be scheduled for
158 * further execution or it may be deallocated unless other precautions are
159 * taken.
160 *
161 * This should also be used to release a delayed work item.
162 */ 116 */
163#define work_release(work) \ 117#define work_clear_pending(work) \
164 clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) 118 clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
165 119
166 120
@@ -174,27 +128,28 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
174extern void destroy_workqueue(struct workqueue_struct *wq); 128extern void destroy_workqueue(struct workqueue_struct *wq);
175 129
176extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 130extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
177extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); 131extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
132 struct delayed_work *work, unsigned long delay));
178extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 133extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
179 struct delayed_work *work, unsigned long delay); 134 struct delayed_work *work, unsigned long delay);
135
180extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 136extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
137extern void flush_scheduled_work(void);
181 138
182extern int FASTCALL(schedule_work(struct work_struct *work)); 139extern int FASTCALL(schedule_work(struct work_struct *work));
183extern int FASTCALL(run_scheduled_work(struct work_struct *work)); 140extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
184extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); 141 unsigned long delay));
185 142extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
186extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); 143 unsigned long delay);
187extern int schedule_on_each_cpu(work_func_t func); 144extern int schedule_on_each_cpu(work_func_t func);
188extern void flush_scheduled_work(void);
189extern int current_is_keventd(void); 145extern int current_is_keventd(void);
190extern int keventd_up(void); 146extern int keventd_up(void);
191 147
192extern void init_workqueues(void); 148extern void init_workqueues(void);
193void cancel_rearming_delayed_work(struct delayed_work *work);
194void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
195 struct delayed_work *);
196int execute_in_process_context(work_func_t fn, struct execute_work *); 149int execute_in_process_context(work_func_t fn, struct execute_work *);
197 150
151extern void cancel_work_sync(struct work_struct *work);
152
198/* 153/*
199 * Kill off a pending schedule_delayed_work(). Note that the work callback 154 * Kill off a pending schedule_delayed_work(). Note that the work callback
200 * function may still be running on return from cancel_delayed_work(), unless 155 * function may still be running on return from cancel_delayed_work(), unless
@@ -207,8 +162,18 @@ static inline int cancel_delayed_work(struct delayed_work *work)
207 162
208 ret = del_timer(&work->timer); 163 ret = del_timer(&work->timer);
209 if (ret) 164 if (ret)
210 work_release(&work->work); 165 work_clear_pending(&work->work);
211 return ret; 166 return ret;
212} 167}
213 168
169extern void cancel_rearming_delayed_work(struct delayed_work *work);
170
171/* Obsolete. use cancel_rearming_delayed_work() */
172static inline
173void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
174 struct delayed_work *work)
175{
176 cancel_rearming_delayed_work(work);
177}
178
214#endif 179#endif
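
The workqueue.h changes drop the non-auto-release (_NAR) initialisers, turn work_release() into the internal work_clear_pending(), make cancel_rearming_delayed_workqueue() a wrapper around cancel_rearming_delayed_work(), and export cancel_work_sync(). A sketch of the resulting lifecycle for a plain work item; the my_* names are illustrative.

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* the core clears the pending bit before calling this, so the item
	 * may already be requeued; there is no work_release() step any more */
}

static DECLARE_WORK(my_work, my_work_fn);

static int my_driver_event(void)
{
	return schedule_work(&my_work);		/* 0 if it was already pending */
}

static void my_driver_exit(void)
{
	cancel_work_sync(&my_work);		/* also waits for a running callback */
}
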