Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/mmu_context.h | 6
-rw-r--r--  include/asm-alpha/processor.h | 13
-rw-r--r--  include/asm-alpha/ptrace.h | 6
-rw-r--r--  include/asm-alpha/system.h | 18
-rw-r--r--  include/asm-alpha/thread_info.h | 2
-rw-r--r--  include/asm-arm/processor.h | 8
-rw-r--r--  include/asm-arm/system.h | 12
-rw-r--r--  include/asm-arm/thread_info.h | 7
-rw-r--r--  include/asm-arm26/system.h | 12
-rw-r--r--  include/asm-arm26/thread_info.h | 9
-rw-r--r--  include/asm-cris/arch-v10/processor.h | 2
-rw-r--r--  include/asm-cris/arch-v32/processor.h | 2
-rw-r--r--  include/asm-cris/processor.h | 3
-rw-r--r--  include/asm-cris/thread_info.h | 2
-rw-r--r--  include/asm-frv/thread_info.h | 2
-rw-r--r--  include/asm-h8300/thread_info.h | 2
-rw-r--r--  include/asm-i386/i387.h | 8
-rw-r--r--  include/asm-i386/processor.h | 12
-rw-r--r--  include/asm-i386/system.h | 9
-rw-r--r--  include/asm-i386/thread_info.h | 2
-rw-r--r--  include/asm-i386/topology.h | 1
-rw-r--r--  include/asm-ia64/compat.h | 2
-rw-r--r--  include/asm-ia64/processor.h | 2
-rw-r--r--  include/asm-ia64/ptrace.h | 4
-rw-r--r--  include/asm-ia64/system.h | 9
-rw-r--r--  include/asm-ia64/thread_info.h | 9
-rw-r--r--  include/asm-ia64/topology.h | 2
-rw-r--r--  include/asm-m32r/ptrace.h | 3
-rw-r--r--  include/asm-m32r/system.h | 10
-rw-r--r--  include/asm-m32r/thread_info.h | 2
-rw-r--r--  include/asm-m68k/amigahw.h | 12
-rw-r--r--  include/asm-m68k/amigaints.h | 2
-rw-r--r--  include/asm-m68k/checksum.h | 2
-rw-r--r--  include/asm-m68k/dsp56k.h | 2
-rw-r--r--  include/asm-m68k/floppy.h | 2
-rw-r--r--  include/asm-m68k/hardirq.h | 9
-rw-r--r--  include/asm-m68k/io.h | 49
-rw-r--r--  include/asm-m68k/irq.h | 9
-rw-r--r--  include/asm-m68k/machdep.h | 1
-rw-r--r--  include/asm-m68k/raw_io.h | 40
-rw-r--r--  include/asm-m68k/signal.h | 2
-rw-r--r--  include/asm-m68k/sun3_pgtable.h | 2
-rw-r--r--  include/asm-m68k/sun3ints.h | 1
-rw-r--r--  include/asm-m68k/sun3xflop.h | 4
-rw-r--r--  include/asm-m68k/thread_info.h | 1
-rw-r--r--  include/asm-m68k/uaccess.h | 20
-rw-r--r--  include/asm-m68k/zorro.h | 8
-rw-r--r--  include/asm-m68knommu/machdep.h | 1
-rw-r--r--  include/asm-m68knommu/thread_info.h | 2
-rw-r--r--  include/asm-mips/mach-ip27/topology.h | 1
-rw-r--r--  include/asm-mips/processor.h | 10
-rw-r--r--  include/asm-mips/system.h | 12
-rw-r--r--  include/asm-mips/thread_info.h | 2
-rw-r--r--  include/asm-parisc/system.h | 9
-rw-r--r--  include/asm-parisc/thread_info.h | 3
-rw-r--r--  include/asm-powerpc/system.h | 10
-rw-r--r--  include/asm-powerpc/thread_info.h | 3
-rw-r--r--  include/asm-powerpc/topology.h | 1
-rw-r--r--  include/asm-ppc/system.h | 10
-rw-r--r--  include/asm-s390/elf.h | 2
-rw-r--r--  include/asm-s390/processor.h | 8
-rw-r--r--  include/asm-s390/system.h | 10
-rw-r--r--  include/asm-s390/thread_info.h | 2
-rw-r--r--  include/asm-sh/ptrace.h | 10
-rw-r--r--  include/asm-sh/system.h | 10
-rw-r--r--  include/asm-sh/thread_info.h | 2
-rw-r--r--  include/asm-sh64/thread_info.h | 2
-rw-r--r--  include/asm-sparc/system.h | 12
-rw-r--r--  include/asm-sparc/thread_info.h | 3
-rw-r--r--  include/asm-sparc64/elf.h | 2
-rw-r--r--  include/asm-sparc64/mmu_context.h | 2
-rw-r--r--  include/asm-sparc64/processor.h | 5
-rw-r--r--  include/asm-sparc64/system.h | 14
-rw-r--r--  include/asm-um/thread_info.h | 3
-rw-r--r--  include/asm-v850/processor.h | 8
-rw-r--r--  include/asm-v850/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/compat.h | 2
-rw-r--r--  include/asm-x86_64/i387.h | 10
-rw-r--r--  include/asm-x86_64/processor.h | 4
-rw-r--r--  include/asm-x86_64/system.h | 9
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/topology.h | 1
-rw-r--r--  include/asm-xtensa/processor.h | 6
-rw-r--r--  include/asm-xtensa/ptrace.h | 4
-rw-r--r--  include/asm-xtensa/thread_info.h | 2
-rw-r--r--  include/linux/sched.h | 15
-rw-r--r--  include/linux/topology.h | 2
87 files changed, 343 insertions(+), 222 deletions(-)
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index a714d0cdc204..6f92482cc96c 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -156,7 +156,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
 	/* Always update the PCB ASN.  Another thread may have allocated
 	   a new mm->context (via flush_tlb_mm) without the ASN serial
 	   number wrapping.  We have no way to detect when this is needed. */
-	next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
+	task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
 }
 
 __EXTERN_INLINE void
@@ -235,7 +235,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 		if (cpu_online(i))
 			mm->context[i] = 0;
 	if (tsk != current)
-		tsk->thread_info->pcb.ptbr
+		task_thread_info(tsk)->pcb.ptbr
 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
 	return 0;
 }
@@ -249,7 +249,7 @@ destroy_context(struct mm_struct *mm)
 static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-	tsk->thread_info->pcb.ptbr
+	task_thread_info(tsk)->pcb.ptbr
 	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
 }
 
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index bb1a7a3abb8b..425b7b6d28cb 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -52,19 +52,10 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
 unsigned long get_wchan(struct task_struct *p);
 
-/* See arch/alpha/kernel/ptrace.c for details. */
-#define PT_REG(reg) \
-  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
-
-#define SW_REG(reg) \
- (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
-  + offsetof(struct switch_stack, reg))
-
-#define KSTK_EIP(tsk) \
-  (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
 
 #define KSTK_ESP(tsk) \
-  ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
+  ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax() barrier()
 
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
index 072375c135b4..9933b8b3612e 100644
--- a/include/asm-alpha/ptrace.h
+++ b/include/asm-alpha/ptrace.h
@@ -75,10 +75,10 @@ struct switch_stack {
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
-#define alpha_task_regs(task) \
-  ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
+#define task_pt_regs(task) \
+  ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
 
-#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
+#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
 
 #endif
 
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 050e86d12891..cc9c7e8cced5 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -131,15 +131,25 @@ struct el_common_EV6_mcheck {
 extern void halt(void) __attribute__((noreturn));
 #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
 
 #define switch_to(P,N,L) \
   do { \
-    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
+    (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
     check_mmu_context(); \
   } while (0)
 
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index d51491ed00b8..69ffd93f8e22 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -54,8 +54,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define alloc_thread_info(tsk) \
   ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-arm/processor.h b/include/asm-arm/processor.h
index 7d4118e09054..31290694648b 100644
--- a/include/asm-arm/processor.h
+++ b/include/asm-arm/processor.h
@@ -85,9 +85,11 @@ unsigned long get_wchan(struct task_struct *p);
  */
 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
-#define KSTK_REGS(tsk) (((struct pt_regs *)(THREAD_START_SP + (unsigned long)(tsk)->thread_info)) - 1)
-#define KSTK_EIP(tsk) KSTK_REGS(tsk)->ARM_pc
-#define KSTK_ESP(tsk) KSTK_REGS(tsk)->ARM_sp
+#define task_pt_regs(p) \
+	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
+#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
 
 /*
  * Prefetching support - only ARMv5.
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 5621d61ebc07..eb2de8c10515 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -168,10 +168,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 #define switch_to(prev,next,last) \
 do { \
-	last = __switch_to(prev,prev->thread_info,next->thread_info); \
+	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * CPU interrupt mask handling.
  */
 #if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index 7c98557b717f..33a33cbb6329 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -96,13 +96,10 @@ static inline struct thread_info *current_thread_info(void)
 extern struct thread_info *alloc_thread_info(struct task_struct *task);
 extern void free_thread_info(struct thread_info *);
 
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 #define thread_saved_pc(tsk) \
-	((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
 #define thread_saved_fp(tsk) \
-	((unsigned long)((tsk)->thread_info->cpu_context.fp))
+	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
 
 extern void iwmmxt_task_disable(struct thread_info *);
 extern void iwmmxt_task_copy(struct thread_info *, void *);
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index f23fac1938f3..ca4ccfc4b578 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -111,10 +111,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 #define switch_to(prev,next,last) \
 do { \
-	last = __switch_to(prev,prev->thread_info,next->thread_info); \
+	last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x) \
diff --git a/include/asm-arm26/thread_info.h b/include/asm-arm26/thread_info.h
index aff3e5699c64..a65e58a0a767 100644
--- a/include/asm-arm26/thread_info.h
+++ b/include/asm-arm26/thread_info.h
@@ -82,18 +82,15 @@ static inline struct thread_info *current_thread_info(void)
 
 /* FIXME - PAGE_SIZE < 32K */
 #define THREAD_SIZE (8*32768) // FIXME - this needs attention (see kernel/fork.c which gets a nice div by zero if this is lower than 8*32768
-#define __get_user_regs(x) (((struct pt_regs *)((unsigned long)(x) + THREAD_SIZE - 8)) - 1)
+#define task_pt_regs(task) ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE - 8) - 1)
 
 extern struct thread_info *alloc_thread_info(struct task_struct *task);
 extern void free_thread_info(struct thread_info *);
 
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 #define thread_saved_pc(tsk) \
-	((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
 #define thread_saved_fp(tsk) \
-	((unsigned long)((tsk)->thread_info->cpu_context.fp))
+	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
 
 #else /* !__ASSEMBLY__ */
 
diff --git a/include/asm-cris/arch-v10/processor.h b/include/asm-cris/arch-v10/processor.h
index e23df8dc96e8..cc692c7a0660 100644
--- a/include/asm-cris/arch-v10/processor.h
+++ b/include/asm-cris/arch-v10/processor.h
@@ -40,7 +40,7 @@ struct thread_struct {
 #define KSTK_EIP(tsk) \
 ({ \
 	unsigned long eip = 0; \
-	unsigned long regs = (unsigned long)user_regs(tsk); \
+	unsigned long regs = (unsigned long)task_pt_regs(tsk); \
 	if (regs > PAGE_SIZE && \
 	    virt_addr_valid(regs)) \
 		eip = ((struct pt_regs *)regs)->irp; \
diff --git a/include/asm-cris/arch-v32/processor.h b/include/asm-cris/arch-v32/processor.h
index 8c939bf27987..32bf2e538ced 100644
--- a/include/asm-cris/arch-v32/processor.h
+++ b/include/asm-cris/arch-v32/processor.h
@@ -36,7 +36,7 @@ struct thread_struct {
 #define KSTK_EIP(tsk) \
 ({ \
 	unsigned long eip = 0; \
-	unsigned long regs = (unsigned long)user_regs(tsk); \
+	unsigned long regs = (unsigned long)task_pt_regs(tsk); \
 	if (regs > PAGE_SIZE && virt_addr_valid(regs)) \
 		eip = ((struct pt_regs *)regs)->erp; \
 	eip; \
diff --git a/include/asm-cris/processor.h b/include/asm-cris/processor.h
index dce41009eeb0..961e2bceadbc 100644
--- a/include/asm-cris/processor.h
+++ b/include/asm-cris/processor.h
@@ -45,7 +45,8 @@ struct task_struct;
  * Dito but for the currently running task
  */
 
-#define current_regs() user_regs(current->thread_info)
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() task_pt_regs(current)
 
 static inline void prepare_to_copy(struct task_struct *tsk)
 {
diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h
index cef0140fc104..7ad853c3f74e 100644
--- a/include/asm-cris/thread_info.h
+++ b/include/asm-cris/thread_info.h
@@ -69,8 +69,6 @@ struct thread_info {
 /* thread information allocation */
 #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index 60f6b2aee76d..a5576e02dd1d 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -110,8 +110,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h
index bfcc755c3bb1..45f09dc9caff 100644
--- a/include/asm-h8300/thread_info.h
+++ b/include/asm-h8300/thread_info.h
@@ -69,8 +69,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
 	__get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 6747006743f9..152d0baa576a 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
 		 X86_FEATURE_FXSR,
 		 "m" (tsk->thread.i387.fxsave)
 		 :"memory");
-	tsk->thread_info->status &= ~TS_USEDFPU;
+	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #define __unlazy_fpu( tsk ) do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) \
 		save_init_fpu( tsk ); \
 } while (0)
 
 #define __clear_fpu( tsk ) \
 do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) { \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) { \
 		asm volatile("fnclex ; fwait"); \
-		(tsk)->thread_info->status &= ~TS_USEDFPU; \
+		task_thread_info(tsk)->status &= ~TS_USEDFPU; \
 		stts(); \
 	} \
 } while (0)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 13ecf66b098c..feca5d961e2b 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -561,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
 	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
 })
 
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessable even if the CPU haven't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
 #define task_pt_regs(task) \
 ({ \
 	struct pt_regs *__regs__; \
-	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
 	__regs__ - 1; \
 })
 
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 9c0593b7a94e..36a92ed6a9d0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8fbf791651bf..2493e77e8c30 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 0ec27c9e8e45..d7e19eb344b7 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
 	.max_interval = 32, \
 	.busy_factor = 32, \
 	.imbalance_pct = 125, \
-	.cache_hot_time = (10*1000000), \
 	.cache_nice_tries = 1, \
 	.busy_idx = 3, \
 	.idle_idx = 1, \
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index aaf11f4e9169..c0b19106665c 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -192,7 +192,7 @@ compat_ptr (compat_uptr_t uptr)
 static __inline__ void __user *
 compat_alloc_user_space (long len)
 {
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
 }
 
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 94e07e727395..8c648bf72bbd 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -352,7 +352,7 @@ extern unsigned long get_wchan (struct task_struct *p);
 /* Return instruction pointer of blocked task TSK. */
 #define KSTK_EIP(tsk) \
   ({ \
-	struct pt_regs *_regs = ia64_task_regs(tsk); \
+	struct pt_regs *_regs = task_pt_regs(tsk); \
 	_regs->cr_iip + ia64_psr(_regs)->ri; \
   })
 
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 2c703d6e0c86..9471cdc3f4c0 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -248,7 +248,7 @@ struct switch_stack {
 })
 
  /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
 # define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
 # define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@ struct switch_stack {
  *
  * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
  */
-# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
 
   struct task_struct; /* forward decl */
   struct unw_frame_info; /* forward decl */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 635235fa1e32..80c5a234e259 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -219,14 +219,14 @@ extern void ia64_load_extra (struct task_struct *task);
 
 #define IA64_HAS_EXTRA_STATE(t) \
 	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
-	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+	 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do { \
 	if (IA64_HAS_EXTRA_STATE(prev)) \
 		ia64_save_extra(prev); \
 	if (IA64_HAS_EXTRA_STATE(next)) \
 		ia64_load_extra(next); \
-	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
+	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
 	(last) = ia64_switch_to((next)); \
 } while (0)
 
@@ -238,8 +238,8 @@ extern void ia64_load_extra (struct task_struct *task);
  * the latest fph state from another CPU. In other words: eager save, lazy restore.
  */
 # define switch_to(prev,next,last) do { \
-	if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
-		ia64_psr(ia64_task_regs(prev))->mfh = 0; \
+	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+		ia64_psr(task_pt_regs(prev))->mfh = 0; \
 		(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
 		__ia64_save_fpu((prev)->thread.fph); \
 	} \
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 171b2207bde4..653bb7f9a753 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -57,11 +57,20 @@ struct thread_info {
 /* how to get the thread information struct from C */
 #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
 #define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info() ((struct thread_info *) 0)
 #define alloc_thread_info(tsk) ((struct thread_info *) 0)
+#define task_thread_info(tsk) ((struct thread_info *) 0)
 #endif
 #define free_thread_info(ti) /* nothing */
+#define task_stack_page(tsk) ((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#define setup_thread_stack(p, org) \
+	*task_thread_info(p) = *task_thread_info(org); \
+	task_thread_info(p)->task = (p);
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 #define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index f7c330467e7e..d8aae4da3978 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -55,7 +55,6 @@ void build_cpu_to_node_map(void);
 	.max_interval = 4, \
 	.busy_factor = 64, \
 	.imbalance_pct = 125, \
-	.cache_hot_time = (10*1000000), \
 	.per_cpu_gain = 100, \
 	.cache_nice_tries = 2, \
 	.busy_idx = 2, \
@@ -81,7 +80,6 @@ void build_cpu_to_node_map(void);
 	.max_interval = 8*(min(num_online_cpus(), 32)), \
 	.busy_factor = 64, \
 	.imbalance_pct = 125, \
-	.cache_hot_time = (10*1000000), \
 	.cache_nice_tries = 2, \
 	.busy_idx = 3, \
 	.idle_idx = 2, \
diff --git a/include/asm-m32r/ptrace.h b/include/asm-m32r/ptrace.h
index 55cd7ecfde43..0d058b2d844e 100644
--- a/include/asm-m32r/ptrace.h
+++ b/include/asm-m32r/ptrace.h
@@ -163,6 +163,9 @@ extern void show_regs(struct pt_regs *);
 
 extern void withdraw_debug_trap(struct pt_regs *regs);
 
+#define task_pt_regs(task) \
+	((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
+
 #endif /* __KERNEL */
 
 #endif /* _ASM_M32R_PTRACE_H */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index dcf619a0a0b0..06c12a037cba 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,6 +68,16 @@
 	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
index 0f589363f619..22aff3222d22 100644
--- a/include/asm-m32r/thread_info.h
+++ b/include/asm-m32r/thread_info.h
@@ -110,8 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #define TI_FLAG_FAULT_CODE_SHIFT 28
 
diff --git a/include/asm-m68k/amigahw.h b/include/asm-m68k/amigahw.h
index 3ae5d8d55ba9..a16fe4e5a28a 100644
--- a/include/asm-m68k/amigahw.h
+++ b/include/asm-m68k/amigahw.h
@@ -274,7 +274,7 @@ struct CIA {
 #define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase)
 
 #define CUSTOM_PHYSADDR (0xdff000)
-#define custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
+#define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
 
 #define CIAA_PHYSADDR (0xbfe001)
 #define CIAB_PHYSADDR (0xbfd000)
@@ -294,12 +294,12 @@ static inline void amifb_video_off(void)
 {
 	if (amiga_chipset == CS_ECS || amiga_chipset == CS_AGA) {
 		/* program Denise/Lisa for a higher maximum play rate */
-		custom.htotal = 113; /* 31 kHz */
-		custom.vtotal = 223; /* 70 Hz */
-		custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
+		amiga_custom.htotal = 113; /* 31 kHz */
+		amiga_custom.vtotal = 223; /* 70 Hz */
+		amiga_custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
 		/* suspend the monitor */
-		custom.hsstrt = custom.hsstop = 116;
-		custom.vsstrt = custom.vsstop = 226;
+		amiga_custom.hsstrt = amiga_custom.hsstop = 116;
+		amiga_custom.vsstrt = amiga_custom.vsstop = 226;
 		amiga_audio_min_period = 57;
 	}
 }
diff --git a/include/asm-m68k/amigaints.h b/include/asm-m68k/amigaints.h
index 2aff4cfbf7b3..aa968d014bb6 100644
--- a/include/asm-m68k/amigaints.h
+++ b/include/asm-m68k/amigaints.h
@@ -109,8 +109,6 @@
 extern void amiga_do_irq(int irq, struct pt_regs *fp);
 extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
 
-extern unsigned short amiga_intena_vals[];
-
 /* CIA interrupt control register bits */
 
 #define CIA_ICR_TA 0x01
diff --git a/include/asm-m68k/checksum.h b/include/asm-m68k/checksum.h
index 78860c20db01..17280ef719f5 100644
--- a/include/asm-m68k/checksum.h
+++ b/include/asm-m68k/checksum.h
@@ -25,7 +25,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const unsigned char *src,
+extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
 						unsigned char *dst,
 						int len, int sum,
 						int *csum_err);
diff --git a/include/asm-m68k/dsp56k.h b/include/asm-m68k/dsp56k.h
index ab3dd33e23a1..2d8c0c9f794b 100644
--- a/include/asm-m68k/dsp56k.h
+++ b/include/asm-m68k/dsp56k.h
@@ -13,7 +13,7 @@
 /* Used for uploading DSP binary code */
 struct dsp56k_upload {
 	int len;
-	char *bin;
+	char __user *bin;
 };
 
 /* For the DSP host flags */
diff --git a/include/asm-m68k/floppy.h b/include/asm-m68k/floppy.h
index c6e708dd9f62..63a05ed95c17 100644
--- a/include/asm-m68k/floppy.h
+++ b/include/asm-m68k/floppy.h
@@ -46,7 +46,7 @@ asmlinkage irqreturn_t floppy_hardint(int irq, void *dev_id,
 
 static int virtual_dma_count=0;
 static int virtual_dma_residue=0;
-static char *virtual_dma_addr=0;
+static char *virtual_dma_addr=NULL;
 static int virtual_dma_mode=0;
 static int doing_pdma=0;
 
diff --git a/include/asm-m68k/hardirq.h b/include/asm-m68k/hardirq.h
index 728318bf7f0e..5e1c5826c83d 100644
--- a/include/asm-m68k/hardirq.h
+++ b/include/asm-m68k/hardirq.h
@@ -14,13 +14,4 @@ typedef struct {
 
 #define HARDIRQ_BITS 8
 
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #endif
diff --git a/include/asm-m68k/io.h b/include/asm-m68k/io.h
index 6bb8b0d8f99d..dcfaa352d34c 100644
--- a/include/asm-m68k/io.h
+++ b/include/asm-m68k/io.h
@@ -24,6 +24,7 @@
 #ifdef __KERNEL__
 
 #include <linux/config.h>
+#include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
 
@@ -120,68 +121,68 @@ extern int isa_sex;
  * be compiled in so the case statement will be optimised away
  */
 
-static inline u8 *isa_itb(unsigned long addr)
+static inline u8 __iomem *isa_itb(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u8 *)Q40_ISA_IO_B(addr);
+    case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u8 *)GG2_ISA_IO_B(addr);
+    case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u8 *)AG_ISA_IO_B(addr);
+    case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
-static inline u16 *isa_itw(unsigned long addr)
+static inline u16 __iomem *isa_itw(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u16 *)Q40_ISA_IO_W(addr);
+    case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u16 *)GG2_ISA_IO_W(addr);
+    case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u16 *)AG_ISA_IO_W(addr);
+    case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
-static inline u8 *isa_mtb(unsigned long addr)
+static inline u8 __iomem *isa_mtb(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u8 *)Q40_ISA_MEM_B(addr);
+    case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u8 *)GG2_ISA_MEM_B(addr);
+    case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u8 *)addr;
+    case AG_ISA: return (u8 __iomem *)addr;
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
-static inline u16 *isa_mtw(unsigned long addr)
+static inline u16 __iomem *isa_mtw(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u16 *)Q40_ISA_MEM_W(addr);
+    case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u16 *)GG2_ISA_MEM_W(addr);
+    case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u16 *)addr;
+    case AG_ISA: return (u16 __iomem *)addr;
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
 
@@ -326,20 +327,20 @@ static inline void isa_delay(void)
 
 #define mmiowb()
 
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-static inline void *ioremap_writethrough(unsigned long physaddr,
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
 					 unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
-static inline void *ioremap_fullcache(unsigned long physaddr,
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
 				      unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 127ad190cf2d..325c86f8512d 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -23,6 +23,15 @@
 #endif
 
 /*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
  * Interrupt source definitions
  * General interrupt sources are the level 1-7.
  * Adding an interrupt service routine for one of these sources
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index a0dd5c47002c..7d3fee342369 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -34,7 +34,6 @@ extern void (*mach_power_off)( void );
 extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
 extern void (*mach_hd_setup)(char *, int *);
 extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
 extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
 extern void (*mach_beep) (unsigned int, unsigned int);
diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h
index 041f0a87b25d..5439bcaa57c6 100644
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -19,9 +19,9 @@
 #define IOMAP_NOCACHE_NONSER 2
 #define IOMAP_WRITETHROUGH 3
 
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 
-extern void *__ioremap(unsigned long physaddr, unsigned long size,
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
 		       int cacheflag);
 extern void __iounmap(void *addr, unsigned long size);
 
@@ -30,21 +30,21 @@ extern void __iounmap(void *addr, unsigned long size);
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-	({ u8 __v = (*(volatile u8 *) (addr)); __v; })
+	({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
 #define in_be16(addr) \
-	({ u16 __v = (*(volatile u16 *) (addr)); __v; })
+	({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
 #define in_be32(addr) \
-	({ u32 __v = (*(volatile u32 *) (addr)); __v; })
+	({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
 #define in_le16(addr) \
-	({ u16 __v = le16_to_cpu(*(volatile u16 *) (addr)); __v; })
+	({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
 #define in_le32(addr) \
-	({ u32 __v = le32_to_cpu(*(volatile u32 *) (addr)); __v; })
+	({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })
 
-#define out_8(addr,b) (void)((*(volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(volatile u16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(volatile u32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
 
 #define raw_inb in_8
 #define raw_inw in_be16
@@ -54,7 +54,7 @@ extern void __iounmap(void *addr, unsigned long size);
 #define raw_outw(val,port) out_be16((port),(val))
 #define raw_outl(val,port) out_be32((port),(val))
 
-static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
+static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
 {
 	unsigned int i;
 
@@ -62,7 +62,7 @@ static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
 		*buf++ = in_8(port);
 }
 
-static inline void raw_outsb(volatile u8 *port, const u8 *buf,
+static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
 			     unsigned int len)
 {
 	unsigned int i;
@@ -71,7 +71,7 @@ static inline void raw_outsb(volatile u8 *port, const u8 *buf,
 		out_8(port, *buf++);
 }
 
-static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
+static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
 {
 	unsigned int tmp;
 
@@ -110,7 +110,7 @@ static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
 	}
 }
 
-static inline void raw_outsw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
 			     unsigned int nr)
 {
 	unsigned int tmp;
@@ -150,7 +150,7 @@ static inline void raw_outsw(volatile u16 *port, const u16 *buf,
 	}
 }
 
-static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
+static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
 {
 	unsigned int tmp;
 
@@ -189,7 +189,7 @@ static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
 	}
 }
 
-static inline void raw_outsl(volatile u32 *port, const u32 *buf,
+static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
 			     unsigned int nr)
 {
 	unsigned int tmp;
@@ -230,7 +230,7 @@ static inline void raw_outsl(volatile u32 *port, const u32 *buf,
 }
 
 
-static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
+static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
 				  unsigned int nr)
 {
 	if ((nr) % 8)
@@ -283,7 +283,7 @@ static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
 		: "d0", "a0", "a1", "d6");
 }
 
-static inline void raw_outsw_swapw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
 				   unsigned int nr)
 {
 	if ((nr) % 8)
diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h
index a0cdf9082372..b7b7ea20caab 100644
--- a/include/asm-m68k/signal.h
+++ b/include/asm-m68k/signal.h
@@ -144,7 +144,7 @@ struct sigaction {
 #endif /* __KERNEL__ */
 
 typedef struct sigaltstack {
-	void *ss_sp;
+	void __user *ss_sp;
 	int ss_flags;
 	size_t ss_size;
 } stack_t;
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index e974bb072047..5156a28a18d8 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -211,7 +211,7 @@ static inline unsigned long pte_to_pgoff(pte_t pte)
 	return pte.pte & SUN3_PAGE_PGNUM_MASK;
 }
 
-static inline pte_t pgoff_to_pte(inline unsigned off)
+static inline pte_t pgoff_to_pte(unsigned off)
 {
 	pte_t pte = { off + SUN3_PAGE_ACCESSED };
 	return pte;
diff --git a/include/asm-m68k/sun3ints.h b/include/asm-m68k/sun3ints.h
index fd838eb14213..bd038fccb64b 100644
--- a/include/asm-m68k/sun3ints.h
+++ b/include/asm-m68k/sun3ints.h
@@ -31,7 +31,6 @@ int sun3_request_irq(unsigned int irq,
 		      );
 extern void sun3_init_IRQ (void);
 extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *);
-extern irqreturn_t (*sun3_inthandler[]) (int, void *, struct pt_regs *);
 extern void sun3_free_irq (unsigned int irq, void *dev_id);
 extern void sun3_enable_interrupts (void);
 extern void sun3_disable_interrupts (void);
diff --git a/include/asm-m68k/sun3xflop.h b/include/asm-m68k/sun3xflop.h
index fda1eccf10aa..98a9f79dab29 100644
--- a/include/asm-m68k/sun3xflop.h
+++ b/include/asm-m68k/sun3xflop.h
@@ -208,7 +208,7 @@ static int sun3xflop_request_irq(void)
 
 	if(!once) {
 		once = 1;
-		error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", 0);
+		error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", NULL);
 		return ((error == 0) ? 0 : -1);
 	} else return 0;
 }
@@ -238,7 +238,7 @@ static int sun3xflop_init(void)
 	*sun3x_fdc.fcr_r = 0;
 
 	/* Success... */
-	floppy_set_flags(0, 1, FD_BROKEN_DCL); // I don't know how to detect this.
+	floppy_set_flags(NULL, 1, FD_BROKEN_DCL); // I don't know how to detect this.
 	allowed_drive_mask = 0x01;
 	return (int) SUN3X_FDC;
 }
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index 9532ca3c45cb..c4d622a57dfb 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
 #define init_stack (init_thread_union.stack)
 
 #define task_thread_info(tsk) (&(tsk)->thread.info)
+#define task_stack_page(tsk) ((void *)(tsk)->thread_info)
 #define current_thread_info() task_thread_info(current)
 
 #define __HAVE_THREAD_FUNCTIONS
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index f5cedf19cf68..2ffd87b0a769 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -42,6 +42,7 @@ struct exception_table_entry
 ({ \
     int __pu_err; \
     typeof(*(ptr)) __pu_val = (x); \
+    __chk_user_ptr(ptr); \
     switch (sizeof (*(ptr))) { \
     case 1: \
 	__put_user_asm(__pu_err, __pu_val, ptr, b); \
@@ -91,6 +92,7 @@ __asm__ __volatile__ \
 ({ \
     int __gu_err; \
     typeof(*(ptr)) __gu_val; \
+    __chk_user_ptr(ptr); \
     switch (sizeof(*(ptr))) { \
     case 1: \
 	__get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \
@@ -105,7 +107,7 @@ __asm__ __volatile__ \
 	__gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \
 	break; \
     default: \
-	__gu_val = 0; \
+	__gu_val = (typeof(*(ptr)))0; \
 	__gu_err = __get_user_bad(); \
 	break; \
     } \
@@ -134,7 +136,7 @@ __asm__ __volatile__ \
      : "m"(*(ptr)), "i" (-EFAULT), "0"(0))
 
 static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
     unsigned long tmp;
     __asm__ __volatile__
@@ -189,7 +191,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 }
 
 static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
     unsigned long tmp;
     __asm__ __volatile__
@@ -264,7 +266,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
264 : "d0", "memory") 266 : "d0", "memory")
265 267
266static inline unsigned long 268static inline unsigned long
267__constant_copy_from_user(void *to, const void *from, unsigned long n) 269__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
268{ 270{
269 switch (n) { 271 switch (n) {
270 case 0: 272 case 0:
@@ -520,7 +522,7 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
 #define __copy_from_user_inatomic __copy_from_user
 
 static inline unsigned long
-__constant_copy_to_user(void *to, const void *from, unsigned long n)
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
         switch (n) {
         case 0:
@@ -766,7 +768,7 @@ __constant_copy_to_user(void *to, const void *from, unsigned long n)
  */
 
 static inline long
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
 {
         long res;
         if (count == 0) return count;
@@ -799,11 +801,11 @@ strncpy_from_user(char *dst, const char *src, long count)
  *
  * Return 0 on exception, a value greater than N if too long
  */
-static inline long strnlen_user(const char *src, long n)
+static inline long strnlen_user(const char __user *src, long n)
 {
         long res;
 
-        res = -(long)src;
+        res = -(unsigned long)src;
         __asm__ __volatile__
         ("1:\n"
          " tstl %2\n"
@@ -842,7 +844,7 @@ static inline long strnlen_user(const char *src, long n)
  */
 
 static inline unsigned long
-clear_user(void *to, unsigned long n)
+clear_user(void __user *to, unsigned long n)
 {
         __asm__ __volatile__
         (" tstl %1\n"
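A sketch of what the __user and __chk_user_ptr() annotations above buy. Roughly following include/linux/compiler.h: under sparse (__CHECKER__), __user places the pointer in a separate, non-dereferenceable address space and __chk_user_ptr() is a declaration whose argument type sparse checks; under a normal compile both vanish. The fake_get_user() helper below is purely illustrative, not the real uaccess implementation.

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
extern void __chk_user_ptr(const volatile void __user *);
#else
# define __user
# define __chk_user_ptr(x) (void)0
#endif

/* Illustrative only: passing a plain kernel pointer here warns under sparse. */
static inline long fake_get_user(int *dst, const int __user *src)
{
        __chk_user_ptr(src);
        *dst = 0;               /* real code would use an exception-protected move */
        return 0;
}

int main(void)
{
        int val;
        return (int)fake_get_user(&val, (const int __user *)0);
}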
diff --git a/include/asm-m68k/zorro.h b/include/asm-m68k/zorro.h
index cf816588bedb..5ce97c22b582 100644
--- a/include/asm-m68k/zorro.h
+++ b/include/asm-m68k/zorro.h
@@ -15,24 +15,24 @@
 #define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
 #define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
 
-static inline void *z_remap_nocache_ser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_ser(unsigned long physaddr,
                                         unsigned long size)
 {
         return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
 
-static inline void *z_remap_nocache_nonser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_nonser(unsigned long physaddr,
                                            unsigned long size)
 {
         return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
 }
 
-static inline void *z_remap_writethrough(unsigned long physaddr,
+static inline void __iomem *z_remap_writethrough(unsigned long physaddr,
                                          unsigned long size)
 {
         return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
-static inline void *z_remap_fullcache(unsigned long physaddr,
+static inline void __iomem *z_remap_fullcache(unsigned long physaddr,
                                       unsigned long size)
 {
         return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
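The same sparse mechanism applied to I/O: __iomem (roughly __attribute__((noderef, address_space(2))) under sparse) marks the ioremap cookie as not directly dereferenceable, steering callers toward readb()/writeb()-style accessors. A stand-alone model, with invented names (model_readb, fake_mmio); it is not the kernel's readb().

#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))
#else
# define __iomem
#endif

/* Model accessor: the cast away from __iomem is confined to one place. */
static inline uint8_t model_readb(const volatile void __iomem *addr)
{
        return *(const volatile uint8_t *)addr;
}

int main(void)
{
        static uint8_t fake_mmio[4] = { 0x5a };
        const volatile void __iomem *regs = (const volatile void __iomem *)fake_mmio;

        printf("reg0 = %#x\n", model_readb(regs));      /* not *regs directly */
        return 0;
}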
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h
index 5a9f9c297f79..27c90afd3339 100644
--- a/include/asm-m68knommu/machdep.h
+++ b/include/asm-m68knommu/machdep.h
@@ -38,7 +38,6 @@ extern void (*mach_power_off)( void );
 extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
 extern void (*mach_hd_setup)(char *, int *);
 extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
 extern void (*mach_floppy_eject)(void);
 extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
diff --git a/include/asm-m68knommu/thread_info.h b/include/asm-m68knommu/thread_info.h
index 7b9a3fa3af5d..b8f009edf2b2 100644
--- a/include/asm-m68knommu/thread_info.h
+++ b/include/asm-m68knommu/thread_info.h
@@ -75,8 +75,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
         __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_SIZE_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 #endif /* __ASSEMBLY__ */
 
 #define PREEMPT_ACTIVE 0x4000000
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index 82141c711c33..59d26b52ba32 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -27,7 +27,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
         .max_interval = 32, \
         .busy_factor = 32, \
         .imbalance_pct = 125, \
-        .cache_hot_time = (10*1000), \
         .cache_nice_tries = 1, \
         .per_cpu_gain = 100, \
         .flags = SD_LOAD_BALANCE \
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index de53055a62ae..39d2bd50fece 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -200,11 +200,11 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define __PT_REG(reg) ((long)&((struct pt_regs *)0)->reg - sizeof(struct pt_regs))
-#define __KSTK_TOS(tsk) ((unsigned long)(tsk->thread_info) + THREAD_SIZE - 32)
-#define KSTK_EIP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_epc)))
-#define KSTK_ESP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(regs[29])))
-#define KSTK_STATUS(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_status)))
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
 #define cpu_relax() barrier()
 
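A stand-alone model of the pointer arithmetic above (the pt_regs layout and THREAD_SIZE here are stand-ins, not the real MIPS definitions): the saved user register frame sits just below a 32-byte pad at the top of the task's kernel stack, so task_pt_regs() falls out of task_stack_page() alone and the old __PT_REG() offset games go away.

#include <stdio.h>

#define THREAD_SIZE 8192                        /* assumed for the model */

struct pt_regs { unsigned long regs[32], cp0_status, cp0_epc; };

static struct pt_regs *model_task_pt_regs(void *stack_page)
{
        unsigned long tos = (unsigned long)stack_page + THREAD_SIZE - 32;
        return (struct pt_regs *)tos - 1;       /* frame ends 32 bytes below the top */
}

int main(void)
{
        static unsigned char stack[THREAD_SIZE];
        struct pt_regs *regs = model_task_pt_regs(stack);

        printf("stack %p, pt_regs %p\n", (void *)stack, (void *)regs);
        return 0;                               /* KSTK_EIP would read regs->cp0_epc */
}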
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af3..e8e5d4143377 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -159,11 +159,21 @@ struct task_struct;
 do { \
         if (cpu_has_dsp) \
                 __save_dsp(prev); \
-        (last) = resume(prev, next, next->thread_info); \
+        (last) = resume(prev, next, task_thread_info(next)); \
         if (cpu_has_dsp) \
                 __restore_dsp(current); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
         __u32 retval;
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index e6c24472e03f..1612b3fe1080 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -97,8 +97,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index f3928d3a80cb..a5a973c0c07f 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
         (last) = _switch_to(prev, next); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index 57bbb76cb6c1..ac32f140b83a 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -43,9 +43,6 @@ struct thread_info {
 #define alloc_thread_info(tsk) ((struct thread_info *) \
         __get_free_pages(GFP_KERNEL, THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 
 /* how to get the thread information struct from C */
 #define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 4c888303e85b..9b822afa7d0e 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -183,6 +183,16 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                    struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done; /* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index ac1e80e6033e..7e09d7cda933 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -89,9 +89,6 @@ struct thread_info {
 
 #endif /* THREAD_SHIFT < PAGE_SHIFT */
 
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 9f3d4da261c4..1e19cd00af25 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -39,7 +39,6 @@ static inline int node_to_first_cpu(int node)
         .max_interval = 32, \
         .busy_factor = 32, \
         .imbalance_pct = 125, \
-        .cache_hot_time = (10*1000000), \
         .cache_nice_tries = 1, \
         .per_cpu_gain = 100, \
         .busy_idx = 3, \
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index b97037348277..fb49c0c49ea1 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -131,6 +131,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
                                        struct task_struct *);
 #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                    struct thread_struct *next);
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index 372d51cccd53..710646e64f7d 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
 
 static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
-        struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+        struct pt_regs *ptregs = task_pt_regs(tsk);
         memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
         memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
         regs->orig_gpr2 = ptregs->orig_gpr2;
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 4ec652ebb3b1..c5cbc4bd8414 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
 
 unsigned long get_wchan(struct task_struct *p);
-#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
-        ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
-#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
-#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+        (task_stack_page(tsk) + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
 
 /*
  * Give up the time slice of the virtual PU.
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd6..c7c3a9ad593f 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
         prev = __switch_to(prev,next); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 6c18a3f24316..f3797a52c4ea 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
         __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif
 
diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h
index 0f75e16a7415..792fc35bd624 100644
--- a/include/asm-sh/ptrace.h
+++ b/include/asm-sh/ptrace.h
@@ -91,6 +91,16 @@ struct pt_dspregs {
 #define instruction_pointer(regs) ((regs)->pc)
 extern void show_regs(struct pt_regs *);
 
+#ifdef CONFIG_SH_DSP
+#define task_pt_regs(task) \
+        ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+                - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+#else
+#define task_pt_regs(task) \
+        ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+                - sizeof(unsigned long)) - 1)
+#endif
+
 static inline unsigned long profile_pc(struct pt_regs *regs)
 {
         unsigned long pc = instruction_pointer(regs);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 28a3c2d8bcd7..bb0330499bdf 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
         last = __last; \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 46080cefaff8..85f0c11b4319 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void)
 #define THREAD_SIZE (2*PAGE_SIZE)
 #define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
index 10f024c6a2e3..1f825cb163c3 100644
--- a/include/asm-sh64/thread_info.h
+++ b/include/asm-sh64/thread_info.h
@@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void)
 
 #define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1f6b71f9e1b6..58dd162927bb 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -155,7 +155,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
155 "here:\n" \ 155 "here:\n" \
156 : "=&r" (last) \ 156 : "=&r" (last) \
157 : "r" (&(current_set[hard_smp_processor_id()])), \ 157 : "r" (&(current_set[hard_smp_processor_id()])), \
158 "r" ((next)->thread_info), \ 158 "r" (task_thread_info(next)), \
159 "i" (TI_KPSR), \ 159 "i" (TI_KPSR), \
160 "i" (TI_KSP), \ 160 "i" (TI_KSP), \
161 "i" (TI_TASK) \ 161 "i" (TI_TASK) \
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
         } while(0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index ff6ccb3d24c6..65f060b040ab 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -92,9 +92,6 @@ BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
 BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
 
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277e..69539a8ab833 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
 #endif
 
 #define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
-        ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+        ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722c..57ee7b306189 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
         register unsigned long pgd_cache asm("o4"); \
         paddr = __pa((__mm)->pgd); \
         pgd_cache = 0UL; \
-        if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+        if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
                 pgd_cache = get_pgd_cache((__mm)->pgd); \
         __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
                              "mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237e..cd8d9b4c8658 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *task);
 
-#define KSTK_EIP(tsk) ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk) ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 #define cpu_relax() barrier()
 
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 309f1466b6fa..af254e581834 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -208,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
         /* If you are tempted to conditionalize the following */ \
         /* so that ASI is only written if it changes, think again. */ \
         __asm__ __volatile__("wr %%g0, %0, %%asi" \
-        : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+        : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
         __asm__ __volatile__( \
         "mov %%g4, %%g7\n\t" \
         "wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -238,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
238 "b,a ret_from_syscall\n\t" \ 238 "b,a ret_from_syscall\n\t" \
239 "1:\n\t" \ 239 "1:\n\t" \
240 : "=&r" (last) \ 240 : "=&r" (last) \
241 : "0" (next->thread_info), \ 241 : "0" (task_thread_info(next)), \
242 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \ 242 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
243 "i" (TI_CWP), "i" (TI_TASK) \ 243 "i" (TI_CWP), "i" (TI_TASK) \
244 : "cc", \ 244 : "cc", \
@@ -253,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
         } \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
         unsigned long tmp1, tmp2;
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 97267f059ef5..705c71972c32 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -56,9 +56,6 @@ static inline struct thread_info *current_thread_info(void)
         ((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL))
 #define free_thread_info(ti) kfree(ti)
 
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 #endif
 
 #define PREEMPT_ACTIVE 0x10000000
diff --git a/include/asm-v850/processor.h b/include/asm-v850/processor.h
index 98f929427d3d..2d31308935a0 100644
--- a/include/asm-v850/processor.h
+++ b/include/asm-v850/processor.h
@@ -98,10 +98,10 @@ unsigned long get_wchan (struct task_struct *p);
 
 
 /* Return some info about the user process TASK. */
-#define task_tos(task) ((unsigned long)(task)->thread_info + THREAD_SIZE)
-#define task_regs(task) ((struct pt_regs *)task_tos (task) - 1)
-#define task_sp(task) (task_regs (task)->gpr[GPR_SP])
-#define task_pc(task) (task_regs (task)->pc)
+#define task_tos(task) ((unsigned long)task_stack_page(task) + THREAD_SIZE)
+#define task_pt_regs(task) ((struct pt_regs *)task_tos (task) - 1)
+#define task_sp(task) (task_pt_regs (task)->gpr[GPR_SP])
+#define task_pc(task) (task_pt_regs (task)->pc)
 /* Grotty old names for some. */
 #define KSTK_EIP(task) task_pc (task)
 #define KSTK_ESP(task) task_sp (task)
diff --git a/include/asm-v850/thread_info.h b/include/asm-v850/thread_info.h
index e4cfad94a553..82b8f2846207 100644
--- a/include/asm-v850/thread_info.h
+++ b/include/asm-v850/thread_info.h
@@ -58,8 +58,6 @@ struct thread_info {
 #define alloc_thread_info(tsk) ((struct thread_info *) \
                                 __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index 3863a7da372b..b37ab8218ef0 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -198,7 +198,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static __inline__ void __user *compat_alloc_user_space(long len)
 {
-        struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs);
+        struct pt_regs *regs = task_pt_regs(current);
         return (void __user *)regs->rsp - len;
 }
 
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 57f7e1433849..876eb9a2fe78 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
  */
 
 #define unlazy_fpu(tsk) do { \
-        if ((tsk)->thread_info->status & TS_USEDFPU) \
+        if (task_thread_info(tsk)->status & TS_USEDFPU) \
                 save_init_fpu(tsk); \
 } while (0)
 
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
 }
 
 #define clear_fpu(tsk) do { \
-        if ((tsk)->thread_info->status & TS_USEDFPU) { \
+        if (task_thread_info(tsk)->status & TS_USEDFPU) { \
                 tolerant_fwait(); \
-                (tsk)->thread_info->status &= ~TS_USEDFPU; \
+                task_thread_info(tsk)->status &= ~TS_USEDFPU; \
                 stts(); \
         } \
 } while (0)
@@ -170,10 +170,10 @@ static inline void kernel_fpu_end(void)
         preempt_enable();
 }
 
-static inline void save_init_fpu( struct task_struct *tsk )
+static inline void save_init_fpu(struct task_struct *tsk)
 {
         __fxsave_clear(tsk);
-        tsk->thread_info->status &= ~TS_USEDFPU;
+        task_thread_info(tsk)->status &= ~TS_USEDFPU;
         stts();
 }
 
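A toy model of the bookkeeping these hunks convert to task_thread_info(): a set TS_USEDFPU bit means the live FPU registers still belong to the task, so unlazy_fpu() must save them (fxsave in the real code) and clear the bit before anyone, e.g. a ptrace reader, looks at the in-memory copy. The types and the fpu_saved field are invented for the model.

#include <stdio.h>

#define TS_USEDFPU 0x0001

struct thread_info { unsigned int status; };
struct task_struct { struct thread_info info; int fpu_saved; };

#define task_thread_info(tsk) (&(tsk)->info)

static void save_init_fpu(struct task_struct *tsk)
{
        tsk->fpu_saved = 1;                             /* stands in for fxsave */
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#define unlazy_fpu(tsk) do { \
        if (task_thread_info(tsk)->status & TS_USEDFPU) \
                save_init_fpu(tsk); \
} while (0)

int main(void)
{
        struct task_struct t = { .info = { .status = TS_USEDFPU } };

        unlazy_fpu(&t);         /* e.g. before reading the FP state from memory */
        printf("saved=%d status=%#x\n", t.fpu_saved, t.info.status);
        return 0;
}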
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 394dd729752d..87a282b1043a 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -321,8 +321,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
 
 extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) \
-        (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
 #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
 
 
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 38c1e8a69c9c..0eacbefb7dd0 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
         __asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+        wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
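A rough user-space model of what migration-cost autodetection wants from sched_cacheflush(). On x86-64 the real flush is the privileged wbinvd instruction, so here it is only a stub, and the buffer size, stride and timing loop are illustrative stand-ins rather than the kernel's actual measurement code.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void sched_cacheflush(void) { /* wbinvd in the kernel; no-op here */ }

static long long touch(volatile unsigned char *buf, size_t size)
{
        struct timespec a, b;

        clock_gettime(CLOCK_MONOTONIC, &a);
        for (size_t i = 0; i < size; i += 64)   /* one access per cache line */
                buf[i]++;
        clock_gettime(CLOCK_MONOTONIC, &b);
        return (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
}

int main(void)
{
        size_t size = 4 << 20;                  /* stand-in for max_cache_size */
        volatile unsigned char *buf = malloc(size);

        if (!buf)
                return 1;
        touch(buf, size);                       /* warm the cache */
        long long warm = touch(buf, size);
        sched_cacheflush();                     /* would make the next pass cache-cold */
        long long cold = touch(buf, size);
        printf("warm %lld ns, post-flush %lld ns\n", warm, cold);
        free((void *)buf);
        return 0;
}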
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index eb7c5fda1870..4ac0e0a36934 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define alloc_thread_info(tsk) \
         ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 7d82bc56b9fa..2fa7f27381b4 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
         .max_interval = 32, \
         .busy_factor = 32, \
         .imbalance_pct = 125, \
-        .cache_hot_time = (10*1000000), \
         .cache_nice_tries = 2, \
         .busy_idx = 3, \
         .idle_idx = 2, \
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 9cab5e4298b9..d1d72ad36f08 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -184,12 +184,12 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 #define release_segments(mm) do { } while(0)
 #define forget_segments() do { } while (0)
 
-#define thread_saved_pc(tsk) (xtensa_pt_regs(tsk)->pc)
+#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
 
 extern unsigned long get_wchan(struct task_struct *p);
 
-#define KSTK_EIP(tsk) (xtensa_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk) (xtensa_pt_regs(tsk)->areg[1])
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
 
 #define cpu_relax() do { } while (0)
 
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index aa4fd7fb3ce7..a5ac71a5205c 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -113,8 +113,8 @@ struct pt_regs {
 };
 
 #ifdef __KERNEL__
-# define xtensa_pt_regs(tsk) ((struct pt_regs*) \
-        (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+        (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
 # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
 # define instruction_pointer(regs) ((regs)->pc)
 extern void show_regs(struct pt_regs *);
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index af208d41fd82..5ae34ab71597 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -93,8 +93,6 @@ static inline struct thread_info *current_thread_info(void)
 /* thread information allocation */
 #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b74c4bf2934..a72e17135421 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -631,7 +631,14 @@ struct sched_domain {
 
 extern void partition_sched_domains(cpumask_t *partition1,
                                     cpumask_t *partition2);
-#endif /* CONFIG_SMP */
+
+/*
+ * Maximum cache size the migration-costs auto-tuning code will
+ * search from:
+ */
+extern unsigned int max_cache_size;
+
+#endif /* CONFIG_SMP */
 
 
 struct io_context;  /* See blkdev.h */
@@ -689,9 +696,12 @@ struct task_struct {
 
         int lock_depth;         /* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+        int last_waker_cpu;     /* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
         int oncpu;
 #endif
+#endif
         int prio, static_prio;
         struct list_head run_list;
         prio_array_t *array;
@@ -1230,6 +1240,7 @@ static inline void task_unlock(struct task_struct *p)
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info
+#define task_stack_page(task) ((void*)((task)->thread_info))
 
 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
 {
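In the generic (!__HAVE_THREAD_FUNCTIONS) case the two new helpers name the same object, since thread_info sits at the base of the kernel-stack allocation; a minimal model, with the stack size and union layout assumed rather than taken from the kernel:

#include <assert.h>
#include <stdio.h>

struct thread_info { unsigned long flags; };
struct task_struct { struct thread_info *thread_info; };

#define task_thread_info(task)  ((task)->thread_info)
#define task_stack_page(task)   ((void *)((task)->thread_info))

int main(void)
{
        static union { struct thread_info ti; unsigned char stack[8192]; } u;
        struct task_struct tsk = { .thread_info = &u.ti };

        assert(task_stack_page(&tsk) == (void *)task_thread_info(&tsk));
        printf("thread_info/stack both at %p\n", task_stack_page(&tsk));
        return 0;
}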
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 3df1d474e5c5..315a5163d6a0 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -86,7 +86,6 @@
         .max_interval = 2, \
         .busy_factor = 8, \
         .imbalance_pct = 110, \
-        .cache_hot_time = 0, \
         .cache_nice_tries = 0, \
         .per_cpu_gain = 25, \
         .busy_idx = 0, \
@@ -117,7 +116,6 @@
         .max_interval = 4, \
         .busy_factor = 64, \
         .imbalance_pct = 125, \
-        .cache_hot_time = (5*1000000/2), \
         .cache_nice_tries = 1, \
         .per_cpu_gain = 100, \
         .busy_idx = 2, \