author		Vineet Gupta <Vineet.Gupta1@synopsys.com>	2013-11-12 18:08:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-12 22:09:13 -0500
commit		616c05d110bb4ef8203f49c9d2476874077c2f6a (patch)
tree		eb5bc88a430509fcdb8afe8ee9f344c903d1894d /arch
parent		261adc9a609dbfde815337889b9e2c8728959ab8 (diff)
sh: move fpu_counter into ARCH specific thread_struct
Only a couple of arches (sh/x86) use fpu_counter in task_struct so it
can be moved out into ARCH specific thread_struct, reducing the size
of task_struct for other arches.

Compile tested sh defconfig + sh4-linux-gcc (4.6.3)

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Paul Mundt <paul.mundt@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
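The shape of the change, before the hunks: the per-task counter moves
out of the generic struct and into the arch-embedded one, so every
access gains a ".thread" hop. A minimal sketch with simplified
stand-in struct names (not the kernel's real definitions):

	/* Before: every architecture paid for the field per task,
	 * even though only sh and x86 ever read it. */
	struct task_struct_before {
		unsigned char fpu_counter;
		/* ... many other fields ... */
	};

	/* After: only the arches that use it define the field, inside
	 * their own thread_struct, which task_struct embeds. */
	struct sh_thread_struct {
		unsigned char fpu_counter;
		/* ... other sh-specific state ... */
	};

	struct task_struct_after {
		struct sh_thread_struct thread;
		/* ... many other fields ... */
	};

	/* Every call site changes mechanically:
	 *	tsk->fpu_counter  ->  tsk->thread.fpu_counter
	 * which is exactly the substitution in the hunks below. */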
Diffstat (limited to 'arch')
 arch/sh/include/asm/fpu.h          |  2 +-
 arch/sh/include/asm/processor_32.h | 10 ++++++++++
 arch/sh/include/asm/processor_64.h | 10 ++++++++++
 arch/sh/kernel/cpu/fpu.c           |  2 +-
 arch/sh/kernel/process_32.c        |  6 +++---
 5 files changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281aab65..09fc2bc8a790 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12cdcca..18e0377f72bb 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@ struct thread_struct {
 
 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_THREAD { \
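The new comment documents a deliberate overflow: because the counter
is an unsigned char, 256 consecutive FPU-using switches wrap it back
through zero, so a long burst eventually looks "cold" again and the
switch path reverts to lazy restore. A tiny standalone demo of that
wrap (hypothetical test program, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned char fpu_counter = 0;

		/* Simulate 300 consecutive context switches that all
		 * use the FPU. */
		for (int i = 0; i < 300; i++)
			fpu_counter++;

		/* Prints 44 (300 mod 256): the counter wrapped, so the
		 * "> 5" eager-restore test below may briefly fail even
		 * for a heavy FPU user, restoring lazy behavior. */
		printf("counter after 300 switches: %u\n", fpu_counter);
		return 0;
	}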
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d3197143..eedd4f625d07 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@ struct thread_struct {
 
 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_MMAP \
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af51c128..4e332244ea75 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 	restore_fpu(tsk);
 
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933005b4..2885fc9d9dcd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;
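Taken together, the hunks implement one small heuristic around lazy
FPU context switching; only the field's home changes, not the policy.
A condensed sketch of that policy with illustrative names (the
sketch_* helpers are stand-ins, not kernel symbols):

	/* Consecutive context switches in which the task used the FPU. */
	struct sketch_thread {
		unsigned char fpu_counter;
	};

	/* On each FPU state restore (the fpu.c hunk): the streak grows. */
	static void sketch_fpu_restored(struct sketch_thread *t)
	{
		t->fpu_counter++;
	}

	/* Task switched out without touching the FPU (the fpu.h hunk):
	 * the streak resets and saving stays fully lazy. */
	static void sketch_fpu_unused(struct sketch_thread *t)
	{
		t->fpu_counter = 0;
	}

	/* Task switched in (the process_32.c hunks): a streak longer
	 * than 5 predicts immediate FPU use, so prefetch the xstate and
	 * restore eagerly instead of waiting for the FPU-disabled trap. */
	static int sketch_restore_eagerly(const struct sketch_thread *t)
	{
		return t->fpu_counter > 5;
	}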