author		Graf Yang <graf.yang@analog.com>	2008-10-08 05:30:01 -0400
committer	Bryan Wu <cooloney@kernel.org>		2008-10-08 05:30:01 -0400
commit		ca87b7ad00a620f259048fdfb27dc2a5384c1e4e (patch)
tree		1653559472f74c189cf62d03085b6ee040738021
parent		7d98c881eed9e19767bc77ffd650d0041b4f41ec (diff)

Blackfin arch: add CONFIG_APP_STACK_L1 to enable or disable putting the application stack in L1
Use CONFIG_APP_STACK_L1 to enable or disable placing the application
stack in L1 scratch memory. It is enabled by default; SMP kernels need
to turn it off.
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
-rw-r--r--	arch/blackfin/Kconfig			|  9
-rw-r--r--	arch/blackfin/include/asm/mmu_context.h	| 79
2 files changed, 51 insertions(+), 37 deletions(-)
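What the option buys, and why SMP must opt out: L1 scratchpad is a small, fast, core-private SRAM, and the refcounting visible in free_l1stack() below suggests that every opted-in task shares a single region of it, so the kernel copies stack contents in and out on each context switch. An SMP kernel, where a task may migrate between cores with separate L1 memories, cannot use that scheme. A minimal sketch of the restore half of the mechanism, using identifiers from this patch; the helper name l1stack_restore() is hypothetical and does not exist in the tree:

/* Hypothetical helper, for illustration only: restore the incoming
 * task's saved stack image into the shared L1 scratch region. The
 * patch open-codes this inside switch_mm() under the new guard. */
static inline void l1stack_restore(struct mm_struct *next_mm)
{
#ifdef CONFIG_APP_STACK_L1
	if (!next_mm->context.l1_stack_save)
		return;		/* this task's stack is not in L1 */
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

With CONFIG_APP_STACK_L1=n the body compiles away entirely, which is the same effect the patch achieves by bracketing switch_mm() and destroy_context() with the new guard.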
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index c507a92cb289..9d936a3986c8 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -629,6 +629,15 @@ config CPLB_SWITCH_TAB_L1
 	  If enabled, the CPLB Switch Tables are linked
 	  into L1 data memory. (less latency)
 
+config APP_STACK_L1
+	bool "Support locating application stack in L1 Scratch Memory"
+	default y
+	help
+	  If enabled the application stack can be located in L1
+	  scratch memory (less latency).
+
+	  Currently only works with FLAT binaries.
+
 comment "Speed Optimizations"
 config BFIN_INS_LOWOVERHEAD
 	bool "ins[bwl] low overhead, higher interrupt latency"
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 8529552a981f..35593dda2a4d 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -45,49 +45,12 @@ extern unsigned long l1_stack_len;
 extern int l1sram_free(const void*);
 extern void *l1sram_alloc_max(void*);
 
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/* Called when creating a new context during fork() or execve(). */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-#ifdef CONFIG_MPU
-	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
-	mm->context.page_rwx_mask = (unsigned long *)p;
-	memset(mm->context.page_rwx_mask, 0,
-	       page_mask_nelts * 3 * sizeof(long));
-#endif
-	return 0;
-}
-
 static inline void free_l1stack(void)
 {
 	nr_l1stack_tasks--;
 	if (nr_l1stack_tasks == 0)
 		l1sram_free(l1_stack_base);
 }
-static inline void destroy_context(struct mm_struct *mm)
-{
-	struct sram_list_struct *tmp;
-
-	if (current_l1_stack_save == mm->context.l1_stack_save)
-		current_l1_stack_save = NULL;
-	if (mm->context.l1_stack_save)
-		free_l1stack();
-
-	while ((tmp = mm->context.sram_list)) {
-		mm->context.sram_list = tmp->next;
-		sram_free(tmp->addr);
-		kfree(tmp);
-	}
-#ifdef CONFIG_MPU
-	if (current_rwx_mask == mm->context.page_rwx_mask)
-		current_rwx_mask = NULL;
-	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
-#endif
-}
 
 static inline unsigned long
 alloc_l1stack(unsigned long length, unsigned long *stack_base)
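The hunk's trailing context shows only the signature of alloc_l1stack(). For orientation, here is a plausible reconstruction of such an allocator from the declarations visible above (l1sram_alloc_max(), l1sram_free(), nr_l1stack_tasks); the real body is untouched by this patch and may differ:

/* Plausible sketch only -- not quoted from the patch. The idea: all
 * L1-stack tasks share one region, claimed once and refcounted. */
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		/* first user: claim the largest free chunk of L1 scratch */
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}
	if (l1_stack_len < length) {
		/* region too small for this stack; undo if we just took it */
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}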
@@ -134,6 +97,7 @@ static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm
 	}
 #endif
 
+#ifdef CONFIG_APP_STACK_L1
 	/* L1 stack switching. */
 	if (!next_mm->context.l1_stack_save)
 		return;
@@ -144,6 +108,7 @@ static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm
 	}
 	current_l1_stack_save = next_mm->context.l1_stack_save;
 	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
+#endif
 }
 
 #ifdef CONFIG_MPU
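The two switch_mm() hunks above only add the guard; the lines between them are unchanged and not part of the diff. Read together, the guarded path plausibly looks like the sketch below, with the unshown middle reconstructed and marked as assumed:

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching. */
	if (!next_mm->context.l1_stack_save)
		return;
	if (current_l1_stack_save == next_mm->context.l1_stack_save)
		return;				/* assumed: not shown in the diff */
	if (current_l1_stack_save) {
		/* assumed: spill the outgoing task's live L1 stack image */
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif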
@@ -180,4 +145,44 @@ static inline void update_protections(struct mm_struct *mm)
 }
 #endif
 
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/* Called when creating a new context during fork() or execve(). */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+#ifdef CONFIG_MPU
+	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
+	mm->context.page_rwx_mask = (unsigned long *)p;
+	memset(mm->context.page_rwx_mask, 0,
+	       page_mask_nelts * 3 * sizeof(long));
+#endif
+	return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+	struct sram_list_struct *tmp;
+
+#ifdef CONFIG_APP_STACK_L1
+	if (current_l1_stack_save == mm->context.l1_stack_save)
+		current_l1_stack_save = 0;
+	if (mm->context.l1_stack_save)
+		free_l1stack();
+#endif
+
+	while ((tmp = mm->context.sram_list)) {
+		mm->context.sram_list = tmp->next;
+		sram_free(tmp->addr);
+		kfree(tmp);
+	}
+#ifdef CONFIG_MPU
+	if (current_rwx_mask == mm->context.page_rwx_mask)
+		current_rwx_mask = NULL;
+	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
+#endif
+}
+
 #endif
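The relocated init_new_context()/destroy_context() pair touches three per-mm fields. A hypothetical sketch of the context structure they imply follows; the field names come from the code above, but the real layout lives in the Blackfin mmu headers and may differ:

/* Hypothetical, for illustration only: per-mm state used above. */
typedef struct {
	struct sram_list_struct *sram_list;	/* per-process SRAM allocations */
	void *l1_stack_save;		/* saved L1 stack image (CONFIG_APP_STACK_L1) */
#ifdef CONFIG_MPU
	unsigned long *page_rwx_mask;	/* per-page R/W/X permission bits */
#endif
} mm_context_t;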