author | Sonic Zhang <sonic.zhang@analog.com> | 2008-07-19 02:51:31 -0400
---|---|---
committer | Bryan Wu <cooloney@kernel.org> | 2008-07-19 02:51:31 -0400
commit | 5d481f497559245ecfb1b95cafe39bfbf037fda5 (patch) |
tree | 2fda653110eee285def1a063bde65c3a45e7bc5f /arch/blackfin/mm |
parent | 1a8caeebe3689ad4ef67d7ff5d4143f7748deedd (diff) |
Blackfin arch: change L1 malloc to be based on a slab cache and linked lists.
Remove the fixed limit on the number of SRAM pieces (CONFIG_L1_MAX_PIECE)
and improve the performance of allocating and freeing SRAM pieces.
Signed-off-by: Sonic Zhang <sonic.zhang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
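
As background for the diff below, here is a minimal userspace sketch (not the kernel code itself) of the scheme the patch moves to: piece descriptors are allocated from a cache and kept on linked lists, allocation is first-fit with splitting, and the used list is kept sorted by address. Plain malloc() stands in for kmem_cache_alloc(), and the pid bookkeeping, spinlocks, and coalescing on free are omitted, so the names sram_alloc, free_head, and used_head here are illustrative only.

```c
/*
 * Illustrative sketch only: first-fit allocation from a linked free list
 * of pieces, splitting a piece when it is larger than requested.
 * malloc() stands in for the kernel's kmem_cache; locking, pid tracking
 * and coalescing on free are left out.
 */
#include <stdio.h>
#include <stdlib.h>

struct sram_piece {
	void *paddr;
	size_t size;
	struct sram_piece *next;
};

static struct sram_piece free_head, used_head;

static void *sram_alloc(size_t size)
{
	struct sram_piece *plast = &free_head, *pslot = free_head.next, *pavail;

	size = (size + 3) & ~(size_t)3;		/* 4-byte alignment, as in the patch */

	/* first fit: walk the free list until a piece is big enough */
	while (pslot && pslot->size < size) {
		plast = pslot;
		pslot = pslot->next;
	}
	if (!pslot)
		return NULL;

	if (pslot->size == size) {		/* exact fit: unlink the piece */
		plast->next = pslot->next;
		pavail = pslot;
	} else {				/* split: carve 'size' off the front */
		pavail = malloc(sizeof(*pavail));
		if (!pavail)
			return NULL;
		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr = (char *)pslot->paddr + size;
		pslot->size -= size;
	}

	/* keep the used list sorted by address so a later free can find the piece */
	plast = &used_head;
	pslot = used_head.next;
	while (pslot && (char *)pavail->paddr > (char *)pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}
	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}

int main(void)
{
	static char bank[4096];			/* stands in for one L1 SRAM bank */
	struct sram_piece *whole = malloc(sizeof(*whole));

	if (!whole)
		return 1;
	whole->paddr = bank;
	whole->size = sizeof(bank);
	whole->next = NULL;
	free_head.next = whole;
	used_head.next = NULL;

	printf("alloc 100 -> %p\n", sram_alloc(100));
	printf("alloc 200 -> %p\n", sram_alloc(200));
	return 0;
}
```

The patch's _l1_sram_alloc()/_l1_sram_free() follow the same shape, but additionally record current->pid in each piece and merge neighbouring free pieces when one is released, which is what lets it drop the old fixed-size l1_sram_piece arrays and the CONFIG_L1_MAX_PIECE clamp.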
Diffstat (limited to 'arch/blackfin/mm')
-rw-r--r-- | arch/blackfin/mm/blackfin_sram.c | 395 |
-rw-r--r-- | arch/blackfin/mm/blackfin_sram.h | 4 |
-rw-r--r-- | arch/blackfin/mm/init.c | 12 |
3 files changed, 248 insertions, 163 deletions
diff --git a/arch/blackfin/mm/blackfin_sram.c b/arch/blackfin/mm/blackfin_sram.c
index 8f6fdc245330..b58cf196d7cc 100644
--- a/arch/blackfin/mm/blackfin_sram.c
+++ b/arch/blackfin/mm/blackfin_sram.c
@@ -41,215 +41,276 @@
41 | #include <asm/blackfin.h> | 41 | #include <asm/blackfin.h> |
42 | #include "blackfin_sram.h" | 42 | #include "blackfin_sram.h" |
43 | 43 | ||
44 | spinlock_t l1sram_lock, l1_data_sram_lock, l1_inst_sram_lock; | 44 | static spinlock_t l1sram_lock, l1_data_sram_lock, l1_inst_sram_lock; |
45 | |||
46 | #if CONFIG_L1_MAX_PIECE < 16 | ||
47 | #undef CONFIG_L1_MAX_PIECE | ||
48 | #define CONFIG_L1_MAX_PIECE 16 | ||
49 | #endif | ||
50 | |||
51 | #if CONFIG_L1_MAX_PIECE > 1024 | ||
52 | #undef CONFIG_L1_MAX_PIECE | ||
53 | #define CONFIG_L1_MAX_PIECE 1024 | ||
54 | #endif | ||
55 | |||
56 | #define SRAM_SLT_NULL 0 | ||
57 | #define SRAM_SLT_FREE 1 | ||
58 | #define SRAM_SLT_ALLOCATED 2 | ||
59 | 45 | ||
60 | /* the data structure for L1 scratchpad and DATA SRAM */ | 46 | /* the data structure for L1 scratchpad and DATA SRAM */ |
61 | struct l1_sram_piece { | 47 | struct sram_piece { |
62 | void *paddr; | 48 | void *paddr; |
63 | int size; | 49 | int size; |
64 | int flag; | ||
65 | pid_t pid; | 50 | pid_t pid; |
51 | struct sram_piece *next; | ||
66 | }; | 52 | }; |
67 | 53 | ||
68 | static struct l1_sram_piece l1_ssram[CONFIG_L1_MAX_PIECE]; | 54 | static struct sram_piece free_l1_ssram_head, used_l1_ssram_head; |
69 | 55 | ||
70 | #if L1_DATA_A_LENGTH != 0 | 56 | #if L1_DATA_A_LENGTH != 0 |
71 | static struct l1_sram_piece l1_data_A_sram[CONFIG_L1_MAX_PIECE]; | 57 | static struct sram_piece free_l1_data_A_sram_head, used_l1_data_A_sram_head; |
72 | #endif | 58 | #endif |
73 | 59 | ||
74 | #if L1_DATA_B_LENGTH != 0 | 60 | #if L1_DATA_B_LENGTH != 0 |
75 | static struct l1_sram_piece l1_data_B_sram[CONFIG_L1_MAX_PIECE]; | 61 | static struct sram_piece free_l1_data_B_sram_head, used_l1_data_B_sram_head; |
76 | #endif | 62 | #endif |
77 | 63 | ||
78 | #if L1_CODE_LENGTH != 0 | 64 | #if L1_CODE_LENGTH != 0 |
79 | static struct l1_sram_piece l1_inst_sram[CONFIG_L1_MAX_PIECE]; | 65 | static struct sram_piece free_l1_inst_sram_head, used_l1_inst_sram_head; |
80 | #endif | 66 | #endif |
81 | 67 | ||
68 | static struct kmem_cache *sram_piece_cache; | ||
69 | |||
82 | /* L1 Scratchpad SRAM initialization function */ | 70 | /* L1 Scratchpad SRAM initialization function */ |
83 | void __init l1sram_init(void) | 71 | static void __init l1sram_init(void) |
84 | { | 72 | { |
85 | printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n", | 73 | free_l1_ssram_head.next = |
86 | L1_SCRATCH_LENGTH >> 10); | 74 | kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); |
75 | if (!free_l1_ssram_head.next) { | ||
76 | printk(KERN_INFO"Fail to initialize Scratchpad data SRAM.\n"); | ||
77 | return; | ||
78 | } | ||
87 | 79 | ||
88 | memset(&l1_ssram, 0x00, sizeof(l1_ssram)); | 80 | free_l1_ssram_head.next->paddr = (void *)L1_SCRATCH_START; |
89 | l1_ssram[0].paddr = (void *)L1_SCRATCH_START; | 81 | free_l1_ssram_head.next->size = L1_SCRATCH_LENGTH; |
90 | l1_ssram[0].size = L1_SCRATCH_LENGTH; | 82 | free_l1_ssram_head.next->pid = 0; |
91 | l1_ssram[0].flag = SRAM_SLT_FREE; | 83 | free_l1_ssram_head.next->next = NULL; |
84 | |||
85 | used_l1_ssram_head.next = NULL; | ||
92 | 86 | ||
93 | /* mutex initialize */ | 87 | /* mutex initialize */ |
94 | spin_lock_init(&l1sram_lock); | 88 | spin_lock_init(&l1sram_lock); |
89 | |||
90 | printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n", | ||
91 | L1_SCRATCH_LENGTH >> 10); | ||
95 | } | 92 | } |
96 | 93 | ||
97 | void __init l1_data_sram_init(void) | 94 | static void __init l1_data_sram_init(void) |
98 | { | 95 | { |
99 | #if L1_DATA_A_LENGTH != 0 | 96 | #if L1_DATA_A_LENGTH != 0 |
100 | memset(&l1_data_A_sram, 0x00, sizeof(l1_data_A_sram)); | 97 | free_l1_data_A_sram_head.next = |
101 | l1_data_A_sram[0].paddr = (void *)L1_DATA_A_START + | 98 | kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); |
102 | (_ebss_l1 - _sdata_l1); | 99 | if (!free_l1_data_A_sram_head.next) { |
103 | l1_data_A_sram[0].size = L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1); | 100 | printk(KERN_INFO"Fail to initialize Data A SRAM.\n"); |
104 | l1_data_A_sram[0].flag = SRAM_SLT_FREE; | 101 | return; |
102 | } | ||
103 | |||
104 | free_l1_data_A_sram_head.next->paddr = | ||
105 | (void *)L1_DATA_A_START + (_ebss_l1 - _sdata_l1); | ||
106 | free_l1_data_A_sram_head.next->size = | ||
107 | L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1); | ||
108 | free_l1_data_A_sram_head.next->pid = 0; | ||
109 | free_l1_data_A_sram_head.next->next = NULL; | ||
110 | |||
111 | used_l1_data_A_sram_head.next = NULL; | ||
105 | 112 | ||
106 | printk(KERN_INFO "Blackfin Data A SRAM: %d KB (%d KB free)\n", | 113 | printk(KERN_INFO "Blackfin Data A SRAM: %d KB (%d KB free)\n", |
107 | L1_DATA_A_LENGTH >> 10, l1_data_A_sram[0].size >> 10); | 114 | L1_DATA_A_LENGTH >> 10, |
115 | free_l1_data_A_sram_head.next->size >> 10); | ||
108 | #endif | 116 | #endif |
109 | #if L1_DATA_B_LENGTH != 0 | 117 | #if L1_DATA_B_LENGTH != 0 |
110 | memset(&l1_data_B_sram, 0x00, sizeof(l1_data_B_sram)); | 118 | free_l1_data_B_sram_head.next = |
111 | l1_data_B_sram[0].paddr = (void *)L1_DATA_B_START + | 119 | kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); |
112 | (_ebss_b_l1 - _sdata_b_l1); | 120 | if (!free_l1_data_B_sram_head.next) { |
113 | l1_data_B_sram[0].size = L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1); | 121 | printk(KERN_INFO"Fail to initialize Data B SRAM.\n"); |
114 | l1_data_B_sram[0].flag = SRAM_SLT_FREE; | 122 | return; |
123 | } | ||
124 | |||
125 | free_l1_data_B_sram_head.next->paddr = | ||
126 | (void *)L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1); | ||
127 | free_l1_data_B_sram_head.next->size = | ||
128 | L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1); | ||
129 | free_l1_data_B_sram_head.next->pid = 0; | ||
130 | free_l1_data_B_sram_head.next->next = NULL; | ||
131 | |||
132 | used_l1_data_B_sram_head.next = NULL; | ||
115 | 133 | ||
116 | printk(KERN_INFO "Blackfin Data B SRAM: %d KB (%d KB free)\n", | 134 | printk(KERN_INFO "Blackfin Data B SRAM: %d KB (%d KB free)\n", |
117 | L1_DATA_B_LENGTH >> 10, l1_data_B_sram[0].size >> 10); | 135 | L1_DATA_B_LENGTH >> 10, |
136 | free_l1_data_B_sram_head.next->size >> 10); | ||
118 | #endif | 137 | #endif |
119 | 138 | ||
120 | /* mutex initialize */ | 139 | /* mutex initialize */ |
121 | spin_lock_init(&l1_data_sram_lock); | 140 | spin_lock_init(&l1_data_sram_lock); |
122 | } | 141 | } |
123 | 142 | ||
124 | void __init l1_inst_sram_init(void) | 143 | static void __init l1_inst_sram_init(void) |
125 | { | 144 | { |
126 | #if L1_CODE_LENGTH != 0 | 145 | #if L1_CODE_LENGTH != 0 |
127 | memset(&l1_inst_sram, 0x00, sizeof(l1_inst_sram)); | 146 | free_l1_inst_sram_head.next = |
128 | l1_inst_sram[0].paddr = (void *)L1_CODE_START + (_etext_l1 - _stext_l1); | 147 | kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); |
129 | l1_inst_sram[0].size = L1_CODE_LENGTH - (_etext_l1 - _stext_l1); | 148 | if (!free_l1_inst_sram_head.next) { |
130 | l1_inst_sram[0].flag = SRAM_SLT_FREE; | 149 | printk(KERN_INFO"Fail to initialize Instruction SRAM.\n"); |
150 | return; | ||
151 | } | ||
152 | |||
153 | free_l1_inst_sram_head.next->paddr = | ||
154 | (void *)L1_CODE_START + (_etext_l1 - _stext_l1); | ||
155 | free_l1_inst_sram_head.next->size = | ||
156 | L1_CODE_LENGTH - (_etext_l1 - _stext_l1); | ||
157 | free_l1_inst_sram_head.next->pid = 0; | ||
158 | free_l1_inst_sram_head.next->next = NULL; | ||
159 | |||
160 | used_l1_inst_sram_head.next = NULL; | ||
131 | 161 | ||
132 | printk(KERN_INFO "Blackfin Instruction SRAM: %d KB (%d KB free)\n", | 162 | printk(KERN_INFO "Blackfin Instruction SRAM: %d KB (%d KB free)\n", |
133 | L1_CODE_LENGTH >> 10, l1_inst_sram[0].size >> 10); | 163 | L1_CODE_LENGTH >> 10, |
164 | free_l1_inst_sram_head.next->size >> 10); | ||
134 | #endif | 165 | #endif |
135 | 166 | ||
136 | /* mutex initialize */ | 167 | /* mutex initialize */ |
137 | spin_lock_init(&l1_inst_sram_lock); | 168 | spin_lock_init(&l1_inst_sram_lock); |
138 | } | 169 | } |
139 | 170 | ||
171 | void __init bfin_sram_init(void) | ||
172 | { | ||
173 | sram_piece_cache = kmem_cache_create("sram_piece_cache", | ||
174 | sizeof(struct sram_piece), | ||
175 | 0, SLAB_PANIC, NULL); | ||
176 | |||
177 | l1sram_init(); | ||
178 | l1_data_sram_init(); | ||
179 | l1_inst_sram_init(); | ||
180 | } | ||
181 | |||
140 | /* L1 memory allocate function */ | 182 | /* L1 memory allocate function */ |
141 | static void *_l1_sram_alloc(size_t size, struct l1_sram_piece *pfree, int count) | 183 | static void *_l1_sram_alloc(size_t size, struct sram_piece *pfree_head, |
184 | struct sram_piece *pused_head) | ||
142 | { | 185 | { |
143 | int i, index = 0; | 186 | struct sram_piece *pslot, *plast, *pavail; |
144 | void *addr = NULL; | ||
145 | 187 | ||
146 | if (size <= 0) | 188 | if (size <= 0 || !pfree_head || !pused_head) |
147 | return NULL; | 189 | return NULL; |
148 | 190 | ||
149 | /* Align the size */ | 191 | /* Align the size */ |
150 | size = (size + 3) & ~3; | 192 | size = (size + 3) & ~3; |
151 | 193 | ||
152 | /* not use the good method to match the best slot !!! */ | 194 | pslot = pfree_head->next; |
153 | /* search an available memory slot */ | 195 | plast = pfree_head; |
154 | for (i = 0; i < count; i++) { | 196 | |
155 | if ((pfree[i].flag == SRAM_SLT_FREE) | 197 | /* search an available piece slot */ |
156 | && (pfree[i].size >= size)) { | 198 | while (pslot != NULL && size > pslot->size) { |
157 | addr = pfree[i].paddr; | 199 | plast = pslot; |
158 | pfree[i].flag = SRAM_SLT_ALLOCATED; | 200 | pslot = pslot->next; |
159 | pfree[i].pid = current->pid; | ||
160 | index = i; | ||
161 | break; | ||
162 | } | ||
163 | } | 201 | } |
164 | if (i >= count) | 202 | |
203 | if (!pslot) | ||
165 | return NULL; | 204 | return NULL; |
166 | 205 | ||
167 | /* updated the NULL memory slot !!! */ | 206 | if (pslot->size == size) { |
168 | if (pfree[i].size > size) { | 207 | plast->next = pslot->next; |
169 | for (i = 0; i < count; i++) { | 208 | pavail = pslot; |
170 | if (pfree[i].flag == SRAM_SLT_NULL) { | 209 | } else { |
171 | pfree[i].pid = 0; | 210 | pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); |
172 | pfree[i].flag = SRAM_SLT_FREE; | 211 | |
173 | pfree[i].paddr = addr + size; | 212 | if (!pavail) |
174 | pfree[i].size = pfree[index].size - size; | 213 | return NULL; |
175 | pfree[index].size = size; | 214 | |
176 | break; | 215 | pavail->paddr = pslot->paddr; |
177 | } | 216 | pavail->size = size; |
178 | } | 217 | pslot->paddr += size; |
218 | pslot->size -= size; | ||
179 | } | 219 | } |
180 | 220 | ||
181 | return addr; | 221 | pavail->pid = current->pid; |
222 | |||
223 | pslot = pused_head->next; | ||
224 | plast = pused_head; | ||
225 | |||
226 | /* insert new piece into used piece list !!! */ | ||
227 | while (pslot != NULL && pavail->paddr < pslot->paddr) { | ||
228 | plast = pslot; | ||
229 | pslot = pslot->next; | ||
230 | } | ||
231 | |||
232 | pavail->next = pslot; | ||
233 | plast->next = pavail; | ||
234 | |||
235 | return pavail->paddr; | ||
182 | } | 236 | } |
183 | 237 | ||
184 | /* Allocate the largest available block. */ | 238 | /* Allocate the largest available block. */ |
185 | static void *_l1_sram_alloc_max(struct l1_sram_piece *pfree, int count, | 239 | static void *_l1_sram_alloc_max(struct sram_piece *pfree_head, |
240 | struct sram_piece *pused_head, | ||
186 | unsigned long *psize) | 241 | unsigned long *psize) |
187 | { | 242 | { |
188 | unsigned long best = 0; | 243 | struct sram_piece *pslot, *pmax; |
189 | int i, index = -1; | 244 | |
190 | void *addr = NULL; | 245 | if (!pfree_head || !pused_head) |
246 | return NULL; | ||
247 | |||
248 | pmax = pslot = pfree_head->next; | ||
191 | 249 | ||
192 | /* search an available memory slot */ | 250 | /* search an available piece slot */ |
193 | for (i = 0; i < count; i++) { | 251 | while (pslot != NULL) { |
194 | if (pfree[i].flag == SRAM_SLT_FREE && pfree[i].size > best) { | 252 | if (pslot->size > pmax->size) |
195 | addr = pfree[i].paddr; | 253 | pmax = pslot; |
196 | index = i; | 254 | pslot = pslot->next; |
197 | best = pfree[i].size; | ||
198 | } | ||
199 | } | 255 | } |
200 | if (index < 0) | 256 | |
257 | if (!pmax) | ||
201 | return NULL; | 258 | return NULL; |
202 | *psize = best; | ||
203 | 259 | ||
204 | pfree[index].pid = current->pid; | 260 | *psize = pmax->size; |
205 | pfree[index].flag = SRAM_SLT_ALLOCATED; | 261 | |
206 | return addr; | 262 | return _l1_sram_alloc(*psize, pfree_head, pused_head); |
207 | } | 263 | } |
208 | 264 | ||
209 | /* L1 memory free function */ | 265 | /* L1 memory free function */ |
210 | static int _l1_sram_free(const void *addr, | 266 | static int _l1_sram_free(const void *addr, |
211 | struct l1_sram_piece *pfree, | 267 | struct sram_piece *pfree_head, |
212 | int count) | 268 | struct sram_piece *pused_head) |
213 | { | 269 | { |
214 | int i, index = 0; | 270 | struct sram_piece *pslot, *plast, *pavail; |
271 | |||
272 | if (!pfree_head || !pused_head) | ||
273 | return -1; | ||
215 | 274 | ||
216 | /* search the relevant memory slot */ | 275 | /* search the relevant memory slot */ |
217 | for (i = 0; i < count; i++) { | 276 | pslot = pused_head->next; |
218 | if (pfree[i].paddr == addr) { | 277 | plast = pused_head; |
219 | if (pfree[i].flag != SRAM_SLT_ALLOCATED) { | 278 | |
220 | /* error log */ | 279 | /* search an available piece slot */ |
221 | return -1; | 280 | while (pslot != NULL && pslot->paddr != addr) { |
222 | } | 281 | plast = pslot; |
223 | index = i; | 282 | pslot = pslot->next; |
224 | break; | ||
225 | } | ||
226 | } | 283 | } |
227 | if (i >= count) | 284 | |
285 | if (!pslot) | ||
228 | return -1; | 286 | return -1; |
229 | 287 | ||
230 | pfree[index].pid = 0; | 288 | plast->next = pslot->next; |
231 | pfree[index].flag = SRAM_SLT_FREE; | 289 | pavail = pslot; |
232 | 290 | pavail->pid = 0; | |
233 | /* link the next address slot */ | 291 | |
234 | for (i = 0; i < count; i++) { | 292 | /* insert free pieces back to the free list */ |
235 | if (((pfree[index].paddr + pfree[index].size) == pfree[i].paddr) | 293 | pslot = pfree_head->next; |
236 | && (pfree[i].flag == SRAM_SLT_FREE)) { | 294 | plast = pfree_head; |
237 | pfree[i].pid = 0; | 295 | |
238 | pfree[i].flag = SRAM_SLT_NULL; | 296 | while (pslot != NULL && addr > pslot->paddr) { |
239 | pfree[index].size += pfree[i].size; | 297 | plast = pslot; |
240 | pfree[index].flag = SRAM_SLT_FREE; | 298 | pslot = pslot->next; |
241 | break; | 299 | } |
242 | } | 300 | |
301 | if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) { | ||
302 | plast->size += pavail->size; | ||
303 | kmem_cache_free(sram_piece_cache, pavail); | ||
304 | } else { | ||
305 | pavail->next = plast; | ||
306 | plast->next = pavail; | ||
307 | plast = pavail; | ||
243 | } | 308 | } |
244 | 309 | ||
245 | /* link the last address slot */ | 310 | if (pslot && plast->paddr + plast->size == pslot->paddr) { |
246 | for (i = 0; i < count; i++) { | 311 | plast->size += pslot->size; |
247 | if (((pfree[i].paddr + pfree[i].size) == pfree[index].paddr) && | 312 | plast->next = pslot->next; |
248 | (pfree[i].flag == SRAM_SLT_FREE)) { | 313 | kmem_cache_free(sram_piece_cache, pslot); |
249 | pfree[index].flag = SRAM_SLT_NULL; | ||
250 | pfree[i].size += pfree[index].size; | ||
251 | break; | ||
252 | } | ||
253 | } | 314 | } |
254 | 315 | ||
255 | return 0; | 316 | return 0; |
@@ -287,7 +348,8 @@ void *l1_data_A_sram_alloc(size_t size)
287 | spin_lock_irqsave(&l1_data_sram_lock, flags); | 348 | spin_lock_irqsave(&l1_data_sram_lock, flags); |
288 | 349 | ||
289 | #if L1_DATA_A_LENGTH != 0 | 350 | #if L1_DATA_A_LENGTH != 0 |
290 | addr = _l1_sram_alloc(size, l1_data_A_sram, ARRAY_SIZE(l1_data_A_sram)); | 351 | addr = _l1_sram_alloc(size, &free_l1_data_A_sram_head, |
352 | &used_l1_data_A_sram_head); | ||
291 | #endif | 353 | #endif |
292 | 354 | ||
293 | /* add mutex operation */ | 355 | /* add mutex operation */ |
@@ -309,8 +371,8 @@ int l1_data_A_sram_free(const void *addr)
309 | spin_lock_irqsave(&l1_data_sram_lock, flags); | 371 | spin_lock_irqsave(&l1_data_sram_lock, flags); |
310 | 372 | ||
311 | #if L1_DATA_A_LENGTH != 0 | 373 | #if L1_DATA_A_LENGTH != 0 |
312 | ret = _l1_sram_free(addr, | 374 | ret = _l1_sram_free(addr, &free_l1_data_A_sram_head, |
313 | l1_data_A_sram, ARRAY_SIZE(l1_data_A_sram)); | 375 | &used_l1_data_A_sram_head); |
314 | #else | 376 | #else |
315 | ret = -1; | 377 | ret = -1; |
316 | #endif | 378 | #endif |
@@ -331,7 +393,8 @@ void *l1_data_B_sram_alloc(size_t size)
331 | /* add mutex operation */ | 393 | /* add mutex operation */ |
332 | spin_lock_irqsave(&l1_data_sram_lock, flags); | 394 | spin_lock_irqsave(&l1_data_sram_lock, flags); |
333 | 395 | ||
334 | addr = _l1_sram_alloc(size, l1_data_B_sram, ARRAY_SIZE(l1_data_B_sram)); | 396 | addr = _l1_sram_alloc(size, &free_l1_data_B_sram_head, |
397 | &used_l1_data_B_sram_head); | ||
335 | 398 | ||
336 | /* add mutex operation */ | 399 | /* add mutex operation */ |
337 | spin_unlock_irqrestore(&l1_data_sram_lock, flags); | 400 | spin_unlock_irqrestore(&l1_data_sram_lock, flags); |
@@ -355,7 +418,8 @@ int l1_data_B_sram_free(const void *addr)
355 | /* add mutex operation */ | 418 | /* add mutex operation */ |
356 | spin_lock_irqsave(&l1_data_sram_lock, flags); | 419 | spin_lock_irqsave(&l1_data_sram_lock, flags); |
357 | 420 | ||
358 | ret = _l1_sram_free(addr, l1_data_B_sram, ARRAY_SIZE(l1_data_B_sram)); | 421 | ret = _l1_sram_free(addr, &free_l1_data_B_sram_head, |
422 | &used_l1_data_B_sram_head); | ||
359 | 423 | ||
360 | /* add mutex operation */ | 424 | /* add mutex operation */ |
361 | spin_unlock_irqrestore(&l1_data_sram_lock, flags); | 425 | spin_unlock_irqrestore(&l1_data_sram_lock, flags); |
@@ -408,7 +472,8 @@ void *l1_inst_sram_alloc(size_t size)
408 | /* add mutex operation */ | 472 | /* add mutex operation */ |
409 | spin_lock_irqsave(&l1_inst_sram_lock, flags); | 473 | spin_lock_irqsave(&l1_inst_sram_lock, flags); |
410 | 474 | ||
411 | addr = _l1_sram_alloc(size, l1_inst_sram, ARRAY_SIZE(l1_inst_sram)); | 475 | addr = _l1_sram_alloc(size, &free_l1_inst_sram_head, |
476 | &used_l1_inst_sram_head); | ||
412 | 477 | ||
413 | /* add mutex operation */ | 478 | /* add mutex operation */ |
414 | spin_unlock_irqrestore(&l1_inst_sram_lock, flags); | 479 | spin_unlock_irqrestore(&l1_inst_sram_lock, flags); |
@@ -432,7 +497,8 @@ int l1_inst_sram_free(const void *addr)
432 | /* add mutex operation */ | 497 | /* add mutex operation */ |
433 | spin_lock_irqsave(&l1_inst_sram_lock, flags); | 498 | spin_lock_irqsave(&l1_inst_sram_lock, flags); |
434 | 499 | ||
435 | ret = _l1_sram_free(addr, l1_inst_sram, ARRAY_SIZE(l1_inst_sram)); | 500 | ret = _l1_sram_free(addr, &free_l1_inst_sram_head, |
501 | &used_l1_inst_sram_head); | ||
436 | 502 | ||
437 | /* add mutex operation */ | 503 | /* add mutex operation */ |
438 | spin_unlock_irqrestore(&l1_inst_sram_lock, flags); | 504 | spin_unlock_irqrestore(&l1_inst_sram_lock, flags); |
@@ -453,7 +519,8 @@ void *l1sram_alloc(size_t size)
453 | /* add mutex operation */ | 519 | /* add mutex operation */ |
454 | spin_lock_irqsave(&l1sram_lock, flags); | 520 | spin_lock_irqsave(&l1sram_lock, flags); |
455 | 521 | ||
456 | addr = _l1_sram_alloc(size, l1_ssram, ARRAY_SIZE(l1_ssram)); | 522 | addr = _l1_sram_alloc(size, &free_l1_ssram_head, |
523 | &used_l1_ssram_head); | ||
457 | 524 | ||
458 | /* add mutex operation */ | 525 | /* add mutex operation */ |
459 | spin_unlock_irqrestore(&l1sram_lock, flags); | 526 | spin_unlock_irqrestore(&l1sram_lock, flags); |
@@ -470,7 +537,8 @@ void *l1sram_alloc_max(size_t *psize)
470 | /* add mutex operation */ | 537 | /* add mutex operation */ |
471 | spin_lock_irqsave(&l1sram_lock, flags); | 538 | spin_lock_irqsave(&l1sram_lock, flags); |
472 | 539 | ||
473 | addr = _l1_sram_alloc_max(l1_ssram, ARRAY_SIZE(l1_ssram), psize); | 540 | addr = _l1_sram_alloc_max(&free_l1_ssram_head, |
541 | &used_l1_ssram_head, psize); | ||
474 | 542 | ||
475 | /* add mutex operation */ | 543 | /* add mutex operation */ |
476 | spin_unlock_irqrestore(&l1sram_lock, flags); | 544 | spin_unlock_irqrestore(&l1sram_lock, flags); |
@@ -487,7 +555,8 @@ int l1sram_free(const void *addr)
487 | /* add mutex operation */ | 555 | /* add mutex operation */ |
488 | spin_lock_irqsave(&l1sram_lock, flags); | 556 | spin_lock_irqsave(&l1sram_lock, flags); |
489 | 557 | ||
490 | ret = _l1_sram_free(addr, l1_ssram, ARRAY_SIZE(l1_ssram)); | 558 | ret = _l1_sram_free(addr, &free_l1_ssram_head, |
559 | &used_l1_ssram_head); | ||
491 | 560 | ||
492 | /* add mutex operation */ | 561 | /* add mutex operation */ |
493 | spin_unlock_irqrestore(&l1sram_lock, flags); | 562 | spin_unlock_irqrestore(&l1sram_lock, flags); |
@@ -553,28 +622,38 @@ EXPORT_SYMBOL(sram_alloc_with_lsl);
553 | * (including newline). | 622 | * (including newline). |
554 | */ | 623 | */ |
555 | static int _l1sram_proc_read(char *buf, int *len, int count, const char *desc, | 624 | static int _l1sram_proc_read(char *buf, int *len, int count, const char *desc, |
556 | struct l1_sram_piece *pfree, const int array_size) | 625 | struct sram_piece *pfree_head, |
626 | struct sram_piece *pused_head) | ||
557 | { | 627 | { |
558 | int i; | 628 | struct sram_piece *pslot; |
629 | |||
630 | if (!pfree_head || !pused_head) | ||
631 | return -1; | ||
559 | 632 | ||
560 | *len += sprintf(&buf[*len], "--- L1 %-14s Size PID State \n", desc); | 633 | *len += sprintf(&buf[*len], "--- L1 %-14s Size PID State \n", desc); |
561 | for (i = 0; i < array_size && *len < count; ++i) { | 634 | |
562 | const char *alloc_type; | 635 | /* search the relevant memory slot */ |
563 | switch (pfree[i].flag) { | 636 | pslot = pused_head->next; |
564 | case SRAM_SLT_NULL: alloc_type = "NULL"; break; | 637 | |
565 | case SRAM_SLT_FREE: alloc_type = "FREE"; break; | 638 | while (pslot != NULL) { |
566 | case SRAM_SLT_ALLOCATED: alloc_type = "ALLOCATED"; break; | ||
567 | default: alloc_type = "????"; break; | ||
568 | } | ||
569 | /* if we've got a lot of space to cover, omit things */ | ||
570 | if ((PAGE_SIZE - 1024) < (CONFIG_L1_MAX_PIECE + 1) * 4 * 44 && | ||
571 | pfree[i].size == 0) | ||
572 | continue; | ||
573 | *len += sprintf(&buf[*len], "%p-%p %8i %5i %-10s\n", | 639 | *len += sprintf(&buf[*len], "%p-%p %8i %5i %-10s\n", |
574 | pfree[i].paddr, pfree[i].paddr + pfree[i].size, | 640 | pslot->paddr, pslot->paddr + pslot->size, |
575 | pfree[i].size, pfree[i].pid, alloc_type); | 641 | pslot->size, pslot->pid, "ALLOCATED"); |
642 | |||
643 | pslot = pslot->next; | ||
644 | } | ||
645 | |||
646 | pslot = pfree_head->next; | ||
647 | |||
648 | while (pslot != NULL) { | ||
649 | *len += sprintf(&buf[*len], "%p-%p %8i %5i %-10s\n", | ||
650 | pslot->paddr, pslot->paddr + pslot->size, | ||
651 | pslot->size, pslot->pid, "FREE"); | ||
652 | |||
653 | pslot = pslot->next; | ||
576 | } | 654 | } |
577 | return (i != array_size); | 655 | |
656 | return 0; | ||
578 | } | 657 | } |
579 | static int l1sram_proc_read(char *buf, char **start, off_t offset, int count, | 658 | static int l1sram_proc_read(char *buf, char **start, off_t offset, int count, |
580 | int *eof, void *data) | 659 | int *eof, void *data) |
@@ -582,21 +661,23 @@ static int l1sram_proc_read(char *buf, char **start, off_t offset, int count,
582 | int len = 0; | 661 | int len = 0; |
583 | 662 | ||
584 | if (_l1sram_proc_read(buf, &len, count, "Scratchpad", | 663 | if (_l1sram_proc_read(buf, &len, count, "Scratchpad", |
585 | l1_ssram, ARRAY_SIZE(l1_ssram))) | 664 | &free_l1_ssram_head, &used_l1_ssram_head)) |
586 | goto not_done; | 665 | goto not_done; |
587 | #if L1_DATA_A_LENGTH != 0 | 666 | #if L1_DATA_A_LENGTH != 0 |
588 | if (_l1sram_proc_read(buf, &len, count, "Data A", | 667 | if (_l1sram_proc_read(buf, &len, count, "Data A", |
589 | l1_data_A_sram, ARRAY_SIZE(l1_data_A_sram))) | 668 | &free_l1_data_A_sram_head, |
669 | &used_l1_data_A_sram_head)) | ||
590 | goto not_done; | 670 | goto not_done; |
591 | #endif | 671 | #endif |
592 | #if L1_DATA_B_LENGTH != 0 | 672 | #if L1_DATA_B_LENGTH != 0 |
593 | if (_l1sram_proc_read(buf, &len, count, "Data B", | 673 | if (_l1sram_proc_read(buf, &len, count, "Data B", |
594 | l1_data_B_sram, ARRAY_SIZE(l1_data_B_sram))) | 674 | &free_l1_data_B_sram_head, |
675 | &used_l1_data_B_sram_head)) | ||
595 | goto not_done; | 676 | goto not_done; |
596 | #endif | 677 | #endif |
597 | #if L1_CODE_LENGTH != 0 | 678 | #if L1_CODE_LENGTH != 0 |
598 | if (_l1sram_proc_read(buf, &len, count, "Instruction", | 679 | if (_l1sram_proc_read(buf, &len, count, "Instruction", |
599 | l1_inst_sram, ARRAY_SIZE(l1_inst_sram))) | 680 | &free_l1_inst_sram_head, &used_l1_inst_sram_head)) |
600 | goto not_done; | 681 | goto not_done; |
601 | #endif | 682 | #endif |
602 | 683 | ||
diff --git a/arch/blackfin/mm/blackfin_sram.h b/arch/blackfin/mm/blackfin_sram.h
index 0fb73b78dd60..8cb0945563f9 100644
--- a/arch/blackfin/mm/blackfin_sram.h
+++ b/arch/blackfin/mm/blackfin_sram.h
@@ -30,9 +30,7 @@
30 | #ifndef __BLACKFIN_SRAM_H__ | 30 | #ifndef __BLACKFIN_SRAM_H__ |
31 | #define __BLACKFIN_SRAM_H__ | 31 | #define __BLACKFIN_SRAM_H__ |
32 | 32 | ||
33 | extern void l1sram_init(void); | 33 | extern void bfin_sram_init(void); |
34 | extern void l1_inst_sram_init(void); | ||
35 | extern void l1_data_sram_init(void); | ||
36 | extern void *l1sram_alloc(size_t); | 34 | extern void *l1sram_alloc(size_t); |
37 | 35 | ||
38 | #endif | 36 | #endif |
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index ec3141fefd20..4aab21f44096 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -164,11 +164,14 @@ void __init mem_init(void)
164 | "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n", | 164 | "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n", |
165 | (unsigned long) freepages << (PAGE_SHIFT-10), _ramend >> 10, | 165 | (unsigned long) freepages << (PAGE_SHIFT-10), _ramend >> 10, |
166 | initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); | 166 | initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); |
167 | } | ||
168 | |||
169 | static int __init sram_init(void) | ||
170 | { | ||
171 | unsigned long tmp; | ||
167 | 172 | ||
168 | /* Initialize the blackfin L1 Memory. */ | 173 | /* Initialize the blackfin L1 Memory. */ |
169 | l1sram_init(); | 174 | bfin_sram_init(); |
170 | l1_data_sram_init(); | ||
171 | l1_inst_sram_init(); | ||
172 | 175 | ||
173 | /* Allocate this once; never free it. We assume this gives us a | 176 | /* Allocate this once; never free it. We assume this gives us a |
174 | pointer to the start of L1 scratchpad memory; panic if it | 177 | pointer to the start of L1 scratchpad memory; panic if it |
@@ -179,7 +182,10 @@ void __init mem_init(void)
179 | tmp, (unsigned long)L1_SCRATCH_TASK_INFO); | 182 | tmp, (unsigned long)L1_SCRATCH_TASK_INFO); |
180 | panic("No L1, time to give up\n"); | 183 | panic("No L1, time to give up\n"); |
181 | } | 184 | } |
185 | |||
186 | return 0; | ||
182 | } | 187 | } |
188 | pure_initcall(sram_init); | ||
183 | 189 | ||
184 | static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) | 190 | static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) |
185 | { | 191 | { |