author	Graf Yang <graf.yang@analog.com>	2008-11-18 04:48:22 -0500
committer	Bryan Wu <cooloney@kernel.org>	2008-11-18 04:48:22 -0500
commit	8f65873e47784a390949f0d61e5692dbf2a8253e (patch)
tree	4d9509bf5e52ebac190d79de04b783829d44f49e /arch/blackfin/mm
parent	b8a989893cbdeb6c97a7b5af5f38fb0e480235f9 (diff)
Blackfin arch: SMP supporting patchset: Blackfin kernel and memory management code
The Blackfin dual-core BF561 processor can support SMP-like features:
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

In this patch, we extend the Blackfin kernel and memory management code
to support SMP.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
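Before the diff itself, here is a minimal, self-contained sketch of the per-CPU pattern the sram-alloc.c changes below apply. It is not code from the patch: the demo_* names and the trivial bump allocator are illustrative only. Each CPU gets its own free-list head and spinlock via DEFINE_PER_CPU(), and every allocation or free path pins itself to the current CPU with get_cpu()/put_cpu() while holding that CPU's lock with interrupts disabled.

/*
 * Illustrative sketch only (not from the patch): demo_piece, demo_lock,
 * demo_free_head and demo_sram_alloc() are hypothetical names showing
 * the per-CPU locking pattern used by the real l1*_sram_alloc/free paths.
 * The per-CPU locks are assumed to have been spin_lock_init()ed at boot,
 * as l1sram_init() does for each CPU in the patch.
 */
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/types.h>

struct demo_piece {
	unsigned char *paddr;		/* start of the free region */
	size_t size;			/* bytes remaining in the region */
	struct demo_piece *next;
};

static DEFINE_PER_CPU(spinlock_t, demo_lock);
static DEFINE_PER_CPU(struct demo_piece, demo_free_head);

static void *demo_sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr = NULL;
	struct demo_piece *p;
	unsigned int cpu;

	cpu = get_cpu();		/* pin to this CPU */
	spin_lock_irqsave(&per_cpu(demo_lock, cpu), flags);

	/* Carve the request out of this CPU's private free region. */
	p = per_cpu(demo_free_head, cpu).next;
	if (p && p->size >= size) {
		addr = p->paddr;
		p->paddr += size;
		p->size -= size;
	}

	spin_unlock_irqrestore(&per_cpu(demo_lock, cpu), flags);
	put_cpu();			/* allow preemption again */

	return addr;
}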
Diffstat (limited to 'arch/blackfin/mm')
-rw-r--r--	arch/blackfin/mm/init.c	60
-rw-r--r--	arch/blackfin/mm/sram-alloc.c	336
2 files changed, 240 insertions, 156 deletions
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index bc240abb8745..57d306b9c56d 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -31,7 +31,8 @@
 #include <linux/bootmem.h>
 #include <linux/uaccess.h>
 #include <asm/bfin-global.h>
-#include <asm/l1layout.h>
+#include <asm/pda.h>
+#include <asm/cplbinit.h>
 #include "blackfin_sram.h"
 
 /*
@@ -53,6 +54,11 @@ static unsigned long empty_bad_page;
 
 unsigned long empty_zero_page;
 
+extern unsigned long exception_stack[NR_CPUS][1024];
+
+struct blackfin_pda cpu_pda[NR_CPUS];
+EXPORT_SYMBOL(cpu_pda);
+
 /*
  * paging_init() continues the virtual memory environment setup which
  * was begun by the code in arch/head.S.
@@ -98,6 +104,42 @@ void __init paging_init(void)
 	}
 }
 
+asmlinkage void init_pda(void)
+{
+	unsigned int cpu = raw_smp_processor_id();
+
+	/* Initialize the PDA fields holding references to other parts
+	   of the memory. The content of such memory is still
+	   undefined at the time of the call, we are only setting up
+	   valid pointers to it. */
+	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));
+
+	cpu_pda[0].next = &cpu_pda[1];
+	cpu_pda[1].next = &cpu_pda[0];
+
+	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
+
+#ifdef CONFIG_MPU
+#else
+	cpu_pda[cpu].ipdt = ipdt_tables[cpu];
+	cpu_pda[cpu].dpdt = dpdt_tables[cpu];
+#ifdef CONFIG_CPLB_INFO
+	cpu_pda[cpu].ipdt_swapcount = ipdt_swapcount_tables[cpu];
+	cpu_pda[cpu].dpdt_swapcount = dpdt_swapcount_tables[cpu];
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+	cpu_pda[cpu].imask = 0x1f;
+#endif
+}
+
+void __cpuinit reserve_pda(void)
+{
+	printk(KERN_INFO "PDA for CPU%u reserved at %p\n", smp_processor_id(),
+					&cpu_pda[smp_processor_id()]);
+}
+
 void __init mem_init(void)
 {
 	unsigned int codek = 0, datak = 0, initk = 0;
@@ -141,21 +183,13 @@ void __init mem_init(void)
 
 static int __init sram_init(void)
 {
-	unsigned long tmp;
-
 	/* Initialize the blackfin L1 Memory. */
 	bfin_sram_init();
 
-	/* Allocate this once; never free it.  We assume this gives us a
-	   pointer to the start of L1 scratchpad memory; panic if it
-	   doesn't. */
-	tmp = (unsigned long)l1sram_alloc(sizeof(struct l1_scratch_task_info));
-	if (tmp != (unsigned long)L1_SCRATCH_TASK_INFO) {
-		printk(KERN_EMERG "mem_init(): Did not get the right address from l1sram_alloc: %08lx != %08lx\n",
-		       tmp, (unsigned long)L1_SCRATCH_TASK_INFO);
-		panic("No L1, time to give up\n");
-	}
-
+	/* Reserve the PDA space for the boot CPU right after we
+	 * initialized the scratch memory allocator.
+	 */
+	reserve_pda();
 	return 0;
 }
 pure_initcall(sram_init);
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index cc6f336e7313..8f82b4c92d07 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -41,8 +41,10 @@
 #include <asm/blackfin.h>
 #include "blackfin_sram.h"
 
-static spinlock_t l1sram_lock, l1_data_sram_lock, l1_inst_sram_lock;
-static spinlock_t l2_sram_lock;
+static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
+static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
 
 /* the data structure for L1 scratchpad and DATA SRAM */
 struct sram_piece {
@@ -52,18 +54,22 @@ struct sram_piece {
 	struct sram_piece *next;
 };
 
-static struct sram_piece free_l1_ssram_head, used_l1_ssram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
 
 #if L1_DATA_A_LENGTH != 0
-static struct sram_piece free_l1_data_A_sram_head, used_l1_data_A_sram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
 #endif
 
 #if L1_DATA_B_LENGTH != 0
-static struct sram_piece free_l1_data_B_sram_head, used_l1_data_B_sram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
 #endif
 
 #if L1_CODE_LENGTH != 0
-static struct sram_piece free_l1_inst_sram_head, used_l1_inst_sram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
 #endif
 
 #if L2_LENGTH != 0
@@ -75,102 +81,115 @@ static struct kmem_cache *sram_piece_cache;
 /* L1 Scratchpad SRAM initialization function */
 static void __init l1sram_init(void)
 {
-	free_l1_ssram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_ssram_head.next) {
-		printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n");
-		return;
+	unsigned int cpu;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_ssram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_ssram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu);
+		per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH;
+		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_ssram_head, cpu).next = NULL;
+
+		/* mutex initialize */
+		spin_lock_init(&per_cpu(l1sram_lock, cpu));
+		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
+			L1_SCRATCH_LENGTH >> 10);
 	}
-
-	free_l1_ssram_head.next->paddr = (void *)L1_SCRATCH_START;
-	free_l1_ssram_head.next->size = L1_SCRATCH_LENGTH;
-	free_l1_ssram_head.next->pid = 0;
-	free_l1_ssram_head.next->next = NULL;
-
-	used_l1_ssram_head.next = NULL;
-
-	/* mutex initialize */
-	spin_lock_init(&l1sram_lock);
-
-	printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
-	       L1_SCRATCH_LENGTH >> 10);
 }
 
 static void __init l1_data_sram_init(void)
 {
+	unsigned int cpu;
 #if L1_DATA_A_LENGTH != 0
-	free_l1_data_A_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_data_A_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
-		return;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_A_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
+			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
+			L1_DATA_A_LENGTH >> 10,
+			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
 	}
-
-	free_l1_data_A_sram_head.next->paddr =
-		(void *)L1_DATA_A_START + (_ebss_l1 - _sdata_l1);
-	free_l1_data_A_sram_head.next->size =
-		L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
-	free_l1_data_A_sram_head.next->pid = 0;
-	free_l1_data_A_sram_head.next->next = NULL;
-
-	used_l1_data_A_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
-	       L1_DATA_A_LENGTH >> 10,
-	       free_l1_data_A_sram_head.next->size >> 10);
 #endif
 #if L1_DATA_B_LENGTH != 0
-	free_l1_data_B_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_data_B_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
-		return;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_B_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
+			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
+			L1_DATA_B_LENGTH >> 10,
+			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
+		/* mutex initialize */
 	}
-
-	free_l1_data_B_sram_head.next->paddr =
-		(void *)L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1);
-	free_l1_data_B_sram_head.next->size =
-		L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
-	free_l1_data_B_sram_head.next->pid = 0;
-	free_l1_data_B_sram_head.next->next = NULL;
-
-	used_l1_data_B_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
-	       L1_DATA_B_LENGTH >> 10,
-	       free_l1_data_B_sram_head.next->size >> 10);
 #endif
 
-	/* mutex initialize */
-	spin_lock_init(&l1_data_sram_lock);
+#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
+#endif
 }
 
 static void __init l1_inst_sram_init(void)
 {
 #if L1_CODE_LENGTH != 0
-	free_l1_inst_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_inst_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
-		return;
+	unsigned int cpu;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_inst_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
+			return;
+		}
+
+		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
+			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->size =
+			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
+			L1_CODE_LENGTH >> 10,
+			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
+
+		/* mutex initialize */
+		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
 	}
-
-	free_l1_inst_sram_head.next->paddr =
-		(void *)L1_CODE_START + (_etext_l1 - _stext_l1);
-	free_l1_inst_sram_head.next->size =
-		L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
-	free_l1_inst_sram_head.next->pid = 0;
-	free_l1_inst_sram_head.next->next = NULL;
-
-	used_l1_inst_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
-	       L1_CODE_LENGTH >> 10,
-	       free_l1_inst_sram_head.next->size >> 10);
 #endif
-
-	/* mutex initialize */
-	spin_lock_init(&l1_inst_sram_lock);
 }
 
 static void __init l2_sram_init(void)
@@ -179,7 +198,7 @@ static void __init l2_sram_init(void)
 	free_l2_sram_head.next =
 		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
 	if (!free_l2_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L2 SRAM\n");
+		printk(KERN_INFO "Fail to initialize L2 SRAM.\n");
 		return;
 	}
 
@@ -200,6 +219,7 @@ static void __init l2_sram_init(void)
 	/* mutex initialize */
 	spin_lock_init(&l2_sram_lock);
 }
+
 void __init bfin_sram_init(void)
 {
 	sram_piece_cache = kmem_cache_create("sram_piece_cache",
@@ -353,20 +373,20 @@ int sram_free(const void *addr)
 {
 
 #if L1_CODE_LENGTH != 0
-	if (addr >= (void *)L1_CODE_START
-		 && addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))
+	if (addr >= (void *)get_l1_code_start()
+		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
 		return l1_inst_sram_free(addr);
 	else
 #endif
 #if L1_DATA_A_LENGTH != 0
-	if (addr >= (void *)L1_DATA_A_START
-		 && addr < (void *)(L1_DATA_A_START + L1_DATA_A_LENGTH))
+	if (addr >= (void *)get_l1_data_a_start()
+		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
 		return l1_data_A_sram_free(addr);
 	else
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (addr >= (void *)L1_DATA_B_START
-		 && addr < (void *)(L1_DATA_B_START + L1_DATA_B_LENGTH))
+	if (addr >= (void *)get_l1_data_b_start()
+		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
 		return l1_data_B_sram_free(addr);
 	else
 #endif
@@ -384,17 +404,20 @@ void *l1_data_A_sram_alloc(size_t size)
 {
 	unsigned long flags;
 	void *addr = NULL;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
 #if L1_DATA_A_LENGTH != 0
-	addr = _sram_alloc(size, &free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
 #endif
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
 		(long unsigned int)addr, size);
@@ -407,19 +430,22 @@ int l1_data_A_sram_free(const void *addr)
 {
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
 #if L1_DATA_A_LENGTH != 0
-	ret = _sram_free(addr, &free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
 #else
 	ret = -1;
 #endif
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 }
@@ -430,15 +456,18 @@ void *l1_data_B_sram_alloc(size_t size)
 #if L1_DATA_B_LENGTH != 0
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
-	addr = _sram_alloc(size, &free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
 		(long unsigned int)addr, size);
@@ -455,15 +484,18 @@ int l1_data_B_sram_free(const void *addr)
 #if L1_DATA_B_LENGTH != 0
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
-	ret = _sram_free(addr, &free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 #else
@@ -509,15 +541,18 @@ void *l1_inst_sram_alloc(size_t size)
 #if L1_CODE_LENGTH != 0
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_inst_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
 
-	addr = _sram_alloc(size, &free_l1_inst_sram_head,
-			&used_l1_inst_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_inst_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
 		(long unsigned int)addr, size);
@@ -534,15 +569,18 @@ int l1_inst_sram_free(const void *addr)
 #if L1_CODE_LENGTH != 0
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_inst_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
 
-	ret = _sram_free(addr, &free_l1_inst_sram_head,
-			&used_l1_inst_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_inst_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 #else
@@ -556,15 +594,18 @@ void *l1sram_alloc(size_t size)
 {
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
 
-	addr = _sram_alloc(size, &free_l1_ssram_head,
-			&used_l1_ssram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return addr;
 }
@@ -574,15 +615,18 @@ void *l1sram_alloc_max(size_t *psize)
 {
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
 
-	addr = _sram_alloc_max(&free_l1_ssram_head,
-			&used_l1_ssram_head, psize);
+	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu), psize);
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return addr;
 }
@@ -592,15 +636,18 @@ int l1sram_free(const void *addr)
 {
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
 
-	ret = _sram_free(addr, &free_l1_ssram_head,
-			&used_l1_ssram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 }
@@ -761,33 +808,36 @@ static int sram_proc_read(char *buf, char **start, off_t offset, int count,
 			  int *eof, void *data)
 {
 	int len = 0;
+	unsigned int cpu;
 
-	if (_sram_proc_read(buf, &len, count, "Scratchpad",
-			&free_l1_ssram_head, &used_l1_ssram_head))
-		goto not_done;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		if (_sram_proc_read(buf, &len, count, "Scratchpad",
+			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
+			goto not_done;
 #if L1_DATA_A_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L1 Data A",
-			&free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head))
-		goto not_done;
+		if (_sram_proc_read(buf, &len, count, "L1 Data A",
+			&per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu)))
+			goto not_done;
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L1 Data B",
-			&free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head))
-		goto not_done;
+		if (_sram_proc_read(buf, &len, count, "L1 Data B",
+			&per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu)))
+			goto not_done;
 #endif
 #if L1_CODE_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L1 Instruction",
-			&free_l1_inst_sram_head, &used_l1_inst_sram_head))
-		goto not_done;
+		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
+			&per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu)))
+			goto not_done;
 #endif
+	}
 #if L2_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L2",
-			&free_l2_sram_head, &used_l2_sram_head))
+	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
+			&used_l2_sram_head))
 		goto not_done;
 #endif
-
 	*eof = 1;
  not_done:
 	return len;