author     Bernd Schmidt <bernds_cb1@t-online.de>     2009-01-07 10:14:38 -0500
committer  Bryan Wu <cooloney@kernel.org>             2009-01-07 10:14:38 -0500
commit     dbdf20db537a5369c65330f878ad4905020a8bfa (patch)
tree       c7fa553755e2d75a6e98d3f32fbe41fab9f72609 /arch/blackfin/kernel/cplb-nompu/cplbinit.c
parent     6651ece9e257302ee695ee76e69a4427f7033235 (diff)
Blackfin arch: Faster C implementation of no-MPU CPLB handler
This is a mixture of Michael McTernan's patch and the existing cplb-mpu code.
We ditch the old cplb-nompu implementation, which is a good example of
why a good algorithm in an HLL is preferable to a bad algorithm written in
assembly. Rather than try to construct a table of all possible CPLBs and
search it, we just create a (smaller) table of memory regions and
their attributes. Some of the data structures are now unified for both
the mpu and nompu cases. A lot of needless complexity in cplbinit.c is
removed.
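To make the new scheme concrete: each bounds entry pairs an end address with
the CPLB data bits of the region below it, so a miss handler only has to scan
a small, sorted table. The sketch below is a simplified illustration rather
than the actual cplbmgr.c code (which is outside this diff); the struct and
array names mirror the ones added by this patch, while find_dcplb_data() is a
hypothetical helper name.

```c
/*
 * Simplified sketch of the bounds-table lookup, not the real cplbmgr.c.
 * struct cplb_boundary, dcplb_bounds[] and dcplb_nr_bounds mirror names
 * introduced by this patch; find_dcplb_data() is a hypothetical helper.
 */
struct cplb_boundary {
	unsigned long eaddr;	/* end address of the region (exclusive) */
	unsigned long data;	/* CPLB data bits; 0 means "no access" */
};

extern struct cplb_boundary dcplb_bounds[];
extern int dcplb_nr_bounds;

static unsigned long find_dcplb_data(unsigned long addr)
{
	int i = 0;

	/*
	 * Entries are sorted by end address, so the first boundary above
	 * 'addr' describes the region that contains it.
	 */
	do {
		if (addr < dcplb_bounds[i].eaddr)
			return dcplb_bounds[i].data;
	} while (++i < dcplb_nr_bounds);

	return 0;	/* above the last region: treat as a protection fault */
}
```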
Further optimizations:
* compile cplbmgr.c with a lot of -ffixed-reg options, and omit saving
these registers on the stack when entering a CPLB exception (see the
kbuild sketch after this list).
* lose cli/nop/nop/sti sequences for some workarounds - these don't make
sense in an exception context
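The -ffixed-<reg> options are plain GCC flags applied per object file through
kbuild. The fragment below is only an illustration: the real Makefile change
is outside this diffstat, so the particular register names listed here are
assumptions rather than the exact set used.

```make
# Illustrative kbuild fragment only; not the actual Makefile change from
# this commit, and the register list is an assumption.  Each -ffixed-<reg>
# stops gcc from allocating that register in cplbmgr.c, so the CPLB
# exception entry path no longer needs to save and restore it.
CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
                    -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
                    -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3 \
                    -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3
```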
Additional code unification should be possible after this.
[Mike Frysinger <vapier.adi@gmail.com>:
- convert CPP if statements to C if statements
- remove redundant statements
- use a do...while loop rather than a for loop to get slightly better
optimization and to avoid gcc "may be used uninitialized" warnings
(see the sketch after these notes) ... we know that the
[id]cplb_nr_bounds variables will never be 0, so this is OK
- the no-mpu code was the last user of MAX_MEM_SIZE and with that rewritten,
we can punt it
- add some BUG_ON() checks to make sure we don't overflow the small
cplb_bounds array
- add i/d cplb entries for the bootrom because there are functions/data in
there we want to access
- we do not need a NULL trailing entry as any time we access the bounds
arrays, we use the nr_bounds variable
]
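As a toy contrast for the loop-shape note above (illustrative only, not code
from this patch), assuming the element count is always at least 1 as the note
states:

```c
/* Toy illustration only, not code from this patch. */
struct bound { unsigned long eaddr, data; };

/*
 * A for loop may run zero times as far as gcc can tell, so a result set
 * only inside the body draws a "may be used uninitialized" warning
 * unless it gets a dummy initialization.
 */
static unsigned long lookup_for(const struct bound *b, int n, unsigned long addr)
{
	unsigned long data = 0;	/* dummy init just to silence gcc */
	int i;

	for (i = 0; i < n; i++)
		if (addr < b[i].eaddr) {
			data = b[i].data;
			break;
		}
	return data;
}

/*
 * A do...while runs the body at least once; since n is known to be >= 1,
 * no dummy initialization is needed and the loop is marginally tighter.
 */
static unsigned long lookup_do(const struct bound *b, int n, unsigned long addr)
{
	int i = 0;

	do {
		if (addr < b[i].eaddr)
			return b[i].data;
	} while (++i < n);
	return 0;
}
```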
Signed-off-by: Michael McTernan <mmcternan@airvana.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Diffstat (limited to 'arch/blackfin/kernel/cplb-nompu/cplbinit.c')
-rw-r--r-- | arch/blackfin/kernel/cplb-nompu/cplbinit.c | 498 |
1 file changed, 112 insertions, 386 deletions
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 4c010ba50a80..0e28f7595733 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -29,417 +29,143 @@
29 | #include <asm/cplbinit.h> | 29 | #include <asm/cplbinit.h> |
30 | #include <asm/mem_map.h> | 30 | #include <asm/mem_map.h> |
31 | 31 | ||
32 | u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1]; | 32 | struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR; |
33 | u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1]; | 33 | struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR; |
34 | 34 | ||
35 | #ifdef CONFIG_CPLB_SWITCH_TAB_L1 | 35 | int first_switched_icplb PDT_ATTR; |
36 | #define PDT_ATTR __attribute__((l1_data)) | 36 | int first_switched_dcplb PDT_ATTR; |
37 | #else | ||
38 | #define PDT_ATTR | ||
39 | #endif | ||
40 | |||
41 | u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1] PDT_ATTR; | ||
42 | u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1] PDT_ATTR; | ||
43 | #ifdef CONFIG_CPLB_INFO | ||
44 | u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS] PDT_ATTR; | ||
45 | u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS] PDT_ATTR; | ||
46 | #endif | ||
47 | 37 | ||
48 | struct s_cplb { | 38 | struct cplb_boundary dcplb_bounds[9] PDT_ATTR; |
49 | struct cplb_tab init_i; | 39 | struct cplb_boundary icplb_bounds[7] PDT_ATTR; |
50 | struct cplb_tab init_d; | ||
51 | struct cplb_tab switch_i; | ||
52 | struct cplb_tab switch_d; | ||
53 | }; | ||
54 | 40 | ||
55 | #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) | 41 | int icplb_nr_bounds PDT_ATTR; |
56 | static struct cplb_desc cplb_data[] = { | 42 | int dcplb_nr_bounds PDT_ATTR; |
57 | { | ||
58 | .start = 0, | ||
59 | .end = SIZE_1K, | ||
60 | .psize = SIZE_1K, | ||
61 | .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB, | ||
62 | .i_conf = SDRAM_OOPS, | ||
63 | .d_conf = SDRAM_OOPS, | ||
64 | #if defined(CONFIG_DEBUG_HUNT_FOR_ZERO) | ||
65 | .valid = 1, | ||
66 | #else | ||
67 | .valid = 0, | ||
68 | #endif | ||
69 | .name = "Zero Pointer Guard Page", | ||
70 | }, | ||
71 | { | ||
72 | .start = 0, /* dyanmic */ | ||
73 | .end = 0, /* dynamic */ | ||
74 | .psize = SIZE_4M, | ||
75 | .attr = INITIAL_T | SWITCH_T | I_CPLB, | ||
76 | .i_conf = L1_IMEMORY, | ||
77 | .d_conf = 0, | ||
78 | .valid = 1, | ||
79 | .name = "L1 I-Memory", | ||
80 | }, | ||
81 | { | ||
82 | .start = 0, /* dynamic */ | ||
83 | .end = 0, /* dynamic */ | ||
84 | .psize = SIZE_4M, | ||
85 | .attr = INITIAL_T | SWITCH_T | D_CPLB, | ||
86 | .i_conf = 0, | ||
87 | .d_conf = L1_DMEMORY, | ||
88 | #if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0)) | ||
89 | .valid = 1, | ||
90 | #else | ||
91 | .valid = 0, | ||
92 | #endif | ||
93 | .name = "L1 D-Memory", | ||
94 | }, | ||
95 | { | ||
96 | .start = L2_START, | ||
97 | .end = L2_START + L2_LENGTH, | ||
98 | .psize = SIZE_1M, | ||
99 | .attr = L2_ATTR, | ||
100 | .i_conf = L2_IMEMORY, | ||
101 | .d_conf = L2_DMEMORY, | ||
102 | .valid = (L2_LENGTH > 0), | ||
103 | .name = "L2 Memory", | ||
104 | }, | ||
105 | { | ||
106 | .start = 0, | ||
107 | .end = 0, /* dynamic */ | ||
108 | .psize = 0, | ||
109 | .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB, | ||
110 | .i_conf = SDRAM_IGENERIC, | ||
111 | .d_conf = SDRAM_DGENERIC, | ||
112 | .valid = 1, | ||
113 | .name = "Kernel Memory", | ||
114 | }, | ||
115 | { | ||
116 | .start = 0, /* dynamic */ | ||
117 | .end = 0, /* dynamic */ | ||
118 | .psize = 0, | ||
119 | .attr = INITIAL_T | SWITCH_T | D_CPLB, | ||
120 | .i_conf = SDRAM_IGENERIC, | ||
121 | .d_conf = SDRAM_DNON_CHBL, | ||
122 | .valid = 1, | ||
123 | .name = "uClinux MTD Memory", | ||
124 | }, | ||
125 | { | ||
126 | .start = 0, /* dynamic */ | ||
127 | .end = 0, /* dynamic */ | ||
128 | .psize = SIZE_1M, | ||
129 | .attr = INITIAL_T | SWITCH_T | D_CPLB, | ||
130 | .d_conf = SDRAM_DNON_CHBL, | ||
131 | .valid = 1, | ||
132 | .name = "Uncached DMA Zone", | ||
133 | }, | ||
134 | { | ||
135 | .start = 0, /* dynamic */ | ||
136 | .end = 0, /* dynamic */ | ||
137 | .psize = 0, | ||
138 | .attr = SWITCH_T | D_CPLB, | ||
139 | .i_conf = 0, /* dynamic */ | ||
140 | .d_conf = 0, /* dynamic */ | ||
141 | .valid = 1, | ||
142 | .name = "Reserved Memory", | ||
143 | }, | ||
144 | { | ||
145 | .start = ASYNC_BANK0_BASE, | ||
146 | .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE, | ||
147 | .psize = 0, | ||
148 | .attr = SWITCH_T | D_CPLB, | ||
149 | .d_conf = SDRAM_EBIU, | ||
150 | .valid = 1, | ||
151 | .name = "Asynchronous Memory Banks", | ||
152 | }, | ||
153 | { | ||
154 | .start = BOOT_ROM_START, | ||
155 | .end = BOOT_ROM_START + BOOT_ROM_LENGTH, | ||
156 | .psize = SIZE_1M, | ||
157 | .attr = SWITCH_T | I_CPLB | D_CPLB, | ||
158 | .i_conf = SDRAM_IGENERIC, | ||
159 | .d_conf = SDRAM_DGENERIC, | ||
160 | .valid = 1, | ||
161 | .name = "On-Chip BootROM", | ||
162 | }, | ||
163 | }; | ||
164 | 43 | ||
165 | static bool __init lock_kernel_check(u32 start, u32 end) | 44 | void __init generate_cplb_tables_cpu(unsigned int cpu) |
166 | { | 45 | { |
167 | if (start >= (u32)__init_begin || end <= (u32)_stext) | 46 | int i_d, i_i; |
168 | return false; | 47 | unsigned long addr; |
169 | |||
170 | /* This cplb block overlapped with kernel area. */ | ||
171 | return true; | ||
172 | } | ||
173 | 48 | ||
174 | static void __init | 49 | struct cplb_entry *d_tbl = dcplb_tbl[cpu]; |
175 | fill_cplbtab(struct cplb_tab *table, | 50 | struct cplb_entry *i_tbl = icplb_tbl[cpu]; |
176 | unsigned long start, unsigned long end, | ||
177 | unsigned long block_size, unsigned long cplb_data) | ||
178 | { | ||
179 | int i; | ||
180 | 51 | ||
181 | switch (block_size) { | 52 | printk(KERN_INFO "NOMPU: setting up cplb tables\n"); |
182 | case SIZE_4M: | ||
183 | i = 3; | ||
184 | break; | ||
185 | case SIZE_1M: | ||
186 | i = 2; | ||
187 | break; | ||
188 | case SIZE_4K: | ||
189 | i = 1; | ||
190 | break; | ||
191 | case SIZE_1K: | ||
192 | default: | ||
193 | i = 0; | ||
194 | break; | ||
195 | } | ||
196 | 53 | ||
197 | cplb_data = (cplb_data & ~(3 << 16)) | (i << 16); | 54 | i_d = i_i = 0; |
198 | 55 | ||
199 | while ((start < end) && (table->pos < table->size)) { | 56 | /* Set up the zero page. */ |
57 | d_tbl[i_d].addr = 0; | ||
58 | d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; | ||
200 | 59 | ||
201 | table->tab[table->pos++] = start; | 60 | /* Cover kernel memory with 4M pages. */ |
61 | addr = 0; | ||
202 | 62 | ||
203 | if (lock_kernel_check(start, start + block_size)) | 63 | for (; addr < memory_start; addr += 4 * 1024 * 1024) { |
204 | table->tab[table->pos++] = | 64 | d_tbl[i_d].addr = addr; |
205 | cplb_data | CPLB_LOCK | CPLB_DIRTY; | 65 | d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; |
206 | else | 66 | i_tbl[i_i].addr = addr; |
207 | table->tab[table->pos++] = cplb_data; | 67 | i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; |
68 | } | ||
208 | 69 | ||
209 | start += block_size; | 70 | /* Cover L1 memory. One 4M area for code and data each is enough. */ |
71 | if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { | ||
72 | d_tbl[i_d].addr = L1_DATA_A_START; | ||
73 | d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB; | ||
210 | } | 74 | } |
211 | } | 75 | i_tbl[i_i].addr = L1_CODE_START; |
76 | i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB; | ||
212 | 77 | ||
213 | static void __init close_cplbtab(struct cplb_tab *table) | 78 | first_switched_dcplb = i_d; |
214 | { | 79 | first_switched_icplb = i_i; |
215 | while (table->pos < table->size) | ||
216 | table->tab[table->pos++] = 0; | ||
217 | } | ||
218 | 80 | ||
219 | /* helper function */ | 81 | BUG_ON(first_switched_dcplb > MAX_CPLBS); |
220 | static void __init | 82 | BUG_ON(first_switched_icplb > MAX_CPLBS); |
221 | __fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end) | ||
222 | { | ||
223 | if (cplb_data[i].psize) { | ||
224 | fill_cplbtab(t, | ||
225 | cplb_data[i].start, | ||
226 | cplb_data[i].end, | ||
227 | cplb_data[i].psize, | ||
228 | cplb_data[i].i_conf); | ||
229 | } else { | ||
230 | #if defined(CONFIG_BFIN_ICACHE) | ||
231 | if (ANOMALY_05000263 && i == SDRAM_KERN) { | ||
232 | fill_cplbtab(t, | ||
233 | cplb_data[i].start, | ||
234 | cplb_data[i].end, | ||
235 | SIZE_4M, | ||
236 | cplb_data[i].i_conf); | ||
237 | } else | ||
238 | #endif | ||
239 | { | ||
240 | fill_cplbtab(t, | ||
241 | cplb_data[i].start, | ||
242 | a_start, | ||
243 | SIZE_1M, | ||
244 | cplb_data[i].i_conf); | ||
245 | fill_cplbtab(t, | ||
246 | a_start, | ||
247 | a_end, | ||
248 | SIZE_4M, | ||
249 | cplb_data[i].i_conf); | ||
250 | fill_cplbtab(t, a_end, | ||
251 | cplb_data[i].end, | ||
252 | SIZE_1M, | ||
253 | cplb_data[i].i_conf); | ||
254 | } | ||
255 | } | ||
256 | } | ||
257 | 83 | ||
258 | static void __init | 84 | while (i_d < MAX_CPLBS) |
259 | __fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end) | 85 | d_tbl[i_d++].data = 0; |
260 | { | 86 | while (i_i < MAX_CPLBS) |
261 | if (cplb_data[i].psize) { | 87 | i_tbl[i_i++].data = 0; |
262 | fill_cplbtab(t, | ||
263 | cplb_data[i].start, | ||
264 | cplb_data[i].end, | ||
265 | cplb_data[i].psize, | ||
266 | cplb_data[i].d_conf); | ||
267 | } else { | ||
268 | fill_cplbtab(t, | ||
269 | cplb_data[i].start, | ||
270 | a_start, SIZE_1M, | ||
271 | cplb_data[i].d_conf); | ||
272 | fill_cplbtab(t, a_start, | ||
273 | a_end, SIZE_4M, | ||
274 | cplb_data[i].d_conf); | ||
275 | fill_cplbtab(t, a_end, | ||
276 | cplb_data[i].end, | ||
277 | SIZE_1M, | ||
278 | cplb_data[i].d_conf); | ||
279 | } | ||
280 | } | 88 | } |
281 | 89 | ||
282 | void __init generate_cplb_tables_cpu(unsigned int cpu) | 90 | void __init generate_cplb_tables_all(void) |
283 | { | 91 | { |
92 | int i_d, i_i; | ||
284 | 93 | ||
285 | u16 i, j, process; | 94 | i_d = 0; |
286 | u32 a_start, a_end, as, ae, as_1m; | 95 | /* Normal RAM, including MTD FS. */ |
287 | |||
288 | struct cplb_tab *t_i = NULL; | ||
289 | struct cplb_tab *t_d = NULL; | ||
290 | struct s_cplb cplb; | ||
291 | |||
292 | printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n"); | ||
293 | |||
294 | cplb.init_i.size = CPLB_TBL_ENTRIES; | ||
295 | cplb.init_d.size = CPLB_TBL_ENTRIES; | ||
296 | cplb.switch_i.size = MAX_SWITCH_I_CPLBS; | ||
297 | cplb.switch_d.size = MAX_SWITCH_D_CPLBS; | ||
298 | |||
299 | cplb.init_i.pos = 0; | ||
300 | cplb.init_d.pos = 0; | ||
301 | cplb.switch_i.pos = 0; | ||
302 | cplb.switch_d.pos = 0; | ||
303 | |||
304 | cplb.init_i.tab = icplb_tables[cpu]; | ||
305 | cplb.init_d.tab = dcplb_tables[cpu]; | ||
306 | cplb.switch_i.tab = ipdt_tables[cpu]; | ||
307 | cplb.switch_d.tab = dpdt_tables[cpu]; | ||
308 | |||
309 | cplb_data[L1I_MEM].start = get_l1_code_start_cpu(cpu); | ||
310 | cplb_data[L1I_MEM].end = cplb_data[L1I_MEM].start + L1_CODE_LENGTH; | ||
311 | cplb_data[L1D_MEM].start = get_l1_data_a_start_cpu(cpu); | ||
312 | cplb_data[L1D_MEM].end = get_l1_data_b_start_cpu(cpu) + L1_DATA_B_LENGTH; | ||
313 | cplb_data[SDRAM_KERN].end = memory_end; | ||
314 | |||
315 | #ifdef CONFIG_MTD_UCLINUX | 96 | #ifdef CONFIG_MTD_UCLINUX |
316 | cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start; | 97 | dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size; |
317 | cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size; | ||
318 | cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0; | ||
319 | # if defined(CONFIG_ROMFS_FS) | ||
320 | cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB; | ||
321 | |||
322 | /* | ||
323 | * The ROMFS_FS size is often not multiple of 1MB. | ||
324 | * This can cause multiple CPLB sets covering the same memory area. | ||
325 | * This will then cause multiple CPLB hit exceptions. | ||
326 | * Workaround: We ensure a contiguous memory area by extending the kernel | ||
327 | * memory section over the mtd section. | ||
328 | * For ROMFS_FS memory must be covered with ICPLBs anyways. | ||
329 | * So there is no difference between kernel and mtd memory setup. | ||
330 | */ | ||
331 | |||
332 | cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;; | ||
333 | cplb_data[SDRAM_RAM_MTD].valid = 0; | ||
334 | |||
335 | # endif | ||
336 | #else | 98 | #else |
337 | cplb_data[SDRAM_RAM_MTD].valid = 0; | 99 | dcplb_bounds[i_d].eaddr = memory_end; |
338 | #endif | 100 | #endif |
101 | dcplb_bounds[i_d++].data = SDRAM_DGENERIC; | ||
102 | /* DMA uncached region. */ | ||
103 | if (DMA_UNCACHED_REGION) { | ||
104 | dcplb_bounds[i_d].eaddr = _ramend; | ||
105 | dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL; | ||
106 | } | ||
107 | if (_ramend != physical_mem_end) { | ||
108 | /* Reserved memory. */ | ||
109 | dcplb_bounds[i_d].eaddr = physical_mem_end; | ||
110 | dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ? | ||
111 | SDRAM_DGENERIC : SDRAM_DNON_CHBL); | ||
112 | } | ||
113 | /* Addressing hole up to the async bank. */ | ||
114 | dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE; | ||
115 | dcplb_bounds[i_d++].data = 0; | ||
116 | /* ASYNC banks. */ | ||
117 | dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE; | ||
118 | dcplb_bounds[i_d++].data = SDRAM_EBIU; | ||
119 | /* Addressing hole up to BootROM. */ | ||
120 | dcplb_bounds[i_d].eaddr = BOOT_ROM_START; | ||
121 | dcplb_bounds[i_d++].data = 0; | ||
122 | /* BootROM -- largest one should be less than 1 meg. */ | ||
123 | dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024); | ||
124 | dcplb_bounds[i_d++].data = SDRAM_DGENERIC; | ||
125 | if (L2_LENGTH) { | ||
126 | /* Addressing hole up to L2 SRAM. */ | ||
127 | dcplb_bounds[i_d].eaddr = L2_START; | ||
128 | dcplb_bounds[i_d++].data = 0; | ||
129 | /* L2 SRAM. */ | ||
130 | dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH; | ||
131 | dcplb_bounds[i_d++].data = L2_DMEMORY; | ||
132 | } | ||
133 | dcplb_nr_bounds = i_d; | ||
134 | BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds)); | ||
339 | 135 | ||
340 | cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION; | 136 | i_i = 0; |
341 | cplb_data[SDRAM_DMAZ].end = _ramend; | 137 | /* Normal RAM, including MTD FS. */ |
342 | |||
343 | cplb_data[RES_MEM].start = _ramend; | ||
344 | cplb_data[RES_MEM].end = physical_mem_end; | ||
345 | |||
346 | if (reserved_mem_dcache_on) | ||
347 | cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC; | ||
348 | else | ||
349 | cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL; | ||
350 | |||
351 | if (reserved_mem_icache_on) | ||
352 | cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC; | ||
353 | else | ||
354 | cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL; | ||
355 | |||
356 | for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) { | ||
357 | if (!cplb_data[i].valid) | ||
358 | continue; | ||
359 | |||
360 | as_1m = cplb_data[i].start % SIZE_1M; | ||
361 | |||
362 | /* We need to make sure all sections are properly 1M aligned | ||
363 | * However between Kernel Memory and the Kernel mtd section, depending on the | ||
364 | * rootfs size, there can be overlapping memory areas. | ||
365 | */ | ||
366 | |||
367 | if (as_1m && i != L1I_MEM && i != L1D_MEM) { | ||
368 | #ifdef CONFIG_MTD_UCLINUX | 138 | #ifdef CONFIG_MTD_UCLINUX |
369 | if (i == SDRAM_RAM_MTD) { | 139 | icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size; |
370 | if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start) | 140 | #else |
371 | cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M; | 141 | icplb_bounds[i_i].eaddr = memory_end; |
372 | else | ||
373 | cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)); | ||
374 | } else | ||
375 | #endif | 142 | #endif |
376 | printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n", | 143 | icplb_bounds[i_i++].data = SDRAM_IGENERIC; |
377 | cplb_data[i].name, cplb_data[i].start); | 144 | /* DMA uncached region. */ |
378 | } | 145 | if (DMA_UNCACHED_REGION) { |
379 | 146 | icplb_bounds[i_i].eaddr = _ramend; | |
380 | as = cplb_data[i].start % SIZE_4M; | 147 | icplb_bounds[i_i++].data = 0; |
381 | ae = cplb_data[i].end % SIZE_4M; | ||
382 | |||
383 | if (as) | ||
384 | a_start = cplb_data[i].start + (SIZE_4M - (as)); | ||
385 | else | ||
386 | a_start = cplb_data[i].start; | ||
387 | |||
388 | a_end = cplb_data[i].end - ae; | ||
389 | |||
390 | for (j = INITIAL_T; j <= SWITCH_T; j++) { | ||
391 | |||
392 | switch (j) { | ||
393 | case INITIAL_T: | ||
394 | if (cplb_data[i].attr & INITIAL_T) { | ||
395 | t_i = &cplb.init_i; | ||
396 | t_d = &cplb.init_d; | ||
397 | process = 1; | ||
398 | } else | ||
399 | process = 0; | ||
400 | break; | ||
401 | case SWITCH_T: | ||
402 | if (cplb_data[i].attr & SWITCH_T) { | ||
403 | t_i = &cplb.switch_i; | ||
404 | t_d = &cplb.switch_d; | ||
405 | process = 1; | ||
406 | } else | ||
407 | process = 0; | ||
408 | break; | ||
409 | default: | ||
410 | process = 0; | ||
411 | break; | ||
412 | } | ||
413 | |||
414 | if (!process) | ||
415 | continue; | ||
416 | if (cplb_data[i].attr & I_CPLB) | ||
417 | __fill_code_cplbtab(t_i, i, a_start, a_end); | ||
418 | |||
419 | if (cplb_data[i].attr & D_CPLB) | ||
420 | __fill_data_cplbtab(t_d, i, a_start, a_end); | ||
421 | } | ||
422 | } | 148 | } |
423 | 149 | if (_ramend != physical_mem_end) { | |
424 | /* make sure we locked the kernel start */ | 150 | /* Reserved memory. */ |
425 | BUG_ON(cplb.init_i.pos < 2 + cplb_data[ZERO_P].valid); | 151 | icplb_bounds[i_i].eaddr = physical_mem_end; |
426 | BUG_ON(cplb.init_d.pos < 1 + cplb_data[ZERO_P].valid + cplb_data[L1D_MEM].valid); | 152 | icplb_bounds[i_i++].data = (reserved_mem_icache_on ? |
427 | 153 | SDRAM_IGENERIC : SDRAM_INON_CHBL); | |
428 | /* make sure we didnt overflow the table */ | 154 | } |
429 | BUG_ON(cplb.init_i.size < cplb.init_i.pos); | 155 | /* Addressing hole up to BootROM. */ |
430 | BUG_ON(cplb.init_d.size < cplb.init_d.pos); | 156 | icplb_bounds[i_i].eaddr = BOOT_ROM_START; |
431 | BUG_ON(cplb.switch_i.size < cplb.switch_i.pos); | 157 | icplb_bounds[i_i++].data = 0; |
432 | BUG_ON(cplb.switch_d.size < cplb.switch_d.pos); | 158 | /* BootROM -- largest one should be less than 1 meg. */ |
433 | 159 | icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024); | |
434 | /* close tables */ | 160 | icplb_bounds[i_i++].data = SDRAM_IGENERIC; |
435 | close_cplbtab(&cplb.init_i); | 161 | if (L2_LENGTH) { |
436 | close_cplbtab(&cplb.init_d); | 162 | /* Addressing hole up to L2 SRAM, including the async bank. */ |
437 | 163 | icplb_bounds[i_i].eaddr = L2_START; | |
438 | cplb.init_i.tab[cplb.init_i.pos] = -1; | 164 | icplb_bounds[i_i++].data = 0; |
439 | cplb.init_d.tab[cplb.init_d.pos] = -1; | 165 | /* L2 SRAM. */ |
440 | cplb.switch_i.tab[cplb.switch_i.pos] = -1; | 166 | icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH; |
441 | cplb.switch_d.tab[cplb.switch_d.pos] = -1; | 167 | icplb_bounds[i_i++].data = L2_IMEMORY; |
442 | 168 | } | |
169 | icplb_nr_bounds = i_i; | ||
170 | BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds)); | ||
443 | } | 171 | } |
444 | |||
445 | #endif | ||