about summary refs log tree commit diff stats
path: root/arch/blackfin/kernel/cplb-nompu/cplbinit.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/blackfin/kernel/cplb-nompu/cplbinit.c')
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c31
1 files changed, 19 insertions, 12 deletions
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index fd9a2f31e686..282a7919821b 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -89,15 +89,25 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
89 89
90void __init generate_cplb_tables_all(void) 90void __init generate_cplb_tables_all(void)
91{ 91{
92 unsigned long uncached_end;
92 int i_d, i_i; 93 int i_d, i_i;
93 94
94 i_d = 0; 95 i_d = 0;
95 /* Normal RAM, including MTD FS. */ 96 /* Normal RAM, including MTD FS. */
96#ifdef CONFIG_MTD_UCLINUX 97#ifdef CONFIG_MTD_UCLINUX
97 dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size; 98 uncached_end = memory_mtd_start + mtd_size;
98#else 99#else
99 dcplb_bounds[i_d].eaddr = memory_end; 100 uncached_end = memory_end;
100#endif 101#endif
102 /*
103 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
104 * so that we don't have to use 4kB pages and cause CPLB thrashing
105 */
106 if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
107 ((_ramend - uncached_end) >= 1 * 1024 * 1024))
108 dcplb_bounds[i_d].eaddr = uncached_end;
109 else
110 dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
101 dcplb_bounds[i_d++].data = SDRAM_DGENERIC; 111 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
102 /* DMA uncached region. */ 112 /* DMA uncached region. */
103 if (DMA_UNCACHED_REGION) { 113 if (DMA_UNCACHED_REGION) {
@@ -135,18 +145,15 @@ void __init generate_cplb_tables_all(void)
135 145
136 i_i = 0; 146 i_i = 0;
137 /* Normal RAM, including MTD FS. */ 147 /* Normal RAM, including MTD FS. */
138#ifdef CONFIG_MTD_UCLINUX 148 icplb_bounds[i_i].eaddr = uncached_end;
139 icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
140#else
141 icplb_bounds[i_i].eaddr = memory_end;
142#endif
143 icplb_bounds[i_i++].data = SDRAM_IGENERIC; 149 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
144 /* DMA uncached region. */
145 if (DMA_UNCACHED_REGION) {
146 icplb_bounds[i_i].eaddr = _ramend;
147 icplb_bounds[i_i++].data = 0;
148 }
149 if (_ramend != physical_mem_end) { 150 if (_ramend != physical_mem_end) {
151 /* DMA uncached region. */
152 if (DMA_UNCACHED_REGION) {
153 /* Normally this hole is caught by the async below. */
154 icplb_bounds[i_i].eaddr = _ramend;
155 icplb_bounds[i_i++].data = 0;
156 }
150 /* Reserved memory. */ 157 /* Reserved memory. */
151 icplb_bounds[i_i].eaddr = physical_mem_end; 158 icplb_bounds[i_i].eaddr = physical_mem_end;
152 icplb_bounds[i_i++].data = (reserved_mem_icache_on ? 159 icplb_bounds[i_i++].data = (reserved_mem_icache_on ?