path: root/arch/ppc/mm
author    Grant Likely <grant.likely@secretlab.ca>    2007-10-31 02:41:20 -0400
committer Josh Boyer <jwboyer@linux.vnet.ibm.com>    2007-11-01 08:15:59 -0400
commit    bd942ba3db60d3bd4e21febbe7c5e339d973d5a8 (patch)
tree      ff306770ca15454fb73c040a7fd47a027c7f89f2 /arch/ppc/mm
parent    b98ac05d5e460301fbea24cceed0f2a601c82e22 (diff)
[POWERPC] ppc405 Fix arithmetic rollover bug when memory size under 16M
mmu_mapin_ram() loops over total_lowmem to set up page tables. However, if total_lowmem is less than 16M, the subtraction rolls over and results in a number just under 4G (because total_lowmem is an unsigned value). This patch rejigs the loop from count-up to count-down to eliminate the bug.

Special thanks to Magnus Hjorth who wrote the original patch to fix this bug. This patch improves on his by making the loop code simpler (which also eliminates the possibility of another rollover at the high end) and also applies the change to arch/powerpc.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
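For context, here is a minimal userspace sketch (not kernel code; the identifiers total_lowmem and LARGE_PAGE_SIZE_16M are reused from the patch purely for illustration) of the unsigned wrap-around the commit message describes, and of the count-down loop structure that avoids it:

	/* Sketch of the rollover fixed by this patch.  With unsigned
	 * arithmetic, total_lowmem - LARGE_PAGE_SIZE_16M wraps when
	 * total_lowmem < 16M, so the old count-up bound becomes a value
	 * just under 4G on a 32-bit unsigned long, and the loop maps far
	 * more memory than is present.
	 */
	#include <stdio.h>

	#define LARGE_PAGE_SIZE_16M	(16u << 20)

	int main(void)
	{
		unsigned long total_lowmem = 8u << 20;	/* assume an 8M system */
		unsigned long s;

		/* Old count-up bound: wraps to a huge unsigned value. */
		printf("count-up bound: %lu\n",
		       total_lowmem - LARGE_PAGE_SIZE_16M);

		/* New scheme: count s down from total_lowmem; the body
		 * simply never runs when less than one 16M page remains,
		 * so no subtraction can roll over. */
		s = total_lowmem;
		while (s >= LARGE_PAGE_SIZE_16M)
			s -= LARGE_PAGE_SIZE_16M;

		printf("unmapped remainder: %lu\n", s);
		return 0;
	}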
Diffstat (limited to 'arch/ppc/mm')
-rw-r--r-- arch/ppc/mm/4xx_mmu.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/ppc/mm/4xx_mmu.c b/arch/ppc/mm/4xx_mmu.c
index 838e09db71d9..ea785dbaac7c 100644
--- a/arch/ppc/mm/4xx_mmu.c
+++ b/arch/ppc/mm/4xx_mmu.c
@@ -99,13 +99,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -117,10 +116,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -129,8 +128,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }