author    David S. Miller <davem@sunset.davemloft.net>  2006-03-08 18:57:03 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:14:23 -0500
commit    17b0e199a10184d8c5bbbd79a4cee993bb1fb257
tree      c5cf39d072cf908e5b03216e1e1698bf85e86877 /arch/sparc64/mm/init.c
parent    d1112018b4bc82adf5c8a9c15a08954328f023ae
[SPARC64]: Fix 32-bit truncation which broke sparsemem.
The page->flags manipulations done by the D-cache dirty state tracking were broken because the constants were not marked with "UL" to make them 64-bit, which means we were clobbering the upper 32 bits of page->flags all the time.

This doesn't jibe well with sparsemem, which stores the section and indexing information in the top 32 bits of page->flags.

This is yet another sparc64 bug which has been with us forever.

While we're here, tidy up some things in bootmem_init() and paging_init():

1) Pass min_low_pfn to init_bootmem_node(); it's identical to (phys_base >> PAGE_SHIFT), but we should be consistent with the variable names we print under CONFIG_BOOTMEM_DEBUG.

2) max_mapnr, although no longer used, was being set inaccurately; we shouldn't subtract pfn_base any more.

3) All the games with phys_base in the zones_*[] arrays we pass to free_area_init_node() are no longer necessary.

Thanks to Josh Grebe and Fabbione for the bug reports and testing. The fix was also verified locally on an SB2500 which had a memory layout that triggered the same problem.

Signed-off-by: David S. Miller <davem@davemloft.net>
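To illustrate the truncation, here is a minimal user-space sketch (not code from the patch: the variable names and the test value are made up, and it assumes an LP64 target where unsigned long is 64-bit, as on sparc64). The kernel's original defines were plain integer literals; unsigned int is used below only to keep the 32-bit shift well-defined, but the truncation mechanism is the same.

#include <stdio.h>

int main(void)
{
	/* Hypothetical stand-in for page->flags, with sparsemem's section
	 * information living in the upper 32 bits.
	 */
	unsigned long flags = 0xdeadbeef00000000UL;

	/* Old-style constants: 32-bit literals, as before this patch. */
	unsigned int cpu_shift_old = 24;
	unsigned int cpu_mask_old  = 256u - 1;

	/* New-style constants: UL-suffixed, so the arithmetic is 64-bit. */
	unsigned long cpu_shift_new = 24UL;
	unsigned long cpu_mask_new  = 256UL - 1UL;

	/* The dirty-state tracking preserves the "non-cpu" bits of
	 * page->flags with a mask of the form ~(mask << shift).  With
	 * 32-bit constants the complement is computed in 32 bits, so bits
	 * 32-63 of the resulting mask are zero and the AND wipes out the
	 * upper half of flags.
	 */
	unsigned long broken = flags & ~(cpu_mask_old << cpu_shift_old);
	unsigned long fixed  = flags & ~(cpu_mask_new << cpu_shift_new);

	printf("broken: %016lx\n", broken);	/* prints 0000000000000000 */
	printf("fixed : %016lx\n", fixed);	/* prints deadbeef00000000 */
	return 0;
}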
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c  18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 5f67b53b3a5b..b40f6477dea0 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -205,8 +205,8 @@ inline void flush_dcache_page_impl(struct page *page)
 }
 
 #define PG_dcache_dirty		PG_arch_1
-#define PG_dcache_cpu_shift	24
-#define PG_dcache_cpu_mask	(256 - 1)
+#define PG_dcache_cpu_shift	24UL
+#define PG_dcache_cpu_mask	(256UL - 1UL)
 
 #if NR_CPUS > 256
 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
@@ -901,8 +901,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 	       min_low_pfn, bootmap_pfn, max_low_pfn);
 #endif
 	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
-					 (phys_base >> PAGE_SHIFT),
-					 end_pfn);
+					 min_low_pfn, end_pfn);
 
 	/* Now register the available physical memory with the
 	 * allocator.
@@ -1311,25 +1310,24 @@ void __init paging_init(void)
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
 
-	max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
+	max_mapnr = last_valid_pfn;
 
 	kernel_physical_mapping_init();
 
 	{
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long zholes_size[MAX_NR_ZONES];
-		unsigned long npages;
 		int znum;
 
 		for (znum = 0; znum < MAX_NR_ZONES; znum++)
 			zones_size[znum] = zholes_size[znum] = 0;
 
-		npages = end_pfn - (phys_base >> PAGE_SHIFT);
-		zones_size[ZONE_DMA] = npages;
-		zholes_size[ZONE_DMA] = npages - pages_avail;
+		zones_size[ZONE_DMA] = end_pfn;
+		zholes_size[ZONE_DMA] = end_pfn - pages_avail;
 
 		free_area_init_node(0, &contig_page_data, zones_size,
-				    phys_base >> PAGE_SHIFT, zholes_size);
+				    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+				    zholes_size);
 	}
 
 	device_scan();