Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c      2
-rw-r--r--   mm/page_alloc.c  20
2 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 5d6e4c2000dc..33a28bfde158 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -134,7 +134,7 @@ static int sync_page(void *word)
 	struct address_space *mapping;
 	struct page *page;
 
-	page = container_of((page_flags_t *)word, struct page, flags);
+	page = container_of((unsigned long *)word, struct page, flags);
 
 	/*
 	 * page_mapping() is being called without PG_locked held.
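
The wait-queue word passed to sync_page() is the address of page->flags, so container_of() can walk back from that field to the enclosing struct page whether the word is typed as page_flags_t or plain unsigned long. A minimal user-space sketch of that pointer arithmetic, with container_of defined locally and struct page reduced to two illustrative fields:

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel macro: member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Reduced stand-in for struct page; only the flags word matters here. */
struct page {
        unsigned long flags;
        void *mapping;
};

int main(void)
{
        struct page pg = { .flags = 0x1UL, .mapping = NULL };
        void *word = &pg.flags;        /* what sync_page() receives */
        struct page *back =
                container_of((unsigned long *)word, struct page, flags);

        printf("recovered the enclosing page: %s\n",
               back == &pg ? "yes" : "no");
        return 0;
}
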
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c5cf664abd2..104e69ca55e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -60,8 +60,11 @@ long nr_swap_pages;
  * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
+ *
+ * TBD: should special case ZONE_DMA32 machines here - in those we normally
+ * don't need any ZONE_NORMAL reservation
  */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
 
 EXPORT_SYMBOL(totalram_pages);
 
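
As a rough worked example of the ratios quoted in the comment above (a sketch of the arithmetic only, using the 784M normal / 224M highmem figures from that comment, not the kernel's actual setup_per_zone_lowmem_reserve() routine):

#include <stdio.h>

/* Mirrors the table above: index 0 = ZONE_DMA, 1 = ZONE_DMA32, 2 = ZONE_NORMAL. */
static int lowmem_reserve_ratio[] = { 256, 256, 32 };

int main(void)
{
        long normal_mb = 784, highmem_mb = 224;   /* figures from the comment */

        /* NORMAL allocations hold back normal-size / ratio[DMA] in ZONE_DMA. */
        printf("NORMAL alloc keeps ~%ldM reserved in ZONE_DMA\n",
               normal_mb / lowmem_reserve_ratio[0]);

        /* HIGHMEM allocations reserve in both ZONE_NORMAL and ZONE_DMA. */
        printf("HIGHMEM alloc keeps ~%ldM reserved in ZONE_NORMAL\n",
               highmem_mb / lowmem_reserve_ratio[2]);
        printf("HIGHMEM alloc keeps ~%ldM reserved in ZONE_DMA\n",
               (highmem_mb + normal_mb) / lowmem_reserve_ratio[0]);
        return 0;
}

With these numbers that works out to roughly 3M, 7M and 3M respectively.
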
@@ -72,7 +75,7 @@ EXPORT_SYMBOL(totalram_pages);
 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
 EXPORT_SYMBOL(zone_table);
 
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
 int min_free_kbytes = 1024;
 
 unsigned long __initdata nr_kernel_pages;
@@ -124,7 +127,7 @@ static void bad_page(const char *function, struct page *page)
 	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
 		function, current->comm, page);
 	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-		(int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
+		(int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
 		page->mapping, page_mapcount(page), page_count(page));
 	printk(KERN_EMERG "Backtrace:\n");
 	dump_stack();
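
In this printk the '*' in "%0*lx" takes its field width from the next argument, so 2*sizeof(unsigned long) prints the flags word as a zero-padded hex value sized to the word width (16 digits on 64-bit, 8 on 32-bit). A small user-space illustration of that idiom:

#include <stdio.h>

int main(void)
{
        unsigned long flags = 0x400UL;

        /* Two hex digits per byte of the word, zero-padded via the '*' width. */
        printf("flags:0x%0*lx\n", (int)(2 * sizeof(unsigned long)), flags);
        return 0;
}
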
@@ -1421,6 +1424,10 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
 		zone = pgdat->node_zones + ZONE_NORMAL;
 		if (zone->present_pages)
 			zonelist->zones[j++] = zone;
+	case ZONE_DMA32:
+		zone = pgdat->node_zones + ZONE_DMA32;
+		if (zone->present_pages)
+			zonelist->zones[j++] = zone;
 	case ZONE_DMA:
 		zone = pgdat->node_zones + ZONE_DMA;
 		if (zone->present_pages)
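
build_zonelists_node() relies on these switch cases deliberately falling through, so starting the switch at a high zone appends every lower zone that actually has memory, and ZONE_DMA32 slots in between ZONE_NORMAL and ZONE_DMA to preserve that highest-to-lowest fallback order. A condensed user-space sketch of the fallthrough pattern (the zone names and the 'present' check are simplified stand-ins):

#include <stdio.h>

enum zone_id { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM };

static const char *names[] = { "DMA", "DMA32", "Normal", "HighMem" };
static int present[]       = { 1, 1, 1, 0 };   /* pretend HIGHMEM is empty */

/* Append zone 'k' and everything below it that exists, highest first. */
static int build(enum zone_id k, const char **list, int j)
{
        switch (k) {
        case ZONE_HIGHMEM:
                if (present[ZONE_HIGHMEM])
                        list[j++] = names[ZONE_HIGHMEM];
                /* fall through */
        case ZONE_NORMAL:
                if (present[ZONE_NORMAL])
                        list[j++] = names[ZONE_NORMAL];
                /* fall through */
        case ZONE_DMA32:
                if (present[ZONE_DMA32])
                        list[j++] = names[ZONE_DMA32];
                /* fall through */
        case ZONE_DMA:
                if (present[ZONE_DMA])
                        list[j++] = names[ZONE_DMA];
        }
        return j;
}

int main(void)
{
        const char *list[4];
        int n = build(ZONE_NORMAL, list, 0);

        for (int i = 0; i < n; i++)
                printf("fallback %d: %s\n", i, list[i]);
        return 0;
}

Starting at ZONE_NORMAL this yields Normal, DMA32, DMA, which is the order the allocator falls back through when the preferred zone is exhausted.
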
@@ -1435,6 +1442,8 @@ static inline int highest_zone(int zone_bits)
 	int res = ZONE_NORMAL;
 	if (zone_bits & (__force int)__GFP_HIGHMEM)
 		res = ZONE_HIGHMEM;
+	if (zone_bits & (__force int)__GFP_DMA32)
+		res = ZONE_DMA32;
 	if (zone_bits & (__force int)__GFP_DMA)
 		res = ZONE_DMA;
 	return res;
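
In highest_zone() the checks run from the least to the most restrictive modifier, so when several zone bits are set the strictest one wins: __GFP_DMA overrides __GFP_DMA32, which overrides __GFP_HIGHMEM. A trivial sketch of that precedence with illustrative bit values (not the real masks from include/linux/gfp.h):

#include <stdio.h>

enum zone_id { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM };

/* Illustrative bit values only, not the kernel's definitions. */
#define GFP_DMA_BIT     0x01
#define GFP_HIGHMEM_BIT 0x02
#define GFP_DMA32_BIT   0x04

/* Same shape as highest_zone(): later, stricter checks override earlier ones. */
static enum zone_id highest_zone(int bits)
{
        enum zone_id res = ZONE_NORMAL;

        if (bits & GFP_HIGHMEM_BIT)
                res = ZONE_HIGHMEM;
        if (bits & GFP_DMA32_BIT)
                res = ZONE_DMA32;
        if (bits & GFP_DMA_BIT)
                res = ZONE_DMA;
        return res;
}

int main(void)
{
        printf("DMA32|HIGHMEM -> zone %d (ZONE_DMA32 is %d)\n",
               highest_zone(GFP_DMA32_BIT | GFP_HIGHMEM_BIT), ZONE_DMA32);
        return 0;
}
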
@@ -1846,11 +1855,10 @@ static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
 		if (process_zones(cpu))
 			ret = NOTIFY_BAD;
 		break;
-#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		free_zone_pagesets(cpu);
 		break;
-#endif
 	default:
 		break;
 	}
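
The pageset notifier allocates per-cpu pagesets at CPU_UP_PREPARE, so the same free path must run both when a CPU dies and when onlining is cancelled after the prepare step succeeded; treating CPU_UP_CANCELED like CPU_DEAD closes that leak. A stripped-down user-space sketch of the allocate/free pairing (event constants and the pageset payload are placeholders, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

enum { CPU_UP_PREPARE, CPU_UP_CANCELED, CPU_ONLINE, CPU_DEAD };

#define NR_CPUS 4
static void *pagesets[NR_CPUS];          /* stand-in for per-cpu pagesets */

static int notifier(int action, int cpu)
{
        switch (action) {
        case CPU_UP_PREPARE:
                pagesets[cpu] = malloc(64);  /* allocate before the CPU runs */
                if (!pagesets[cpu])
                        return -1;           /* roughly NOTIFY_BAD */
                break;
        case CPU_UP_CANCELED:                /* bring-up aborted: undo PREPARE */
        case CPU_DEAD:                       /* CPU went away: release as well */
                free(pagesets[cpu]);
                pagesets[cpu] = NULL;
                break;
        default:
                break;
        }
        return 0;
}

int main(void)
{
        notifier(CPU_UP_PREPARE, 1);
        notifier(CPU_UP_CANCELED, 1);        /* no leak if onlining is aborted */
        printf("cpu1 pageset after cancel: %p\n", pagesets[1]);
        return 0;
}
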
@@ -1955,7 +1963,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		if (zholes_size)
 			realsize -= zholes_size[j];
 
-		if (j == ZONE_DMA || j == ZONE_NORMAL)
+		if (j < ZONE_HIGHMEM)
 			nr_kernel_pages += realsize;
 		nr_all_pages += realsize;
 
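
The j < ZONE_HIGHMEM test counts DMA, DMA32 and NORMAL pages as kernel-usable with a single comparison, which works because the zone indices are declared in ascending order. A one-liner illustrating that ordering assumption:

#include <stdio.h>

/* Declaration order matches the zone layout used in this file. */
enum zone_id { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM };

int main(void)
{
        for (int j = ZONE_DMA; j <= ZONE_HIGHMEM; j++)
                printf("zone %d counts toward nr_kernel_pages: %s\n",
                       j, j < ZONE_HIGHMEM ? "yes" : "no");
        return 0;
}
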