author	Dave Hansen <haveblue@us.ibm.com>	2005-10-29 21:16:50 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:44 -0400
commit	ed8ece2ec8d3c2031b1a1a0737568bb0d49454e0
tree	5fb5b994ad3fcdc4ab2bb2c906d88c02a8395807
parent	2774812f417db562f0d659d2c1b5755ba35d2770
[PATCH] memory hotplug prep: break out zone initialization
If a zone is empty at boot-time and then hot-added to later, it needs to run the same init code that would have been run on it at boot.

This patch breaks out zone table and per-cpu-pages functions for use by the hotplug code.  You can almost see all of the free_area_init_core() function on one page now. :)

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
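For context, here is a minimal sketch of how a memory-hotplug path could reuse the helper this patch breaks out. init_currently_empty_zone() is the function added below; hotadd_new_zone(), its caller, and the decision to key off zone->wait_table are assumptions made up for illustration and are not part of this patch or the kernel.

/*
 * Hypothetical illustration only -- not part of this patch.  Shows how a
 * hot-add path might run the same init code a boot-time zone would get.
 * init_currently_empty_zone() is introduced by this patch; the function
 * name and arguments here are invented.
 */
static int __devinit hotadd_new_zone(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	/* A zone that was empty at boot has never been through this setup. */
	if (!zone->wait_table)
		init_currently_empty_zone(zone, start_pfn, nr_pages);

	zone->spanned_pages += nr_pages;
	zone->present_pages += nr_pages;
	return 0;
}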
-rw-r--r--	mm/page_alloc.c	98
1 file changed, 58 insertions(+), 40 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a2995a5d012c..9a2fa8110afc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1875,6 +1875,60 @@ void __init setup_per_cpu_pageset()
 
 #endif
 
+static __devinit
+void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+{
+	int i;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	/*
+	 * The per-page waitqueue mechanism uses hashed waitqueues
+	 * per zone.
+	 */
+	zone->wait_table_size = wait_table_size(zone_size_pages);
+	zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
+	zone->wait_table = (wait_queue_head_t *)
+		alloc_bootmem_node(pgdat, zone->wait_table_size
+					* sizeof(wait_queue_head_t));
+
+	for(i = 0; i < zone->wait_table_size; ++i)
+		init_waitqueue_head(zone->wait_table + i);
+}
+
+static __devinit void zone_pcp_init(struct zone *zone)
+{
+	int cpu;
+	unsigned long batch = zone_batchsize(zone);
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+#ifdef CONFIG_NUMA
+		/* Early boot. Slab allocator not functional yet */
+		zone->pageset[cpu] = &boot_pageset[cpu];
+		setup_pageset(&boot_pageset[cpu],0);
+#else
+		setup_pageset(zone_pcp(zone,cpu), batch);
+#endif
+	}
+	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
+		zone->name, zone->present_pages, batch);
+}
+
+static __devinit void init_currently_empty_zone(struct zone *zone,
+		unsigned long zone_start_pfn, unsigned long size)
+{
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	zone_wait_table_init(zone, size);
+	pgdat->nr_zones = zone_idx(zone) + 1;
+
+	zone->zone_mem_map = pfn_to_page(zone_start_pfn);
+	zone->zone_start_pfn = zone_start_pfn;
+
+	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
+
+	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+}
+
 /*
  * Set up the zone data structures:
  *   - mark all pages reserved
@@ -1884,8 +1938,8 @@ void __init setup_per_cpu_pageset()
 static void __init free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
-	unsigned long i, j;
-	int cpu, nid = pgdat->node_id;
+	unsigned long j;
+	int nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 
 	pgdat->nr_zones = 0;
@@ -1895,7 +1949,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize;
-		unsigned long batch;
 
 		realsize = size = zones_size[j];
 		if (zholes_size)
@@ -1915,19 +1968,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 
 		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
 
-		batch = zone_batchsize(zone);
-
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
-			/* Early boot. Slab allocator not functional yet */
-			zone->pageset[cpu] = &boot_pageset[cpu];
-			setup_pageset(&boot_pageset[cpu],0);
-#else
-			setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
-		}
-		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
-				zone_names[j], realsize, batch);
+		zone_pcp_init(zone);
 		INIT_LIST_HEAD(&zone->active_list);
 		INIT_LIST_HEAD(&zone->inactive_list);
 		zone->nr_scan_active = 0;
@@ -1938,32 +1979,9 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		/*
-		 * The per-page waitqueue mechanism uses hashed waitqueues
-		 * per zone.
-		 */
-		zone->wait_table_size = wait_table_size(size);
-		zone->wait_table_bits =
-			wait_table_bits(zone->wait_table_size);
-		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node(pgdat, zone->wait_table_size
-						* sizeof(wait_queue_head_t));
-
-		for(i = 0; i < zone->wait_table_size; ++i)
-			init_waitqueue_head(zone->wait_table + i);
-
-		pgdat->nr_zones = j+1;
-
-		zone->zone_mem_map = pfn_to_page(zone_start_pfn);
-		zone->zone_start_pfn = zone_start_pfn;
-
-		memmap_init(size, nid, j, zone_start_pfn);
-
 		zonetable_add(zone, nid, j, zone_start_pfn, size);
-
+		init_currently_empty_zone(zone, zone_start_pfn, size);
 		zone_start_pfn += size;
-
-		zone_init_free_lists(pgdat, zone, zone->spanned_pages);
 	}
 }
 