about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2006-03-28 21:24:50 -0500
committerPaul Mackerras <paulus@samba.org>2006-03-28 21:24:50 -0500
commitbac30d1a78d0f11c613968fc8b351a91ed465386 (patch)
treee52f3c876522a2f6047a6ec1c27df2e8a79486b8 /mm
parente8222502ee6157e2713da9e0792c21f4ad458d50 (diff)
parentca9ba4471c1203bb6e759b76e83167fec54fe590 (diff)
Merge ../linux-2.6
Diffstat (limited to 'mm')
-rw-r--r--mm/Makefile2
-rw-r--r--mm/bootmem.c39
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/mmzone.c50
-rw-r--r--mm/page_alloc.c56
-rw-r--r--mm/slab.c4
-rw-r--r--mm/swap.c2
-rw-r--r--mm/vmscan.c6
8 files changed, 135 insertions, 26 deletions
diff --git a/mm/Makefile b/mm/Makefile
index f10c753dce6d..0b8f73f2ed16 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -10,7 +10,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ 10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 page_alloc.o page-writeback.o pdflush.o \ 11 page_alloc.o page-writeback.o pdflush.o \
12 readahead.o swap.o truncate.o vmscan.o \ 12 readahead.o swap.o truncate.o vmscan.o \
13 prio_tree.o util.o $(mmu-y) 13 prio_tree.o util.o mmzone.o $(mmu-y)
14 14
15obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o 15obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
16obj-$(CONFIG_HUGETLBFS) += hugetlb.o 16obj-$(CONFIG_HUGETLBFS) += hugetlb.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index b55bd39fc5dd..d3e3bd2ffcea 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -33,6 +33,7 @@ EXPORT_SYMBOL(max_pfn); /* This is exported so
33 * dma_get_required_mask(), which uses 33 * dma_get_required_mask(), which uses
34 * it, can be an inline function */ 34 * it, can be an inline function */
35 35
36static LIST_HEAD(bdata_list);
36#ifdef CONFIG_CRASH_DUMP 37#ifdef CONFIG_CRASH_DUMP
37/* 38/*
38 * If we have booted due to a crash, max_pfn will be a very low value. We need 39 * If we have booted due to a crash, max_pfn will be a very low value. We need
@@ -52,6 +53,27 @@ unsigned long __init bootmem_bootmap_pages (unsigned long pages)
52 53
53 return mapsize; 54 return mapsize;
54} 55}
56/*
57 * link bdata in order
58 */
59static void link_bootmem(bootmem_data_t *bdata)
60{
61 bootmem_data_t *ent;
62 if (list_empty(&bdata_list)) {
63 list_add(&bdata->list, &bdata_list);
64 return;
65 }
66 /* insert in order */
67 list_for_each_entry(ent, &bdata_list, list) {
68 if (bdata->node_boot_start < ent->node_boot_start) {
69 list_add_tail(&bdata->list, &ent->list);
70 return;
71 }
72 }
73 list_add_tail(&bdata->list, &bdata_list);
74 return;
75}
76
55 77
56/* 78/*
57 * Called once to set up the allocator itself. 79 * Called once to set up the allocator itself.
@@ -62,13 +84,11 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
62 bootmem_data_t *bdata = pgdat->bdata; 84 bootmem_data_t *bdata = pgdat->bdata;
63 unsigned long mapsize = ((end - start)+7)/8; 85 unsigned long mapsize = ((end - start)+7)/8;
64 86
65 pgdat->pgdat_next = pgdat_list;
66 pgdat_list = pgdat;
67
68 mapsize = ALIGN(mapsize, sizeof(long)); 87 mapsize = ALIGN(mapsize, sizeof(long));
69 bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); 88 bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
70 bdata->node_boot_start = (start << PAGE_SHIFT); 89 bdata->node_boot_start = (start << PAGE_SHIFT);
71 bdata->node_low_pfn = end; 90 bdata->node_low_pfn = end;
91 link_bootmem(bdata);
72 92
73 /* 93 /*
74 * Initially all pages are reserved - setup_arch() has to 94 * Initially all pages are reserved - setup_arch() has to
@@ -383,12 +403,11 @@ unsigned long __init free_all_bootmem (void)
383 403
384void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) 404void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
385{ 405{
386 pg_data_t *pgdat = pgdat_list; 406 bootmem_data_t *bdata;
387 void *ptr; 407 void *ptr;
388 408
389 for_each_pgdat(pgdat) 409 list_for_each_entry(bdata, &bdata_list, list)
390 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, 410 if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0)))
391 align, goal, 0)))
392 return(ptr); 411 return(ptr);
393 412
394 /* 413 /*
@@ -416,11 +435,11 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigne
416 435
417void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) 436void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
418{ 437{
419 pg_data_t *pgdat = pgdat_list; 438 bootmem_data_t *bdata;
420 void *ptr; 439 void *ptr;
421 440
422 for_each_pgdat(pgdat) 441 list_for_each_entry(bdata, &bdata_list, list)
423 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, 442 if ((ptr = __alloc_bootmem_core(bdata, size,
424 align, goal, LOW32LIMIT))) 443 align, goal, LOW32LIMIT)))
425 return(ptr); 444 return(ptr);
426 445
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4f71cfd29c6f..dec8249e972d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -912,7 +912,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
912 /* 912 /*
913 * Check if this process has the right to modify the specified 913 * Check if this process has the right to modify the specified
914 * process. The right exists if the process has administrative 914 * process. The right exists if the process has administrative
915 * capabilities, superuser priviledges or the same 915 * capabilities, superuser privileges or the same
916 * userid as the target process. 916 * userid as the target process.
917 */ 917 */
918 if ((current->euid != task->suid) && (current->euid != task->uid) && 918 if ((current->euid != task->suid) && (current->euid != task->uid) &&
diff --git a/mm/mmzone.c b/mm/mmzone.c
new file mode 100644
index 000000000000..b022370e612e
--- /dev/null
+++ b/mm/mmzone.c
@@ -0,0 +1,50 @@
1/*
2 * linux/mm/mmzone.c
3 *
4 * management code for pgdats and zones.
5 */
6
7
8#include <linux/config.h>
9#include <linux/stddef.h>
10#include <linux/mmzone.h>
11#include <linux/module.h>
12
13struct pglist_data *first_online_pgdat(void)
14{
15 return NODE_DATA(first_online_node);
16}
17
18EXPORT_SYMBOL(first_online_pgdat);
19
20struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
21{
22 int nid = next_online_node(pgdat->node_id);
23
24 if (nid == MAX_NUMNODES)
25 return NULL;
26 return NODE_DATA(nid);
27}
28EXPORT_SYMBOL(next_online_pgdat);
29
30
31/*
32 * next_zone - helper magic for for_each_zone()
33 */
34struct zone *next_zone(struct zone *zone)
35{
36 pg_data_t *pgdat = zone->zone_pgdat;
37
38 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
39 zone++;
40 else {
41 pgdat = next_online_pgdat(pgdat);
42 if (pgdat)
43 zone = pgdat->node_zones;
44 else
45 zone = NULL;
46 }
47 return zone;
48}
49EXPORT_SYMBOL(next_zone);
50
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 338a02bb004d..dc523a1f270d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -49,7 +49,6 @@ nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
49EXPORT_SYMBOL(node_online_map); 49EXPORT_SYMBOL(node_online_map);
50nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 50nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
51EXPORT_SYMBOL(node_possible_map); 51EXPORT_SYMBOL(node_possible_map);
52struct pglist_data *pgdat_list __read_mostly;
53unsigned long totalram_pages __read_mostly; 52unsigned long totalram_pages __read_mostly;
54unsigned long totalhigh_pages __read_mostly; 53unsigned long totalhigh_pages __read_mostly;
55long nr_swap_pages; 54long nr_swap_pages;
@@ -1201,7 +1200,7 @@ unsigned int nr_free_highpages (void)
1201 pg_data_t *pgdat; 1200 pg_data_t *pgdat;
1202 unsigned int pages = 0; 1201 unsigned int pages = 0;
1203 1202
1204 for_each_pgdat(pgdat) 1203 for_each_online_pgdat(pgdat)
1205 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1204 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1206 1205
1207 return pages; 1206 return pages;
@@ -1343,7 +1342,7 @@ void get_zone_counts(unsigned long *active,
1343 *active = 0; 1342 *active = 0;
1344 *inactive = 0; 1343 *inactive = 0;
1345 *free = 0; 1344 *free = 0;
1346 for_each_pgdat(pgdat) { 1345 for_each_online_pgdat(pgdat) {
1347 unsigned long l, m, n; 1346 unsigned long l, m, n;
1348 __get_zone_counts(&l, &m, &n, pgdat); 1347 __get_zone_counts(&l, &m, &n, pgdat);
1349 *active += l; 1348 *active += l;
@@ -2042,7 +2041,6 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
2042 zone_wait_table_init(zone, size); 2041 zone_wait_table_init(zone, size);
2043 pgdat->nr_zones = zone_idx(zone) + 1; 2042 pgdat->nr_zones = zone_idx(zone) + 1;
2044 2043
2045 zone->zone_mem_map = pfn_to_page(zone_start_pfn);
2046 zone->zone_start_pfn = zone_start_pfn; 2044 zone->zone_start_pfn = zone_start_pfn;
2047 2045
2048 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2046 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
@@ -2170,8 +2168,9 @@ static void *frag_start(struct seq_file *m, loff_t *pos)
2170{ 2168{
2171 pg_data_t *pgdat; 2169 pg_data_t *pgdat;
2172 loff_t node = *pos; 2170 loff_t node = *pos;
2173 2171 for (pgdat = first_online_pgdat();
2174 for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next) 2172 pgdat && node;
2173 pgdat = next_online_pgdat(pgdat))
2175 --node; 2174 --node;
2176 2175
2177 return pgdat; 2176 return pgdat;
@@ -2182,7 +2181,7 @@ static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
2182 pg_data_t *pgdat = (pg_data_t *)arg; 2181 pg_data_t *pgdat = (pg_data_t *)arg;
2183 2182
2184 (*pos)++; 2183 (*pos)++;
2185 return pgdat->pgdat_next; 2184 return next_online_pgdat(pgdat);
2186} 2185}
2187 2186
2188static void frag_stop(struct seq_file *m, void *arg) 2187static void frag_stop(struct seq_file *m, void *arg)
@@ -2483,7 +2482,7 @@ static void setup_per_zone_lowmem_reserve(void)
2483 struct pglist_data *pgdat; 2482 struct pglist_data *pgdat;
2484 int j, idx; 2483 int j, idx;
2485 2484
2486 for_each_pgdat(pgdat) { 2485 for_each_online_pgdat(pgdat) {
2487 for (j = 0; j < MAX_NR_ZONES; j++) { 2486 for (j = 0; j < MAX_NR_ZONES; j++) {
2488 struct zone *zone = pgdat->node_zones + j; 2487 struct zone *zone = pgdat->node_zones + j;
2489 unsigned long present_pages = zone->present_pages; 2488 unsigned long present_pages = zone->present_pages;
@@ -2745,3 +2744,44 @@ void *__init alloc_large_system_hash(const char *tablename,
2745 2744
2746 return table; 2745 return table;
2747} 2746}
2747
2748#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
2749/*
2750 * pfn <-> page translation. out-of-line version.
2751 * (see asm-generic/memory_model.h)
2752 */
2753#if defined(CONFIG_FLATMEM)
2754struct page *pfn_to_page(unsigned long pfn)
2755{
2756 return mem_map + (pfn - ARCH_PFN_OFFSET);
2757}
2758unsigned long page_to_pfn(struct page *page)
2759{
2760 return (page - mem_map) + ARCH_PFN_OFFSET;
2761}
2762#elif defined(CONFIG_DISCONTIGMEM)
2763struct page *pfn_to_page(unsigned long pfn)
2764{
2765 int nid = arch_pfn_to_nid(pfn);
2766 return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
2767}
2768unsigned long page_to_pfn(struct page *page)
2769{
2770 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
2771 return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
2772}
2773#elif defined(CONFIG_SPARSEMEM)
2774struct page *pfn_to_page(unsigned long pfn)
2775{
2776 return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
2777}
2778
2779unsigned long page_to_pfn(struct page *page)
2780{
2781 long section_id = page_to_section(page);
2782 return page - __section_mem_map_addr(__nr_to_section(section_id));
2783}
2784#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
2785EXPORT_SYMBOL(pfn_to_page);
2786EXPORT_SYMBOL(page_to_pfn);
2787#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
diff --git a/mm/slab.c b/mm/slab.c
index 681837499d7d..4cbf8bb13557 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3311,7 +3311,7 @@ void *__alloc_percpu(size_t size)
3311 * and we have no way of figuring out how to fix the array 3311 * and we have no way of figuring out how to fix the array
3312 * that we have allocated then.... 3312 * that we have allocated then....
3313 */ 3313 */
3314 for_each_cpu(i) { 3314 for_each_possible_cpu(i) {
3315 int node = cpu_to_node(i); 3315 int node = cpu_to_node(i);
3316 3316
3317 if (node_online(node)) 3317 if (node_online(node))
@@ -3398,7 +3398,7 @@ void free_percpu(const void *objp)
3398 /* 3398 /*
3399 * We allocate for all cpus so we cannot use for online cpu here. 3399 * We allocate for all cpus so we cannot use for online cpu here.
3400 */ 3400 */
3401 for_each_cpu(i) 3401 for_each_possible_cpu(i)
3402 kfree(p->ptrs[i]); 3402 kfree(p->ptrs[i]);
3403 kfree(p); 3403 kfree(p);
3404} 3404}
diff --git a/mm/swap.c b/mm/swap.c
index 91b7e2026f69..88895c249bc9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,7 +512,7 @@ long percpu_counter_sum(struct percpu_counter *fbc)
512 512
513 spin_lock(&fbc->lock); 513 spin_lock(&fbc->lock);
514 ret = fbc->count; 514 ret = fbc->count;
515 for_each_cpu(cpu) { 515 for_each_possible_cpu(cpu) {
516 long *pcount = per_cpu_ptr(fbc->counters, cpu); 516 long *pcount = per_cpu_ptr(fbc->counters, cpu);
517 ret += *pcount; 517 ret += *pcount;
518 } 518 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 78865c849f8f..acdf001d6941 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1305,7 +1305,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
1305 1305
1306 current->reclaim_state = &reclaim_state; 1306 current->reclaim_state = &reclaim_state;
1307repeat: 1307repeat:
1308 for_each_pgdat(pgdat) { 1308 for_each_online_pgdat(pgdat) {
1309 unsigned long freed; 1309 unsigned long freed;
1310 1310
1311 freed = balance_pgdat(pgdat, nr_to_free, 0); 1311 freed = balance_pgdat(pgdat, nr_to_free, 0);
@@ -1335,7 +1335,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
1335 cpumask_t mask; 1335 cpumask_t mask;
1336 1336
1337 if (action == CPU_ONLINE) { 1337 if (action == CPU_ONLINE) {
1338 for_each_pgdat(pgdat) { 1338 for_each_online_pgdat(pgdat) {
1339 mask = node_to_cpumask(pgdat->node_id); 1339 mask = node_to_cpumask(pgdat->node_id);
1340 if (any_online_cpu(mask) != NR_CPUS) 1340 if (any_online_cpu(mask) != NR_CPUS)
1341 /* One of our CPUs online: restore mask */ 1341 /* One of our CPUs online: restore mask */
@@ -1351,7 +1351,7 @@ static int __init kswapd_init(void)
1351 pg_data_t *pgdat; 1351 pg_data_t *pgdat;
1352 1352
1353 swap_setup(); 1353 swap_setup();
1354 for_each_pgdat(pgdat) { 1354 for_each_online_pgdat(pgdat) {
1355 pid_t pid; 1355 pid_t pid;
1356 1356
1357 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); 1357 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);