author		Andy Whitcroft <apw@shadowen.org>	2005-06-23 03:07:54 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-23 12:45:04 -0400
commit		d41dee369bff3b9dcb6328d4d822926c28cc2594 (patch)
tree		a0405f3b7af3ebca21838a7d427bd75a067bf850 /include/linux/mm.h
parent		af705362ab6018071310c5fcd436a6b457517d5f (diff)
[PATCH] sparsemem memory model
Sparsemem abstracts the use of discontiguous mem_map[] arrays. This kind of
mem_map[] is needed by discontiguous memory machines (like in the old
CONFIG_DISCONTIGMEM case) as well as by memory hotplug systems. Sparsemem
replaces DISCONTIGMEM when enabled, and it is hoped that it can eventually
become a complete replacement.
A significant advantage over DISCONTIGMEM is that it's completely separated
from CONFIG_NUMA. When producing this patch, it became apparent that NUMA
and DISCONTIG are often confused.
Another advantage is that sparsemem doesn't require each NUMA node's ranges to
be contiguous. It can handle overlapping ranges between nodes without
problems, whereas DISCONTIGMEM currently throws that memory away.
Sparsemem uses an array to provide different pfn_to_page() translations for
each SECTION_SIZE area of physical memory. This is what allows the mem_map[]
to be chopped up.
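
To make that concrete, here is a minimal sketch of a section-based
pfn_to_page() (illustrative only: the struct and helper shown below are
simplified, and the real sparsemem code pre-biases the stored mem_map
pointer so the pfn can be added directly):

	/* Each section owns a fragment of mem_map[]; pfn_to_page() becomes
	 * a two-step lookup: pick the section, then index into its fragment. */
	struct mem_section {
		struct page *section_mem_map;	/* this section's mem_map fragment */
	};
	extern struct mem_section mem_section[];

	#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
	#define pfn_to_section_nr(pfn)	((pfn) >> PFN_SECTION_SHIFT)

	static inline struct page *sketch_pfn_to_page(unsigned long pfn)
	{
		return mem_section[pfn_to_section_nr(pfn)].section_mem_map +
			(pfn & (PAGES_PER_SECTION - 1));
	}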
In order to do quick pfn_to_page() operations, the section number of the page
is encoded in page->flags. Part of the sparsemem infrastructure enables
sharing of these bits more dynamically (at compile-time) between the
page_zone() and sparsemem operations. However, on 32-bit architectures, the
number of bits is quite limited, and may require growing the size of the
page->flags type in certain conditions. Several things might force this to
occur: a decrease in the SECTION_SIZE (if you want to hotplug smaller areas of
memory), an increase in the physical address space, or an increase in the
number of used page->flags.
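
For a sense of the arithmetic (the numbers here are illustrative, not taken
from this patch), the number of section bits is the physical address width
minus the section size:

	/* Illustrative example: 36-bit physical addressing (e.g. x86 PAE)
	 * with 2^28-byte (256MB) sections needs 36 - 28 = 8 section bits: */
	#define MAX_PHYSMEM_BITS	36
	#define SECTION_SIZE_BITS	28
	#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

Halving the section size or doubling the physical address space each costs
one more bit, and those bits compete with the zone and node fields and the
ordinary flag bits in the same page->flags word.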
One thing to note is that, once sparsemem is present, the NUMA node
information no longer needs to be stored in page->flags. Keeping it there can
provide speed increases on certain platforms, so it is stored there if there
is room; if there is not, an alternate (theoretically slower) mechanism is
used.
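
The slower path can be seen by expanding page_zone() (shown in the hunk
below) inside the new page_to_nid(): with no NODE bits available, the
SECTION:ZONE bits index zone_table[], and the node id is then read back
through the zone's pgdat:

	/* Sketch of the fallback, assuming the zone_table[] lookup from
	 * page_zone(): the extra pointer chases are what make this path
	 * theoretically slower than a shift-and-mask on page->flags. */
	static inline unsigned long sketch_page_to_nid(struct page *page)
	{
		struct zone *zone = zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
					       ZONETABLE_MASK];
		return zone->zone_pgdat->node_id;
	}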
This patch introduces CONFIG_FLATMEM. It is used in almost all cases where
there used to be an #ifndef DISCONTIG, because SPARSEMEM and DISCONTIGMEM
often have to compile out the same areas of code.
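
As an illustration of the conversion pattern (not a hunk from this patch), a
guard that used to be written negatively:

	#ifndef CONFIG_DISCONTIGMEM
	/* ... flat mem_map[] handling ... */
	#endif

becomes a positive test that compiles the code out under both DISCONTIGMEM
and SPARSEMEM at once:

	#ifdef CONFIG_FLATMEM
	/* ... flat mem_map[] handling ... */
	#endif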
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Martin Bligh <mbligh@aracnet.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	92
1 file changed, 75 insertions(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 57b2ead51dba..6eb7f48317f8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -397,40 +397,80 @@ static inline void put_page(struct page *page)
  * sets it, so none of the operations on it need to be atomic.
  */
 
-/* Page flags: | NODE | ZONE | ... | FLAGS | */
-#define NODES_PGOFF		((sizeof(page_flags_t)*8) - NODES_SHIFT)
-#define ZONES_PGOFF		(NODES_PGOFF - ZONES_SHIFT)
+
+/*
+ * page->flags layout:
+ *
+ * There are three possibilities for how page->flags get
+ * laid out.  The first is for the normal case, without
+ * sparsemem.  The second is for sparsemem when there is
+ * plenty of space for node and section.  The last is when
+ * we have run out of space and have to fall back to an
+ * alternate (slower) way of determining the node.
+ *
+ *        No sparsemem: |       NODE     | ZONE | ... | FLAGS |
+ * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
+ *   no space for node: | SECTION |     ZONE    | ... | FLAGS |
+ */
+#ifdef CONFIG_SPARSEMEM
+#define SECTIONS_WIDTH		SECTIONS_SHIFT
+#else
+#define SECTIONS_WIDTH		0
+#endif
+
+#define ZONES_WIDTH		ZONES_SHIFT
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#define NODES_WIDTH		NODES_SHIFT
+#else
+#define NODES_WIDTH		0
+#endif
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+#define SECTIONS_PGOFF		((sizeof(page_flags_t)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
+
+/*
+ * We are going to use the flags for the page to node mapping if its in
+ * there.  This includes the case where there is no node, so it is implicit.
+ */
+#define FLAGS_HAS_NODE		(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+
+#ifndef PFN_SECTION_SHIFT
+#define PFN_SECTION_SHIFT 0
+#endif
 
 /*
  * Define the bit shifts to access each section.  For non-existant
  * sections we define the shift as 0; that plus a 0 mask ensures
  * the compiler will optimise away reference to them.
  */
-#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_SHIFT != 0))
-#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_SHIFT != 0))
+#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE is used to lookup the zone from a page. */
+/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
+#if FLAGS_HAS_NODE
 #define ZONETABLE_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
+#else
+#define ZONETABLE_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
+#endif
 #define ZONETABLE_PGSHIFT	ZONES_PGSHIFT
 
-#if NODES_SHIFT+ZONES_SHIFT > FLAGS_RESERVED
-#error NODES_SHIFT+ZONES_SHIFT > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
 #endif
 
-#define NODEZONE(node, zone)	((node << ZONES_SHIFT) | zone)
-
-#define ZONES_MASK		((1UL << ZONES_SHIFT) - 1)
-#define NODES_MASK		((1UL << NODES_SHIFT) - 1)
+#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
 static inline unsigned long page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
-static inline unsigned long page_to_nid(struct page *page)
-{
-	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
-}
 
 struct zone;
 extern struct zone *zone_table[];
@@ -441,6 +481,18 @@ static inline struct zone *page_zone(struct page *page)
 		ZONETABLE_MASK];
 }
 
+static inline unsigned long page_to_nid(struct page *page)
+{
+	if (FLAGS_HAS_NODE)
+		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+	else
+		return page_zone(page)->zone_pgdat->node_id;
+}
+static inline unsigned long page_to_section(struct page *page)
+{
+	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+}
+
 static inline void set_page_zone(struct page *page, unsigned long zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
@@ -451,12 +503,18 @@ static inline void set_page_node(struct page *page, unsigned long node)
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 }
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+}
 
 static inline void set_page_links(struct page *page, unsigned long zone,
-	unsigned long node)
+	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
 	set_page_node(page, node);
+	set_page_section(page, pfn_to_section_nr(pfn));
 }
 
 #ifndef CONFIG_DISCONTIGMEM
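
Since set_page_links() gained a pfn parameter, every caller has to pass it
through. A hypothetical caller (the function name is illustrative, not part
of this patch) would now look like:

	/* Hypothetical caller: mem_map initialization must now supply the
	 * pfn so the section number lands in page->flags alongside the
	 * zone and (when there is room) the node. */
	static void init_one_page(struct page *page, unsigned long zone,
				  unsigned long nid, unsigned long pfn)
	{
		set_page_links(page, zone, nid, pfn);
	}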