diff options
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/mm.h | 92 | ||||
-rw-r--r-- | include/linux/mmzone.h | 96 | ||||
-rw-r--r-- | include/linux/numa.h | 2 |
3 files changed, 172 insertions, 18 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 57b2ead51dba..6eb7f48317f8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -397,40 +397,80 @@ static inline void put_page(struct page *page) | |||
397 | * sets it, so none of the operations on it need to be atomic. | 397 | * sets it, so none of the operations on it need to be atomic. |
398 | */ | 398 | */ |
399 | 399 | ||
400 | /* Page flags: | NODE | ZONE | ... | FLAGS | */ | 400 | |
401 | #define NODES_PGOFF ((sizeof(page_flags_t)*8) - NODES_SHIFT) | 401 | /* |
402 | #define ZONES_PGOFF (NODES_PGOFF - ZONES_SHIFT) | 402 | * page->flags layout: |
403 | * | ||
404 | * There are three possibilities for how page->flags get | ||
405 | * laid out. The first is for the normal case, without | ||
406 | * sparsemem. The second is for sparsemem when there is | ||
407 | * plenty of space for node and section. The last is when | ||
408 | * we have run out of space and have to fall back to an | ||
409 | * alternate (slower) way of determining the node. | ||
410 | * | ||
411 | * No sparsemem: | NODE | ZONE | ... | FLAGS | | ||
412 | * with space for node: | SECTION | NODE | ZONE | ... | FLAGS | | ||
413 | * no space for node: | SECTION | ZONE | ... | FLAGS | | ||
414 | */ | ||
415 | #ifdef CONFIG_SPARSEMEM | ||
416 | #define SECTIONS_WIDTH SECTIONS_SHIFT | ||
417 | #else | ||
418 | #define SECTIONS_WIDTH 0 | ||
419 | #endif | ||
420 | |||
421 | #define ZONES_WIDTH ZONES_SHIFT | ||
422 | |||
423 | #if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED | ||
424 | #define NODES_WIDTH NODES_SHIFT | ||
425 | #else | ||
426 | #define NODES_WIDTH 0 | ||
427 | #endif | ||
428 | |||
429 | /* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */ | ||
430 | #define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH) | ||
431 | #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) | ||
432 | #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) | ||
433 | |||
434 | /* | ||
435 | * We are going to use the flags for the page to node mapping if it's in | ||
436 | * there. This includes the case where there is no node, so it is implicit. | ||
437 | */ | ||
438 | #define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0) | ||
439 | |||
440 | #ifndef PFN_SECTION_SHIFT | ||
441 | #define PFN_SECTION_SHIFT 0 | ||
442 | #endif | ||
403 | 443 | ||
404 | /* | 444 | /* |
405 | * Define the bit shifts to access each section. For non-existent | 445 | * Define the bit shifts to access each section. For non-existent |
406 | * sections we define the shift as 0; that plus a 0 mask ensures | 446 | * sections we define the shift as 0; that plus a 0 mask ensures |
407 | * the compiler will optimise away reference to them. | 447 | * the compiler will optimise away reference to them. |
408 | */ | 448 | */ |
409 | #define NODES_PGSHIFT (NODES_PGOFF * (NODES_SHIFT != 0)) | 449 | #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) |
410 | #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_SHIFT != 0)) | 450 | #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) |
451 | #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) | ||
411 | 452 | ||
412 | /* NODE:ZONE is used to lookup the zone from a page. */ | 453 | /* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */ |
454 | #if FLAGS_HAS_NODE | ||
413 | #define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT) | 455 | #define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT) |
456 | #else | ||
457 | #define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) | ||
458 | #endif | ||
414 | #define ZONETABLE_PGSHIFT ZONES_PGSHIFT | 459 | #define ZONETABLE_PGSHIFT ZONES_PGSHIFT |
415 | 460 | ||
416 | #if NODES_SHIFT+ZONES_SHIFT > FLAGS_RESERVED | 461 | #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED |
417 | #error NODES_SHIFT+ZONES_SHIFT > FLAGS_RESERVED | 462 | #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED |
418 | #endif | 463 | #endif |
419 | 464 | ||
420 | #define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone) | 465 | #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) |
421 | 466 | #define NODES_MASK ((1UL << NODES_WIDTH) - 1) | |
422 | #define ZONES_MASK ((1UL << ZONES_SHIFT) - 1) | 467 | #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) |
423 | #define NODES_MASK ((1UL << NODES_SHIFT) - 1) | ||
424 | #define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1) | 468 | #define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1) |
425 | 469 | ||
426 | static inline unsigned long page_zonenum(struct page *page) | 470 | static inline unsigned long page_zonenum(struct page *page) |
427 | { | 471 | { |
428 | return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; | 472 | return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; |
429 | } | 473 | } |
430 | static inline unsigned long page_to_nid(struct page *page) | ||
431 | { | ||
432 | return (page->flags >> NODES_PGSHIFT) & NODES_MASK; | ||
433 | } | ||
434 | 474 | ||
435 | struct zone; | 475 | struct zone; |
436 | extern struct zone *zone_table[]; | 476 | extern struct zone *zone_table[]; |
@@ -441,6 +481,18 @@ static inline struct zone *page_zone(struct page *page) | |||
441 | ZONETABLE_MASK]; | 481 | ZONETABLE_MASK]; |
442 | } | 482 | } |
443 | 483 | ||
484 | static inline unsigned long page_to_nid(struct page *page) | ||
485 | { | ||
486 | if (FLAGS_HAS_NODE) | ||
487 | return (page->flags >> NODES_PGSHIFT) & NODES_MASK; | ||
488 | else | ||
489 | return page_zone(page)->zone_pgdat->node_id; | ||
490 | } | ||
491 | static inline unsigned long page_to_section(struct page *page) | ||
492 | { | ||
493 | return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; | ||
494 | } | ||
495 | |||
444 | static inline void set_page_zone(struct page *page, unsigned long zone) | 496 | static inline void set_page_zone(struct page *page, unsigned long zone) |
445 | { | 497 | { |
446 | page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); | 498 | page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); |
@@ -451,12 +503,18 @@ static inline void set_page_node(struct page *page, unsigned long node) | |||
451 | page->flags &= ~(NODES_MASK << NODES_PGSHIFT); | 503 | page->flags &= ~(NODES_MASK << NODES_PGSHIFT); |
452 | page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; | 504 | page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; |
453 | } | 505 | } |
506 | static inline void set_page_section(struct page *page, unsigned long section) | ||
507 | { | ||
508 | page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); | ||
509 | page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; | ||
510 | } | ||
454 | 511 | ||
455 | static inline void set_page_links(struct page *page, unsigned long zone, | 512 | static inline void set_page_links(struct page *page, unsigned long zone, |
456 | unsigned long node) | 513 | unsigned long node, unsigned long pfn) |
457 | { | 514 | { |
458 | set_page_zone(page, zone); | 515 | set_page_zone(page, zone); |
459 | set_page_node(page, node); | 516 | set_page_node(page, node); |
517 | set_page_section(page, pfn_to_section_nr(pfn)); | ||
460 | } | 518 | } |
461 | 519 | ||
462 | #ifndef CONFIG_DISCONTIGMEM | 520 | #ifndef CONFIG_DISCONTIGMEM |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6ef07de98d69..19860d317ec2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -269,7 +269,9 @@ typedef struct pglist_data { | |||
269 | struct zone node_zones[MAX_NR_ZONES]; | 269 | struct zone node_zones[MAX_NR_ZONES]; |
270 | struct zonelist node_zonelists[GFP_ZONETYPES]; | 270 | struct zonelist node_zonelists[GFP_ZONETYPES]; |
271 | int nr_zones; | 271 | int nr_zones; |
272 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | ||
272 | struct page *node_mem_map; | 273 | struct page *node_mem_map; |
274 | #endif | ||
273 | struct bootmem_data *bdata; | 275 | struct bootmem_data *bdata; |
274 | unsigned long node_start_pfn; | 276 | unsigned long node_start_pfn; |
275 | unsigned long node_present_pages; /* total number of physical pages */ | 277 | unsigned long node_present_pages; /* total number of physical pages */ |
@@ -284,7 +286,11 @@ typedef struct pglist_data { | |||
284 | 286 | ||
285 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) | 287 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) |
286 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) | 288 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) |
289 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | ||
287 | #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) | 290 | #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) |
291 | #else | ||
292 | #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) | ||
293 | #endif | ||
288 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) | 294 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
289 | 295 | ||
290 | extern struct pglist_data *pgdat_list; | 296 | extern struct pglist_data *pgdat_list; |
@@ -416,6 +422,10 @@ extern struct pglist_data contig_page_data; | |||
416 | 422 | ||
417 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 423 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
418 | 424 | ||
425 | #ifdef CONFIG_SPARSEMEM | ||
426 | #include <asm/sparsemem.h> | ||
427 | #endif | ||
428 | |||
419 | #if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) | 429 | #if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) |
420 | /* | 430 | /* |
421 | * with 32 bit page->flags field, we reserve 8 bits for node/zone info. | 431 | * with 32 bit page->flags field, we reserve 8 bits for node/zone info. |
@@ -439,6 +449,92 @@ extern struct pglist_data contig_page_data; | |||
439 | #define early_pfn_to_nid(nid) (0UL) | 449 | #define early_pfn_to_nid(nid) (0UL) |
440 | #endif | 450 | #endif |
441 | 451 | ||
452 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | ||
453 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | ||
454 | |||
455 | #ifdef CONFIG_SPARSEMEM | ||
456 | |||
457 | /* | ||
458 | * SECTION_SHIFT #bits space required to store a section # | ||
459 | * | ||
460 | * PA_SECTION_SHIFT physical address to/from section number | ||
461 | * PFN_SECTION_SHIFT pfn to/from section number | ||
462 | */ | ||
463 | #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) | ||
464 | |||
465 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) | ||
466 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | ||
467 | |||
468 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) | ||
469 | |||
470 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | ||
471 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | ||
472 | |||
473 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS | ||
474 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | ||
475 | #endif | ||
476 | |||
477 | struct page; | ||
478 | struct mem_section { | ||
479 | struct page *section_mem_map; | ||
480 | }; | ||
481 | |||
482 | extern struct mem_section mem_section[NR_MEM_SECTIONS]; | ||
483 | |||
484 | /* | ||
485 | * Given a kernel address, find the home node of the underlying memory. | ||
486 | */ | ||
487 | #define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) | ||
488 | |||
489 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) | ||
490 | { | ||
491 | return &mem_section[pfn_to_section_nr(pfn)]; | ||
492 | } | ||
493 | |||
494 | #define pfn_to_page(pfn) \ | ||
495 | ({ \ | ||
496 | unsigned long __pfn = (pfn); \ | ||
497 | __pfn_to_section(__pfn)->section_mem_map + __pfn; \ | ||
498 | }) | ||
499 | #define page_to_pfn(page) \ | ||
500 | ({ \ | ||
501 | page - mem_section[page_to_section(page)].section_mem_map; \ | ||
502 | }) | ||
503 | |||
504 | static inline int pfn_valid(unsigned long pfn) | ||
505 | { | ||
506 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | ||
507 | return 0; | ||
508 | return mem_section[pfn_to_section_nr(pfn)].section_mem_map != 0; | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * These are _only_ used during initialisation, therefore they | ||
513 | * can use __initdata ... They could have names to indicate | ||
514 | * this restriction. | ||
515 | */ | ||
516 | #ifdef CONFIG_NUMA | ||
517 | #define pfn_to_nid early_pfn_to_nid | ||
518 | #endif | ||
519 | |||
520 | #define pfn_to_pgdat(pfn) \ | ||
521 | ({ \ | ||
522 | NODE_DATA(pfn_to_nid(pfn)); \ | ||
523 | }) | ||
524 | |||
525 | #define early_pfn_valid(pfn) pfn_valid(pfn) | ||
526 | void sparse_init(void); | ||
527 | #else | ||
528 | #define sparse_init() do {} while (0) | ||
529 | #endif /* CONFIG_SPARSEMEM */ | ||
530 | |||
531 | #ifndef early_pfn_valid | ||
532 | #define early_pfn_valid(pfn) (1) | ||
533 | #endif | ||
534 | |||
535 | void memory_present(int nid, unsigned long start, unsigned long end); | ||
536 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | ||
537 | |||
442 | #endif /* !__ASSEMBLY__ */ | 538 | #endif /* !__ASSEMBLY__ */ |
443 | #endif /* __KERNEL__ */ | 539 | #endif /* __KERNEL__ */ |
444 | #endif /* _LINUX_MMZONE_H */ | 540 | #endif /* _LINUX_MMZONE_H */ |
diff --git a/include/linux/numa.h b/include/linux/numa.h index bd0c8c4e9a95..f0c539bd3cfc 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | 5 | ||
6 | #ifdef CONFIG_DISCONTIGMEM | 6 | #ifndef CONFIG_FLATMEM |
7 | #include <asm/numnodes.h> | 7 | #include <asm/numnodes.h> |
8 | #endif | 8 | #endif |
9 | 9 | ||