about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Dave Hansen <haveblue@us.ibm.com>	2005-06-23 03:07:40 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-23 12:45:01 -0400
commit	348f8b6c4837a07304d2f72b11ce8d96588065e0 (patch)
tree	f4c6c332b2c327630b284598325dff2f44e6c9cf
parent	6f167ec721108c9282d54424516a12c805e3c306 (diff)
[PATCH] sparsemem base: reorganize page->flags bit operations
Generify the value fields in the page_flags. The aim is to allow the location and size of these fields to be varied. Additionally we want to move away from fixed allocations per field whilst still enforcing the overall bit utilisation limits. We rely on the compiler to spot and optimise the accessor functions. Signed-off-by: Andy Whitcroft <apw@shadowen.org> Signed-off-by: Dave Hansen <haveblue@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/mm.h	| 53
-rw-r--r--	include/linux/mmzone.h	| 19
-rw-r--r--	mm/page_alloc.c	| 2
3 files changed, 52 insertions(+), 22 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1813b162b0a8..57b2ead51dba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -395,19 +395,41 @@ static inline void put_page(struct page *page)
395/* 395/*
396 * The zone field is never updated after free_area_init_core() 396 * The zone field is never updated after free_area_init_core()
397 * sets it, so none of the operations on it need to be atomic. 397 * sets it, so none of the operations on it need to be atomic.
398 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
399 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
400 */ 398 */
401#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT) 399
400/* Page flags: | NODE | ZONE | ... | FLAGS | */
401#define NODES_PGOFF ((sizeof(page_flags_t)*8) - NODES_SHIFT)
402#define ZONES_PGOFF (NODES_PGOFF - ZONES_SHIFT)
403
404/*
405 * Define the bit shifts to access each section. For non-existant
406 * sections we define the shift as 0; that plus a 0 mask ensures
407 * the compiler will optimise away reference to them.
408 */
409#define NODES_PGSHIFT (NODES_PGOFF * (NODES_SHIFT != 0))
410#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_SHIFT != 0))
411
412/* NODE:ZONE is used to lookup the zone from a page. */
413#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT)
414#define ZONETABLE_PGSHIFT ZONES_PGSHIFT
415
416#if NODES_SHIFT+ZONES_SHIFT > FLAGS_RESERVED
417#error NODES_SHIFT+ZONES_SHIFT > FLAGS_RESERVED
418#endif
419
402#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone) 420#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
403 421
422#define ZONES_MASK ((1UL << ZONES_SHIFT) - 1)
423#define NODES_MASK ((1UL << NODES_SHIFT) - 1)
424#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1)
425
404static inline unsigned long page_zonenum(struct page *page) 426static inline unsigned long page_zonenum(struct page *page)
405{ 427{
406 return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT)); 428 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
407} 429}
408static inline unsigned long page_to_nid(struct page *page) 430static inline unsigned long page_to_nid(struct page *page)
409{ 431{
410 return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT)); 432 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
411} 433}
412 434
413struct zone; 435struct zone;
@@ -415,13 +437,26 @@ extern struct zone *zone_table[];
415 437
416static inline struct zone *page_zone(struct page *page) 438static inline struct zone *page_zone(struct page *page)
417{ 439{
418 return zone_table[page->flags >> NODEZONE_SHIFT]; 440 return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
441 ZONETABLE_MASK];
442}
443
444static inline void set_page_zone(struct page *page, unsigned long zone)
445{
446 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
447 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
448}
449static inline void set_page_node(struct page *page, unsigned long node)
450{
451 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
452 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
419} 453}
420 454
421static inline void set_page_zone(struct page *page, unsigned long nodezone_num) 455static inline void set_page_links(struct page *page, unsigned long zone,
456 unsigned long node)
422{ 457{
423 page->flags &= ~(~0UL << NODEZONE_SHIFT); 458 set_page_zone(page, zone);
424 page->flags |= nodezone_num << NODEZONE_SHIFT; 459 set_page_node(page, node);
425} 460}
426 461
427#ifndef CONFIG_DISCONTIGMEM 462#ifndef CONFIG_DISCONTIGMEM
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b79633d3a97b..39e912708e2a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -414,30 +414,25 @@ extern struct pglist_data contig_page_data;
414 414
415#include <asm/mmzone.h> 415#include <asm/mmzone.h>
416 416
417#endif /* !CONFIG_DISCONTIGMEM */
418
417#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) 419#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
418/* 420/*
419 * with 32 bit page->flags field, we reserve 8 bits for node/zone info. 421 * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
420 * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes. 422 * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
421 */ 423 */
422#define MAX_NODES_SHIFT 6 424#define FLAGS_RESERVED 8
425
423#elif BITS_PER_LONG == 64 426#elif BITS_PER_LONG == 64
424/* 427/*
425 * with 64 bit flags field, there's plenty of room. 428 * with 64 bit flags field, there's plenty of room.
426 */ 429 */
427#define MAX_NODES_SHIFT 10 430#define FLAGS_RESERVED 32
428#endif
429 431
430#endif /* !CONFIG_DISCONTIGMEM */ 432#else
431
432#if NODES_SHIFT > MAX_NODES_SHIFT
433#error NODES_SHIFT > MAX_NODES_SHIFT
434#endif
435 433
436/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */ 434#error BITS_PER_LONG not defined
437#define MAX_ZONES_SHIFT 2
438 435
439#if ZONES_SHIFT > MAX_ZONES_SHIFT
440#error ZONES_SHIFT > MAX_ZONES_SHIFT
441#endif 436#endif
442 437
443#endif /* !__ASSEMBLY__ */ 438#endif /* !__ASSEMBLY__ */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bf1dd8819097..1958358e29b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1653,7 +1653,7 @@ void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1653 struct page *page; 1653 struct page *page;
1654 1654
1655 for (page = start; page < (start + size); page++) { 1655 for (page = start; page < (start + size); page++) {
1656 set_page_zone(page, NODEZONE(nid, zone)); 1656 set_page_links(page, zone, nid);
1657 set_page_count(page, 0); 1657 set_page_count(page, 0);
1658 reset_page_mapcount(page); 1658 reset_page_mapcount(page);
1659 SetPageReserved(page); 1659 SetPageReserved(page);