author		Al Viro <viro@zeniv.linux.org.uk>	2005-10-21 03:22:44 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-28 11:16:51 -0400
commit		260b23674fdb570f3235ce55892246bef1c24c2a (patch)
tree		471e7b546cbf1f7ee4a165e5bd9a2de0770e53be /mm
parent		c4cdd038318863e912e9b992489f61497f98b442 (diff)
[PATCH] gfp_t: the rest
zone handling, mapping->flags handling

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/highmem.c	14
-rw-r--r--	mm/page_alloc.c	29
2 files changed, 24 insertions, 19 deletions
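
The point of this conversion is type safety rather than behaviour: gfp flags are now carried in the sparse-checked gfp_t type, so mixing them with plain integers needs an explicit __force cast. A stand-alone sketch of the idiom follows; it is simplified for illustration and is not the kernel's exact definitions (the 0x01u/0x02u values mirror the 2.6.14-era __GFP_DMA/__GFP_HIGHMEM bits).

/* Illustrative sketch only -- simplified from the kernel's sparse
 * annotations.  Run it through sparse (which defines __CHECKER__) to
 * see the type-mixing warnings this patch series is about. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define __GFP_DMA	((__force gfp_t)0x01u)	/* illustrative values */
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)

/* Fine: gfp_t combined with gfp_t yields gfp_t. */
static gfp_t add_dma(gfp_t gfp_mask)
{
	return gfp_mask | __GFP_DMA;
}

/* Deliberate escape hatch: treating the flag bits as a plain int needs
 * an explicit __force cast, exactly what highest_zone() does below. */
static int zone_bits(gfp_t gfp_mask)
{
	return (__force int)(gfp_mask & (__GFP_DMA | __GFP_HIGHMEM));
}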
diff --git a/mm/highmem.c b/mm/highmem.c
index 90e1861e2da0..ce2e7e8bbfa7 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,11 +30,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
 {
-	unsigned int gfp = gfp_mask | (unsigned int) (long) data;
-
-	return alloc_page(gfp);
+	return alloc_page(gfp_mask | GFP_DMA);
 }
 
 static void page_pool_free(void *page, void *data)
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
  * n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+{
+	return alloc_page(gfp_mask);
+}
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
 	if (!isa_page_pool)
 		BUG();
 
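
Design note on the highmem.c half: the ISA bounce pool used to smuggle __GFP_DMA through mempool_create()'s opaque pool_data pointer and OR it back into the mask via an int cast, which no longer type-checks once the mask is a gfp_t. The patch therefore splits the allocator into page_pool_alloc() (plain) and page_pool_alloc_isa() (adds GFP_DMA) and passes NULL as pool_data. A minimal sketch of the resulting callback pairing, assuming the 2.6.14-era mempool API; the pool name, size and init function below are hypothetical:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mempool.h>

#define MY_POOL_SIZE	16	/* hypothetical size, not from the patch */

static void *my_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	/* The zone restriction is expressed in gfp_t terms here instead
	 * of being smuggled through pool_data as a casted integer. */
	return alloc_page(gfp_mask | GFP_DMA);
}

static void my_pool_free(void *page, void *pool_data)
{
	__free_page(page);
}

static mempool_t *my_pool;

static int __init my_pool_init(void)
{
	my_pool = mempool_create(MY_POOL_SIZE, my_pool_alloc,
				 my_pool_free, NULL);
	return my_pool ? 0 : -ENOMEM;
}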
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa43ae3ab8c9..94c864eac9c4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
  * of the allocation.
  */
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int can_try_harder, int gfp_high)
+		      int classzone_idx, int can_try_harder, gfp_t gfp_high)
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
@@ -777,7 +777,7 @@ struct page * fastcall
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
-	const int wait = gfp_mask & __GFP_WAIT;
+	const gfp_t wait = gfp_mask & __GFP_WAIT;
 	struct zone **zones, *z;
 	struct page *page;
 	struct reclaim_state reclaim_state;
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 	 * get_zeroed_page() returns a 32-bit address, which cannot represent
 	 * a highmem page
 	 */
-	BUG_ON(gfp_mask & __GFP_HIGHMEM);
+	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 
 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
 	if (page)
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
 	return j;
 }
 
+static inline int highest_zone(int zone_bits)
+{
+	int res = ZONE_NORMAL;
+	if (zone_bits & (__force int)__GFP_HIGHMEM)
+		res = ZONE_HIGHMEM;
+	if (zone_bits & (__force int)__GFP_DMA)
+		res = ZONE_DMA;
+	return res;
+}
+
 #ifdef CONFIG_NUMA
 #define MAX_NODE_LOAD (num_online_nodes())
 static int __initdata node_load[MAX_NUMNODES];
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 		zonelist = pgdat->node_zonelists + i;
 		for (j = 0; zonelist->zones[j] != NULL; j++);
 
-		k = ZONE_NORMAL;
-		if (i & __GFP_HIGHMEM)
-			k = ZONE_HIGHMEM;
-		if (i & __GFP_DMA)
-			k = ZONE_DMA;
+		k = highest_zone(i);
 
 		j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
 		zonelist->zones[j] = NULL;
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 		zonelist = pgdat->node_zonelists + i;
 
 		j = 0;
-		k = ZONE_NORMAL;
-		if (i & __GFP_HIGHMEM)
-			k = ZONE_HIGHMEM;
-		if (i & __GFP_DMA)
-			k = ZONE_DMA;
-
+		k = highest_zone(i);
 		j = build_zonelists_node(pgdat, zonelist, j, k);
 		/*
 		 * Now we build the zonelist so that it contains the zones
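
For reference, the selection that the new highest_zone() helper centralizes: build_zonelists() iterates i over the combinations of the zone-modifier bits, and the most restrictive zone wins because __GFP_DMA is tested last. A stand-alone user-space sketch of the same logic (flag values and names are illustrative, mirroring the 2.6.14-era bits; this is not kernel code):

#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM };	/* same order as 2.6.14 */

#define MY_GFP_DMA	0x01u	/* illustrative, mirrors __GFP_DMA */
#define MY_GFP_HIGHMEM	0x02u	/* illustrative, mirrors __GFP_HIGHMEM */

/* Same decision order as the kernel helper: HIGHMEM first, then DMA,
 * so that the most restrictive zone wins when both bits are set. */
static int highest_zone(int zone_bits)
{
	int res = ZONE_NORMAL;

	if (zone_bits & MY_GFP_HIGHMEM)
		res = ZONE_HIGHMEM;
	if (zone_bits & MY_GFP_DMA)
		res = ZONE_DMA;
	return res;
}

int main(void)
{
	static const char *names[] = { "ZONE_DMA", "ZONE_NORMAL", "ZONE_HIGHMEM" };
	int i;

	for (i = 0; i < 4; i++)
		printf("zone bits 0x%x -> %s\n", i, names[highest_zone(i)]);
	return 0;
}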