author    Dean Nelson <dcn@sgi.com>  2006-06-23 05:03:21 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-23 10:42:49 -0400
commit    929f97276bcf7f4a95272ed08a85339b98ba210d
tree      4975698af9559279c83e4e268213ed13e3efee9a
parent    833423143c3a7c6545e409d65febd0d92deb351b
[PATCH] change gen_pool allocator to not touch managed memory
Modify the gen_pool allocator (lib/genalloc.c) to utilize a bitmap scheme
instead of the buddy scheme.  The purpose of this change is to eliminate
the touching of the actual memory being allocated.

Since the change modifies the interface, a change to the uncached
allocator (arch/ia64/kernel/uncached.c) is also required.

Both Andrey Volkov and Jes Sorensen have expressed a desire that the
gen_pool allocator not write to the memory being managed.  See the
following:

  http://marc.theaimsgroup.com/?l=linux-kernel&m=113518602713125&w=2
  http://marc.theaimsgroup.com/?l=linux-kernel&m=113533568827916&w=2

Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Andrey Volkov <avolkov@varma-el.com>
Acked-by: Jes Sorensen <jes@trained-monkey.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/ia64/kernel/uncached.c      | 200
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c  |  15
-rw-r--r--  include/linux/genalloc.h         |  35
-rw-r--r--  lib/genalloc.c                   | 263
4 files changed, 252 insertions(+), 261 deletions(-)
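
For reference, a minimal sketch of how a client drives the reworked interface
described above.  The function signatures are the ones this patch introduces
in include/linux/genalloc.h; the helper function, its chunk_addr argument, and
the reuse of IA64_GRANULE_SIZE are illustrative assumptions, not code from the
patch:

	#include <linux/genalloc.h>

	/* illustrative caller: chunk_addr names special memory the caller owns */
	static unsigned long example_grab_page(unsigned long chunk_addr)
	{
		struct gen_pool *pool;
		unsigned long addr;

		/* one bitmap bit per PAGE_SIZE bytes; pool header on any node (-1) */
		pool = gen_pool_create(PAGE_SHIFT, -1);
		if (pool == NULL)
			return 0;

		/*
		 * Hand the pool one granule of special memory.  Only the
		 * separately kmalloc'd chunk descriptor and bitmap are
		 * written; the managed memory itself is never touched.
		 */
		if (gen_pool_add(pool, chunk_addr, IA64_GRANULE_SIZE, -1) < 0)
			return 0;

		/* first-fit allocation out of the chunk bitmap; 0 means exhausted */
		addr = gen_pool_alloc(pool, PAGE_SIZE);

		/* counterpart: the caller must remember the allocation's size */
		if (addr != 0)
			gen_pool_free(pool, addr, PAGE_SIZE);

		return addr;
	}
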
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index fcd2bad0286f..5f03b9e524dd 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -29,15 +29,8 @@
 #include <asm/tlbflush.h>
 #include <asm/sn/arch.h>
 
-#define DEBUG	0
 
-#if DEBUG
-#define dprintk			printk
-#else
-#define dprintk(x...)		do { } while (0)
-#endif
-
-void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);
+extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
 #define MAX_UNCACHED_GRANULES	5
 static int allocated_granules;
@@ -60,6 +53,7 @@ static void uncached_ipi_visibility(void *data)
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+
 	status = ia64_pal_mc_drain();
 	if (status)
 		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
@@ -67,30 +61,35 @@ static void uncached_ipi_mc_drain(void *data)
 }
 
 
-static unsigned long
-uncached_get_new_chunk(struct gen_pool *poolp)
+/*
+ * Add a new chunk of uncached memory pages to the specified pool.
+ *
+ * @pool: pool to add new chunk of uncached memory to
+ * @nid: node id of node to allocate memory from, or -1
+ *
+ * This is accomplished by first allocating a granule of cached memory pages
+ * and then converting them to uncached memory pages.
+ */
+static int uncached_add_chunk(struct gen_pool *pool, int nid)
 {
 	struct page *page;
-	void *tmp;
 	int status, i;
-	unsigned long addr, node;
+	unsigned long c_addr, uc_addr;
 
 	if (allocated_granules >= MAX_UNCACHED_GRANULES)
-		return 0;
+		return -1;
+
+	/* attempt to allocate a granule's worth of cached memory pages */
 
-	node = poolp->private;
-	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
+	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	if (!page)
+		return -1;
 
-	dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
-		page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);
+	/* convert the memory pages from cached to uncached */
 
-	/*
-	 * Do magic if no mem on local node! XXX
-	 */
-	if (!page)
-		return 0;
-	tmp = page_address(page);
+	c_addr = (unsigned long)page_address(page);
+	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
 
 	/*
 	 * There's a small race here where it's possible for someone to
@@ -100,76 +99,90 @@ uncached_get_new_chunk(struct gen_pool *poolp)
 	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
 		SetPageUncached(&page[i]);
 
-	flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);
+	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-
-	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
-		status, raw_smp_processor_id());
-
 	if (!status) {
 		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
 		if (status)
-			printk(KERN_WARNING "smp_call_function failed for "
-			       "uncached_ipi_visibility! (%i)\n", status);
+			goto failed;
 	}
 
+	preempt_disable();
+
 	if (ia64_platform_is("sn2"))
-		sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
+		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
 	else
-		flush_icache_range((unsigned long)tmp,
-				   (unsigned long)tmp+IA64_GRANULE_SIZE);
+		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
+
+	/* flush the just introduced uncached translation from the TLB */
+	local_flush_tlb_all();
+
+	preempt_enable();
 
 	ia64_pal_mc_drain();
 	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
 	if (status)
-		printk(KERN_WARNING "smp_call_function failed for "
-		       "uncached_ipi_mc_drain! (%i)\n", status);
+		goto failed;
 
-	addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
+	/*
+	 * The chunk of memory pages has been converted to uncached so now we
+	 * can add it to the pool.
+	 */
+	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	if (status)
+		goto failed;
 
 	allocated_granules++;
-	return addr;
+	return 0;
+
+	/* failed to convert or add the chunk so give it back to the kernel */
failed:
+	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+		ClearPageUncached(&page[i]);
+
+	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	return -1;
 }
 
 
 /*
  * uncached_alloc_page
  *
+ * @starting_nid: node id of node to start with, or -1
+ *
  * Allocate 1 uncached page. Allocates on the requested node. If no
  * uncached pages are available on the requested node, roundrobin starting
- * with higher nodes.
+ * with the next higher node.
  */
-unsigned long
-uncached_alloc_page(int nid)
+unsigned long uncached_alloc_page(int starting_nid)
 {
-	unsigned long maddr;
+	unsigned long uc_addr;
+	struct gen_pool *pool;
+	int nid;
 
-	maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);
+	if (unlikely(starting_nid >= MAX_NUMNODES))
+		return 0;
 
-	dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
-		maddr, nid);
+	if (starting_nid < 0)
+		starting_nid = numa_node_id();
+	nid = starting_nid;
 
-	/*
-	 * If no memory is availble on our local node, try the
-	 * remaining nodes in the system.
-	 */
-	if (!maddr) {
-		int i;
-
-		for (i = MAX_NUMNODES - 1; i >= 0; i--) {
-			if (i == nid || !node_online(i))
-				continue;
-			maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
-			dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
-				"returns %lx on node %i\n", maddr, i);
-			if (maddr) {
-				break;
-			}
-		}
-	}
+	do {
+		if (!node_online(nid))
+			continue;
+		pool = uncached_pool[nid];
+		if (pool == NULL)
+			continue;
+		do {
+			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			if (uc_addr != 0)
+				return uc_addr;
+		} while (uncached_add_chunk(pool, nid) == 0);
+
+	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
-	return maddr;
+	return 0;
 }
 EXPORT_SYMBOL(uncached_alloc_page);
 
@@ -177,21 +190,22 @@ EXPORT_SYMBOL(uncached_alloc_page);
 /*
  * uncached_free_page
  *
+ * @uc_addr: uncached address of page to free
+ *
  * Free a single uncached page.
  */
-void
-uncached_free_page(unsigned long maddr)
+void uncached_free_page(unsigned long uc_addr)
 {
-	int node;
-
-	node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);
+	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
+	struct gen_pool *pool = uncached_pool[nid];
 
-	dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
+	if (unlikely(pool == NULL))
+		return;
 
-	if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
-		panic("uncached_free_page invalid address %lx\n", maddr);
+	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
+		panic("uncached_free_page invalid address %lx\n", uc_addr);
 
-	gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
+	gen_pool_free(pool, uc_addr, PAGE_SIZE);
 }
 EXPORT_SYMBOL(uncached_free_page);
 
@@ -199,43 +213,39 @@ EXPORT_SYMBOL(uncached_free_page);
 /*
  * uncached_build_memmap,
  *
+ * @uc_start: uncached starting address of a chunk of uncached memory
+ * @uc_end: uncached ending address of a chunk of uncached memory
+ * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
+ *
  * Called at boot time to build a map of pages that can be used for
  * memory special operations.
  */
-static int __init
-uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
+static int __init uncached_build_memmap(unsigned long uc_start,
+					unsigned long uc_end, void *arg)
 {
-	long length = end - start;
-	int node;
-
-	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
+	struct gen_pool *pool = uncached_pool[nid];
+	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
-	memset((char *)start, 0, length);
 
-	node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
-
-	for (; start < end ; start += PAGE_SIZE) {
-		dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
-		gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
+	if (pool != NULL) {
+		memset((char *)uc_start, 0, size);
+		(void) gen_pool_add(pool, uc_start, size, nid);
 	}
-
 	return 0;
 }
 
 
-static int __init uncached_init(void) {
-	int i;
+static int __init uncached_init(void)
+{
+	int nid;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		if (!node_online(i))
-			continue;
-		uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
-						   &uncached_get_new_chunk, i);
+	for_each_online_node(nid) {
+		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
 	}
 
-	efi_memmap_walk_uc(uncached_build_memmap);
-
+	efi_memmap_walk_uc(uncached_build_memmap, NULL);
 	return 0;
 }
 
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0f..2862cb33026d 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
  *
  */
 #include <linux/module.h>
 #include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
 
 /**
  * sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
  * Flush a range of addresses from all caches including L4.
  * All addresses fully or partially contained within
  * @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
  */
 void
 sn_flush_all_caches(long flush_addr, long bytes)
 {
-	flush_icache_range(flush_addr, flush_addr+bytes);
+	unsigned long addr = flush_addr;
+
+	/* SHub1 requires a cached address */
+	if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+		addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+	flush_icache_range(addr, addr + bytes);
 	/*
 	 * The last call may have returned before the caches
 	 * were actually flushed, so we call it again to make
	 * sure.
 	 */
-	flush_icache_range(flush_addr, flush_addr+bytes);
+	flush_icache_range(addr, addr + bytes);
 	mb();
 }
 EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 7fd0576a4454..690c42803d2e 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -4,37 +4,32 @@
  * Uses for this includes on-device special memory, uncached memory
  * etc.
  *
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver, adapted for general purpose use.
- *
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
 
-#include <linux/spinlock.h>
 
-#define ALLOC_MIN_SHIFT 5 /* 32 bytes minimum */
 /*
- * Link between free memory chunks of a given size.
+ * General purpose special memory pool descriptor.
  */
-struct gen_pool_link {
-	struct gen_pool_link *next;
+struct gen_pool {
+	rwlock_t lock;
+	struct list_head chunks;	/* list of chunks in this pool */
+	int min_alloc_order;		/* minimum allocation order */
 };
 
 /*
- * Memory pool descriptor.
+ * General purpose special memory pool chunk descriptor.
  */
-struct gen_pool {
+struct gen_pool_chunk {
 	spinlock_t lock;
-	unsigned long (*get_new_chunk)(struct gen_pool *);
-	struct gen_pool *next;
-	struct gen_pool_link *h;
-	unsigned long private;
-	int max_chunk_shift;
+	struct list_head next_chunk;	/* next chunk in pool */
+	unsigned long start_addr;	/* starting address of memory chunk */
+	unsigned long end_addr;		/* ending address of memory chunk */
+	unsigned long bits[0];		/* bitmap for allocating memory chunk */
 };
 
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size);
-void gen_pool_free(struct gen_pool *mp, unsigned long ptr, int size);
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
-				 unsigned long (*fp)(struct gen_pool *),
-				 unsigned long data);
+extern struct gen_pool *gen_pool_create(int, int);
+extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int);
+extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9ce0a6a3b85a..71338b48e889 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -4,10 +4,6 @@
  * Uses for this includes on-device special memory, uncached memory
  * etc.
  *
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>,
- * and adapted for general purpose use.
- *
  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
  *
  * This source code is licensed under the GNU General Public License,
@@ -15,172 +11,155 @@
  */
 
 #include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
 #include <linux/genalloc.h>
 
-#include <asm/page.h>
-
 
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
-				 unsigned long (*fp)(struct gen_pool *),
-				 unsigned long data)
+/*
+ * Create a new special memory pool.
+ *
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 {
-	struct gen_pool *poolp;
-	unsigned long tmp;
-	int i;
-
-	/*
-	 * This is really an arbitrary limit, +10 is enough for
-	 * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
-	 * this can be increased without problems.
-	 */
-	if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
-	    ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
-		return NULL;
-
-	if (!max_chunk_shift)
-		max_chunk_shift = PAGE_SHIFT;
-
-	poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
-	if (!poolp)
-		return NULL;
-	memset(poolp, 0, sizeof(struct gen_pool));
-	poolp->h = kmalloc(sizeof(struct gen_pool_link) *
-			   (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
-			   GFP_KERNEL);
-	if (!poolp->h) {
-		printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
-		kfree(poolp);
-		return NULL;
-	}
-	memset(poolp->h, 0, sizeof(struct gen_pool_link) *
-	       (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
-
-	spin_lock_init(&poolp->lock);
-	poolp->get_new_chunk = fp;
-	poolp->max_chunk_shift = max_chunk_shift;
-	poolp->private = data;
-
-	for (i = 0; i < nr_chunks; i++) {
-		tmp = poolp->get_new_chunk(poolp);
-		printk(KERN_INFO "allocated %lx\n", tmp);
-		if (!tmp)
-			break;
-		gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
-	}
+	struct gen_pool *pool;
 
-	return poolp;
+	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+	if (pool != NULL) {
+		rwlock_init(&pool->lock);
+		INIT_LIST_HEAD(&pool->chunks);
+		pool->min_alloc_order = min_alloc_order;
+	}
+	return pool;
 }
 EXPORT_SYMBOL(gen_pool_create);
 
 
 /*
- * Simple power of two buddy-like generic allocator.
- * Provides naturally aligned memory chunks.
+ * Add a new chunk of memory to the specified pool.
+ *
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ *       allocated on, or -1
  */
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+		 int nid)
 {
-	int j, i, s, max_chunk_size;
-	unsigned long a, flags;
-	struct gen_pool_link *h = poolp->h;
+	struct gen_pool_chunk *chunk;
+	int nbits = size >> pool->min_alloc_order;
+	int nbytes = sizeof(struct gen_pool_chunk) +
+		     (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	max_chunk_size = 1 << poolp->max_chunk_shift;
+	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	if (unlikely(chunk == NULL))
+		return -1;
 
-	if (size > max_chunk_size)
-		return 0;
+	memset(chunk, 0, nbytes);
+	spin_lock_init(&chunk->lock);
+	chunk->start_addr = addr;
+	chunk->end_addr = addr + size;
 
-	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	i = fls(size - 1);
-	s = 1 << i;
-	j = i -= ALLOC_MIN_SHIFT;
-
-	spin_lock_irqsave(&poolp->lock, flags);
-	while (!h[j].next) {
-		if (s == max_chunk_size) {
-			struct gen_pool_link *ptr;
-			spin_unlock_irqrestore(&poolp->lock, flags);
-			ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
-			spin_lock_irqsave(&poolp->lock, flags);
-			h[j].next = ptr;
-			if (h[j].next)
-				h[j].next->next = NULL;
-			break;
-		}
-		j++;
-		s <<= 1;
-	}
-	a = (unsigned long) h[j].next;
-	if (a) {
-		h[j].next = h[j].next->next;
-		/*
-		 * This should be split into a seperate function doing
-		 * the chunk split in order to support custom
-		 * handling memory not physically accessible by host
-		 */
-		while (j > i) {
-			j -= 1;
-			s >>= 1;
-			h[j].next = (struct gen_pool_link *) (a + s);
-			h[j].next->next = NULL;
-		}
-	}
-	spin_unlock_irqrestore(&poolp->lock, flags);
-	return a;
+	write_lock(&pool->lock);
+	list_add(&chunk->next_chunk, &pool->chunks);
+	write_unlock(&pool->lock);
+
+	return 0;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_add);
 
 
 /*
- * Counter-part of the generic allocator.
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ *
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
  */
-void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
-	struct gen_pool_link *q;
-	struct gen_pool_link *h = poolp->h;
-	unsigned long a, b, flags;
-	int i, s, max_chunk_size;
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	unsigned long addr, flags;
+	int order = pool->min_alloc_order;
+	int nbits, bit, start_bit, end_bit;
 
-	max_chunk_size = 1 << poolp->max_chunk_shift;
-
-	if (size > max_chunk_size)
-		return;
-
-	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	i = fls(size - 1);
-	s = 1 << i;
-	i -= ALLOC_MIN_SHIFT;
-
-	a = ptr;
-
-	spin_lock_irqsave(&poolp->lock, flags);
-	while (1) {
-		if (s == max_chunk_size) {
-			((struct gen_pool_link *)a)->next = h[i].next;
-			h[i].next = (struct gen_pool_link *)a;
-			break;
-		}
-		b = a ^ s;
-		q = &h[i];
-
-		while (q->next && q->next != (struct gen_pool_link *)b)
-			q = q->next;
-
-		if (!q->next) {
-			((struct gen_pool_link *)a)->next = h[i].next;
-			h[i].next = (struct gen_pool_link *)a;
-			break;
-		}
-		q->next = q->next->next;
-		a = a & b;
-		s <<= 1;
-		i++;
-	}
-	spin_unlock_irqrestore(&poolp->lock, flags);
+	if (size == 0)
+		return 0;
+
+	nbits = (size + (1UL << order) - 1) >> order;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit -= nbits + 1;
+
+		spin_lock_irqsave(&chunk->lock, flags);
+		bit = -1;
+		while (bit + 1 < end_bit) {
+			bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+			if (bit >= end_bit)
+				break;
+
+			start_bit = bit;
+			if (nbits > 1) {
+				bit = find_next_bit(chunk->bits, bit + nbits,
+						    bit + 1);
+				if (bit - start_bit < nbits)
+					continue;
+			}
+
+			addr = chunk->start_addr +
+			       ((unsigned long)start_bit << order);
+			while (nbits--)
+				__set_bit(start_bit++, &chunk->bits);
+			spin_unlock_irqrestore(&chunk->lock, flags);
+			read_unlock(&pool->lock);
+			return addr;
+		}
+		spin_unlock_irqrestore(&chunk->lock, flags);
+	}
+	read_unlock(&pool->lock);
+	return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+
+/*
+ * Free the specified memory back to the specified pool.
+ *
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	unsigned long flags;
+	int order = pool->min_alloc_order;
+	int bit, nbits;
+
+	nbits = (size + (1UL << order) - 1) >> order;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			BUG_ON(addr + size > chunk->end_addr);
+			spin_lock_irqsave(&chunk->lock, flags);
+			bit = (addr - chunk->start_addr) >> order;
+			while (nbits--)
+				__clear_bit(bit++, &chunk->bits);
+			spin_unlock_irqrestore(&chunk->lock, flags);
+			break;
+		}
+	}
+	BUG_ON(nbits > 0);
+	read_unlock(&pool->lock);
 }
 EXPORT_SYMBOL(gen_pool_free);
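
As a closing sanity check on the new scheme's overhead, the bitmap accounting
in gen_pool_add() works out as follows for the uncached allocator above
(assuming the usual ia64 values IA64_GRANULE_SHIFT = 24 and PAGE_SHIFT = 14,
i.e. 16MB granules and 16KB pages):

	nbits  = size >> min_alloc_order
	       = (1 << 24) >> 14                 = 1024 bits
	nbytes = sizeof(struct gen_pool_chunk)
	         + (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE
	       = sizeof(struct gen_pool_chunk) + 128 bytes

So tracking a full 16MB granule costs one chunk descriptor plus 128 bytes of
ordinary kernel memory, whereas the old buddy scheme threaded its
gen_pool_link free lists through the uncached memory itself.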