path: root/mm/percpu.c
author	Christoph Lameter <cl@linux.com>	2009-04-02 00:21:44 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-08 12:31:31 -0400
commit	e1b9aa3f47242e757c776a3771bb6613e675bf9c (patch)
tree	aa81121a42fdc7f2f1cc916db67558ec3d0f5017 /mm/percpu.c
parent	ae9e6bc9f74f8247cbca50a6a93c80e0d686fa19 (diff)
percpu: remove rbtree and use page->index instead
Impact: use page->index for addr to chunk mapping instead of dedicated rbtree

The rbtree is used to determine the chunk from the virtual address.
However, we can already determine the page struct from a virtual address
and there are several unused fields in page struct used by vmalloc.
Use the index field to store a pointer to the chunk. Then there is no
need anymore for an rbtree.

tj: * s/(set|get)_chunk/pcpu_\1_page_chunk/

    * Drop inline from the above two functions and moved them upwards
      so that they are with other simple helpers.

    * Initial pages might not (actually most of the time don't) live
      in the vmalloc area. With the previous patch to manually
      reverse-map both first chunks, this is no longer an issue.
      Removed pcpu_set_chunk() call on initial pages.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: rusty@rustcorp.com.au
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: rmk@arm.linux.org.uk
Cc: starvik@axis.com
Cc: ralf@linux-mips.org
Cc: davem@davemloft.net
Cc: cooloney@kernel.org
Cc: kyle@mcmartin.ca
Cc: matthew@wil.cx
Cc: grundler@parisc-linux.org
Cc: takata@linux-m32r.org
Cc: benh@kernel.crashing.org
Cc: rth@twiddle.net
Cc: ink@jurassic.park.msu.ru
Cc: heiko.carstens@de.ibm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
LKML-Reference: <49D43D58.4050102@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
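In essence, the patch stashes a pointer to the owning chunk in the otherwise
unused page->index field of every vmalloc-backed percpu page and reads it back
via vmalloc_to_page() on the free path, which makes the rbtree redundant. The
following is a condensed sketch of the resulting lookup, assembled only from
the helpers and call sites visible in the diff below; the reserved/first-chunk
handling inside the range check is simplified here relative to the real code.

	/* remember which chunk owns a page (set when the chunk is populated) */
	static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
	{
		page->index = (unsigned long)pcpu;
	}

	/* read the stored chunk pointer back out of the page struct */
	static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
	{
		return (struct pcpu_chunk *)page->index;
	}

	static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
	{
		void *first_start = pcpu_first_chunk->vm->addr;

		/* first-chunk pages may not be vmalloc-backed; match them by range
		 * (the reserved-chunk check inside this branch is elided here) */
		if (addr >= first_start && addr < first_start + pcpu_chunk_size)
			return pcpu_first_chunk;

		/* everything else is vmalloc memory: page struct -> chunk */
		return pcpu_get_page_chunk(vmalloc_to_page(addr));
	}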
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	100
1 file changed, 20 insertions, 80 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index bf1bf1f4a729..c0b2c1a76e81 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -23,7 +23,7 @@
  * Allocation is done in offset-size areas of single unit space. Ie,
  * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
- * percpu base registers UNIT_SIZE apart.
+ * percpu base registers pcpu_unit_size apart.
  *
  * There are usually many small percpu allocations many of them as
  * small as 4 bytes. The allocator organizes chunks into lists
@@ -38,8 +38,8 @@
  * region and negative allocated. Allocation inside a chunk is done
  * by scanning this map sequentially and serving the first matching
  * entry. This is mostly copied from the percpu_modalloc() allocator.
- * Chunks are also linked into a rb tree to ease address to chunk
- * mapping during free.
+ * Chunks can be determined from the address using the index field
+ * in the page struct. The index field contains a pointer to the chunk.
  *
  * To use this allocator, arch code should do the followings.
  *
@@ -61,7 +61,6 @@
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/pfn.h>
-#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -88,7 +87,6 @@
 
 struct pcpu_chunk {
 	struct list_head list;		/* linked to pcpu_slot lists */
-	struct rb_node rb_node;		/* key is chunk->vm->addr */
 	int free_size;			/* free bytes in the chunk */
 	int contig_hint;		/* max contiguous size hint */
 	struct vm_struct *vm;		/* mapped vmalloc region */
@@ -133,7 +131,7 @@ static int pcpu_reserved_chunk_limit;
  * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
  * protects allocation/reclaim paths, chunks and chunk->page arrays.
  * The latter is a spinlock and protects the index data structures -
- * chunk slots, rbtree, chunks and area maps in chunks.
+ * chunk slots, chunks and area maps in chunks.
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary. All actual memory
@@ -152,7 +150,6 @@ static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
 static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
-static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
 
 /* reclaim work to release fully free chunks, scheduled from free path */
 static void pcpu_reclaim(struct work_struct *work);
@@ -203,6 +200,18 @@ static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
 }
 
+/* set the pointer to a chunk in a page struct */
+static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
+{
+	page->index = (unsigned long)pcpu;
+}
+
+/* obtain pointer to a chunk from a page struct */
+static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
+{
+	return (struct pcpu_chunk *)page->index;
+}
+
 /**
  * pcpu_mem_alloc - allocate memory
  * @size: bytes to allocate
@@ -269,40 +278,9 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 	}
 }
 
-static struct rb_node **pcpu_chunk_rb_search(void *addr,
-					     struct rb_node **parentp)
-{
-	struct rb_node **p = &pcpu_addr_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct pcpu_chunk *chunk;
-
-	while (*p) {
-		parent = *p;
-		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
-
-		if (addr < chunk->vm->addr)
-			p = &(*p)->rb_left;
-		else if (addr > chunk->vm->addr)
-			p = &(*p)->rb_right;
-		else
-			break;
-	}
-
-	if (parentp)
-		*parentp = parent;
-	return p;
-}
-
 /**
- * pcpu_chunk_addr_search - search for chunk containing specified address
- * @addr: address to search for
- *
- * Look for chunk which might contain @addr. More specifically, it
- * searchs for the chunk with the highest start address which isn't
- * beyond @addr.
- *
- * CONTEXT:
- * pcpu_lock.
+ * pcpu_chunk_addr_search - determine chunk containing specified address
+ * @addr: address for which the chunk needs to be determined.
  *
  * RETURNS:
  * The address of the found chunk.
@@ -310,8 +288,6 @@ static struct rb_node **pcpu_chunk_rb_search(void *addr,
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
 	void *first_start = pcpu_first_chunk->vm->addr;
-	struct rb_node *n, *parent;
-	struct pcpu_chunk *chunk;
 
 	/* is it in the first chunk? */
 	if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
@@ -321,42 +297,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 		return pcpu_first_chunk;
 	}
 
-	/* nah... search the regular ones */
-	n = *pcpu_chunk_rb_search(addr, &parent);
-	if (!n) {
-		/* no exactly matching chunk, the parent is the closest */
-		n = parent;
-		BUG_ON(!n);
-	}
-	chunk = rb_entry(n, struct pcpu_chunk, rb_node);
-
-	if (addr < chunk->vm->addr) {
-		/* the parent was the next one, look for the previous one */
-		n = rb_prev(n);
-		BUG_ON(!n);
-		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
-	}
-
-	return chunk;
-}
-
-/**
- * pcpu_chunk_addr_insert - insert chunk into address rb tree
- * @new: chunk to insert
- *
- * Insert @new into address rb tree.
- *
- * CONTEXT:
- * pcpu_lock.
- */
-static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
-{
-	struct rb_node **p, *parent;
-
-	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
-	BUG_ON(*p);
-	rb_link_node(&new->rb_node, parent, p);
-	rb_insert_color(&new->rb_node, &pcpu_addr_root);
+	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
 /**
@@ -768,6 +709,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 						   alloc_mask, 0);
 			if (!*pagep)
 				goto err;
+			pcpu_set_page_chunk(*pagep, chunk);
 		}
 	}
 
@@ -892,7 +834,6 @@ restart:
 
 	spin_lock_irq(&pcpu_lock);
 	pcpu_chunk_relocate(chunk, -1);
-	pcpu_chunk_addr_insert(chunk);
 	goto restart;
 
 area_found:
@@ -981,7 +922,6 @@ static void pcpu_reclaim(struct work_struct *work)
 		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 			continue;
 
-		rb_erase(&chunk->rb_node, &pcpu_addr_root);
 		list_move(&chunk->list, &todo);
 	}
 