Diffstat (limited to 'mm/percpu.c')

 mm/percpu.c | 100 ++++----------------
 1 file changed, 20 insertions(+), 80 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index bf1bf1f4a729..c0b2c1a76e81 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -23,7 +23,7 @@
  * Allocation is done in offset-size areas of single unit space.  Ie,
  * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
- * percpu base registers UNIT_SIZE apart.
+ * percpu base registers pcpu_unit_size apart.
  *
  * There are usually many small percpu allocations many of them as
  * small as 4 bytes.  The allocator organizes chunks into lists
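The hunk above only updates the comment to name the runtime variable pcpu_unit_size instead of the stale UNIT_SIZE constant. The layout it describes reduces to plain address arithmetic; a minimal sketch, with pcpu_unit_va() as a hypothetical helper that is not part of this patch:

	/*
	 * Illustration only: per-CPU copies of one allocation sit
	 * pcpu_unit_size apart, so CPU c's copy of an area at offset
	 * off in a chunk is reachable by simple arithmetic.
	 */
	static void *pcpu_unit_va(void *chunk_base, int cpu,
				  int unit_size, int off)
	{
		return (char *)chunk_base + cpu * unit_size + off;
	}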
@@ -38,8 +38,8 @@
  * region and negative allocated.  Allocation inside a chunk is done
  * by scanning this map sequentially and serving the first matching
  * entry.  This is mostly copied from the percpu_modalloc() allocator.
- * Chunks are also linked into a rb tree to ease address to chunk
- * mapping during free.
+ * Chunks can be determined from the address using the index field
+ * in the page struct. The index field contains a pointer to the chunk.
  *
  * To use this allocator, arch code should do the followings.
  *
@@ -61,7 +61,6 @@
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/pfn.h>
-#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -88,7 +87,6 @@
 
 struct pcpu_chunk {
 	struct list_head	list;		/* linked to pcpu_slot lists */
-	struct rb_node		rb_node;	/* key is chunk->vm->addr */
 	int			free_size;	/* free bytes in the chunk */
 	int			contig_hint;	/* max contiguous size hint */
 	struct vm_struct	*vm;		/* mapped vmalloc region */
@@ -133,7 +131,7 @@ static int pcpu_reserved_chunk_limit;
  * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
  * protects allocation/reclaim paths, chunks and chunk->page arrays.
  * The latter is a spinlock and protects the index data structures -
- * chunk slots, rbtree, chunks and area maps in chunks.
+ * chunk slots, chunks and area maps in chunks.
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary.  All actual memory
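The line above is the only change this patch makes to the locking rules: pcpu_lock simply guards one structure fewer. As a rough sketch of the rule the comment documents (shape only, not code from this file), an allocation proceeds as:

	mutex_lock(&pcpu_alloc_mutex);	/* held across the whole operation */
	spin_lock_irq(&pcpu_lock);	/* only around index-structure access */
	/* ... scan chunk slots and area maps, reserve an area ... */
	spin_unlock_irq(&pcpu_lock);
	/* ... populate pages; may sleep with the mutex still held ... */
	mutex_unlock(&pcpu_alloc_mutex);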
@@ -152,7 +150,6 @@ static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
 static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
-static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
 
 /* reclaim work to release fully free chunks, scheduled from free path */
 static void pcpu_reclaim(struct work_struct *work);
@@ -203,6 +200,18 @@ static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
 }
 
+/* set the pointer to a chunk in a page struct */
+static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
+{
+	page->index = (unsigned long)pcpu;
+}
+
+/* obtain pointer to a chunk from a page struct */
+static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
+{
+	return (struct pcpu_chunk *)page->index;
+}
+
 /**
  * pcpu_mem_alloc - allocate memory
  * @size: bytes to allocate
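The two helpers added here reuse page->index, a field that has no other role for the pages backing these vmalloc-mapped chunks, as a back-pointer to the owning chunk. The intended invariant, as an illustrative round trip (not code from the patch):

	pcpu_set_page_chunk(page, chunk);		/* at population time */
	/* ... any time while the page still backs the chunk ... */
	BUG_ON(pcpu_get_page_chunk(page) != chunk);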
@@ -269,40 +278,9 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 	}
 }
 
-static struct rb_node **pcpu_chunk_rb_search(void *addr,
-					     struct rb_node **parentp)
-{
-	struct rb_node **p = &pcpu_addr_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct pcpu_chunk *chunk;
-
-	while (*p) {
-		parent = *p;
-		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
-
-		if (addr < chunk->vm->addr)
-			p = &(*p)->rb_left;
-		else if (addr > chunk->vm->addr)
-			p = &(*p)->rb_right;
-		else
-			break;
-	}
-
-	if (parentp)
-		*parentp = parent;
-	return p;
-}
-
 /**
- * pcpu_chunk_addr_search - search for chunk containing specified address
- * @addr: address to search for
- *
- * Look for chunk which might contain @addr.  More specifically, it
- * searchs for the chunk with the highest start address which isn't
- * beyond @addr.
- *
- * CONTEXT:
- * pcpu_lock.
+ * pcpu_chunk_addr_search - determine chunk containing specified address
+ * @addr: address for which the chunk needs to be determined.
  *
  * RETURNS:
  * The address of the found chunk.
@@ -310,8 +288,6 @@ static struct rb_node **pcpu_chunk_rb_search(void *addr,
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
 	void *first_start = pcpu_first_chunk->vm->addr;
-	struct rb_node *n, *parent;
-	struct pcpu_chunk *chunk;
 
 	/* is it in the first chunk? */
 	if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
@@ -321,42 +297,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 		return pcpu_first_chunk;
 	}
 
-	/* nah... search the regular ones */
-	n = *pcpu_chunk_rb_search(addr, &parent);
-	if (!n) {
-		/* no exactly matching chunk, the parent is the closest */
-		n = parent;
-		BUG_ON(!n);
-	}
-	chunk = rb_entry(n, struct pcpu_chunk, rb_node);
-
-	if (addr < chunk->vm->addr) {
-		/* the parent was the next one, look for the previous one */
-		n = rb_prev(n);
-		BUG_ON(!n);
-		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
-	}
-
-	return chunk;
-}
-
-/**
- * pcpu_chunk_addr_insert - insert chunk into address rb tree
- * @new: chunk to insert
- *
- * Insert @new into address rb tree.
- *
- * CONTEXT:
- * pcpu_lock.
- */
-static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
-{
-	struct rb_node **p, *parent;
-
-	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
-	BUG_ON(*p);
-	rb_link_node(&new->rb_node, parent, p);
-	rb_insert_color(&new->rb_node, &pcpu_addr_root);
+	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
 /**
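This hunk is the core of the change: resolving a dynamically allocated percpu address to its chunk becomes a page-table walk plus one pointer load instead of an rbtree search, and pcpu_chunk_addr_insert() goes away entirely. In essence, the new lookup path is:

	struct page *page = vmalloc_to_page(addr);	/* addr -> backing page */
	struct pcpu_chunk *chunk = pcpu_get_page_chunk(page);

The first-chunk check above it survives because addresses in the first chunk (including the reserved region) are still special-cased rather than resolved through their backing pages.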
@@ -768,6 +709,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 						   alloc_mask, 0);
 			if (!*pagep)
 				goto err;
+			pcpu_set_page_chunk(*pagep, chunk);
 		}
 	}
 
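This one-liner is what keeps the lookup valid: every page is tagged with its owning chunk at population time, before any area in it can be handed out, so any address later passed to the free path resolves to the right chunk. Nothing needs to be undone on depopulation, since the index field is simply ignored once the page is freed.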
@@ -892,7 +834,6 @@ restart:
 
 	spin_lock_irq(&pcpu_lock);
 	pcpu_chunk_relocate(chunk, -1);
-	pcpu_chunk_addr_insert(chunk);
 	goto restart;
 
 area_found:
@@ -981,7 +922,6 @@ static void pcpu_reclaim(struct work_struct *work)
 		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 			continue;
 
-		rb_erase(&chunk->rb_node, &pcpu_addr_root);
 		list_move(&chunk->list, &todo);
 	}
 