author		Tejun Heo <tj@kernel.org>	2009-03-06 10:44:11 -0500
committer	Tejun Heo <tj@kernel.org>	2009-03-06 10:44:11 -0500
commit		a56dbddf06b653ef9c04ca3767f260fd31ccebab (patch)
tree		803293315a281a88f807fa3506ac5bef0a910ba4
parent		9f7dcf224bd09ec9ebcbfb383bf2c465e0e0b03d (diff)
percpu: move fully free chunk reclamation into a work
Impact: code reorganization for later changes
Reclaim fully free chunks from a workqueue work item instead of doing it
directly from the free path. This change prepares for upcoming locking
changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
 mm/percpu.c | 48 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 7d9bc35e8ed2..4c8a419119da 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -63,6 +63,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/workqueue.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -118,6 +119,10 @@ static DEFINE_MUTEX(pcpu_mutex);
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
 
+/* reclaim work to release fully free chunks, scheduled from free path */
+static void pcpu_reclaim(struct work_struct *work);
+static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+
 static int __pcpu_size_to_slot(int size)
 {
 	int highbit = fls(size);	/* size is in bytes */
@@ -846,13 +851,37 @@ void *__alloc_reserved_percpu(size_t size, size_t align)
 	return pcpu_alloc(size, align, true);
 }
 
-static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
+/**
+ * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * @work: unused
+ *
+ * Reclaim all fully free chunks except for the first one.
+ */
+static void pcpu_reclaim(struct work_struct *work)
 {
-	WARN_ON(chunk->immutable);
-	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
-	list_del(&chunk->list);
-	rb_erase(&chunk->rb_node, &pcpu_addr_root);
-	free_pcpu_chunk(chunk);
+	LIST_HEAD(todo);
+	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+	struct pcpu_chunk *chunk, *next;
+
+	mutex_lock(&pcpu_mutex);
+
+	list_for_each_entry_safe(chunk, next, head, list) {
+		WARN_ON(chunk->immutable);
+
+		/* spare the first one */
+		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+			continue;
+
+		rb_erase(&chunk->rb_node, &pcpu_addr_root);
+		list_move(&chunk->list, &todo);
+	}
+
+	mutex_unlock(&pcpu_mutex);
+
+	list_for_each_entry_safe(chunk, next, &todo, list) {
+		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+		free_pcpu_chunk(chunk);
+	}
 }
 
 /**
@@ -877,14 +906,13 @@ void free_percpu(void *ptr)
 
 	pcpu_free_area(chunk, off);
 
-	/* the chunk became fully free, kill one if there are other free ones */
+	/* if there are more than one fully free chunks, wake up grim reaper */
 	if (chunk->free_size == pcpu_unit_size) {
 		struct pcpu_chunk *pos;
 
-		list_for_each_entry(pos,
-				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
+		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 			if (pos != chunk) {
-				pcpu_kill_chunk(pos);
+				schedule_work(&pcpu_reclaim_work);
 				break;
 			}
 	}
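
The shape of the change is worth spelling out: the free path now only detects
that a chunk has become reclaimable and schedules a work item, while the work
item later detaches the victims from the shared structures under pcpu_mutex
and performs the expensive depopulation and freeing after dropping the lock.
Below is a minimal sketch of that general pattern; all names in it are
hypothetical and are not taken from the patch.

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	/* hypothetical object kept on a mutex-protected list */
	struct obj {
		struct list_head list;
	};

	static DEFINE_MUTEX(obj_mutex);
	static LIST_HEAD(obj_free_list);	/* objects eligible for reclaim */

	static void obj_reclaim(struct work_struct *work)
	{
		LIST_HEAD(todo);
		struct obj *o, *next;

		/* detach victims onto a private list while holding the lock... */
		mutex_lock(&obj_mutex);
		list_splice_init(&obj_free_list, &todo);
		mutex_unlock(&obj_mutex);

		/* ...and do the expensive freeing outside of it */
		list_for_each_entry_safe(o, next, &todo, list)
			kfree(o);
	}
	static DECLARE_WORK(obj_reclaim_work, obj_reclaim);

	/* fast path: record the state and defer the heavy lifting */
	static void obj_release(struct obj *o)
	{
		mutex_lock(&obj_mutex);
		list_move(&o->list, &obj_free_list);
		mutex_unlock(&obj_mutex);

		/* unconditional here; the patch only kicks the work when a
		 * second fully free chunk exists */
		schedule_work(&obj_reclaim_work);
	}

This is the same splice-under-lock, free-outside-lock split that
pcpu_reclaim() uses above. It keeps the mutex hold time short, and deferring
to a workqueue means the free path never performs the heavy teardown itself,
which is presumably what enables the locking changes the commit message
alludes to.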