Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 48 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 7d9bc35e8ed2..4c8a419119da 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -63,6 +63,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/workqueue.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -118,6 +119,10 @@ static DEFINE_MUTEX(pcpu_mutex);
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
 
+/* reclaim work to release fully free chunks, scheduled from free path */
+static void pcpu_reclaim(struct work_struct *work);
+static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+
 static int __pcpu_size_to_slot(int size)
 {
 	int highbit = fls(size);	/* size is in bytes */
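
The hunk above uses the static workqueue pattern: DECLARE_WORK binds a work item to its handler at compile time, and schedule_work() later queues it on the shared system workqueue so the handler runs in process context. A minimal sketch of that pattern, assuming a hypothetical demo module (none of these names appear in percpu.c):

/*
 * Sketch of the DECLARE_WORK / schedule_work() pattern, with
 * hypothetical demo_* names used purely for illustration.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_reclaim(struct work_struct *work);
static DECLARE_WORK(demo_reclaim_work, demo_reclaim);

static void demo_reclaim(struct work_struct *work)
{
	/* runs later in process context, where sleeping is allowed */
	pr_info("demo: reclaim work running\n");
}

static int __init demo_init(void)
{
	/* queue on the system workqueue; returns false if already pending */
	schedule_work(&demo_reclaim_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* wait for a pending/running instance before the module goes away */
	cancel_work_sync(&demo_reclaim_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The cancel_work_sync() on the unload path matters in general: it keeps a still-queued handler from running after its code has been freed.
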
@@ -846,13 +851,37 @@ void *__alloc_reserved_percpu(size_t size, size_t align)
 	return pcpu_alloc(size, align, true);
 }
 
-static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
+/**
+ * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * @work: unused
+ *
+ * Reclaim all fully free chunks except for the first one.
+ */
+static void pcpu_reclaim(struct work_struct *work)
 {
-	WARN_ON(chunk->immutable);
-	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
-	list_del(&chunk->list);
-	rb_erase(&chunk->rb_node, &pcpu_addr_root);
-	free_pcpu_chunk(chunk);
+	LIST_HEAD(todo);
+	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+	struct pcpu_chunk *chunk, *next;
+
+	mutex_lock(&pcpu_mutex);
+
+	list_for_each_entry_safe(chunk, next, head, list) {
+		WARN_ON(chunk->immutable);
+
+		/* spare the first one */
+		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+			continue;
+
+		rb_erase(&chunk->rb_node, &pcpu_addr_root);
+		list_move(&chunk->list, &todo);
+	}
+
+	mutex_unlock(&pcpu_mutex);
+
+	list_for_each_entry_safe(chunk, next, &todo, list) {
+		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+		free_pcpu_chunk(chunk);
+	}
 }
 
 /**
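
pcpu_reclaim() above splits reclaim into two phases: victims are unlinked onto a private todo list while pcpu_mutex is held, then the expensive depopulate/free work runs with the mutex dropped, so other allocator users are not blocked during teardown. A sketch of that two-phase pattern under hypothetical demo_* names (not the allocator's own symbols):

/*
 * Two-phase reclaim: unlink under the lock, free outside it.
 * All demo_* names are illustrative stand-ins.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_chunk {
	struct list_head list;
};

static DEFINE_MUTEX(demo_mutex);
static LIST_HEAD(demo_free_chunks);

static void demo_reclaim_all_but_first(void)
{
	LIST_HEAD(todo);
	struct demo_chunk *chunk, *next;

	/* phase 1: cheap unlinking while the shared list is locked */
	mutex_lock(&demo_mutex);
	list_for_each_entry_safe(chunk, next, &demo_free_chunks, list) {
		/* spare the first entry so one free chunk stays cached */
		if (chunk == list_first_entry(&demo_free_chunks,
					      struct demo_chunk, list))
			continue;
		list_move(&chunk->list, &todo);
	}
	mutex_unlock(&demo_mutex);

	/* phase 2: slow teardown with the mutex dropped; may sleep */
	list_for_each_entry_safe(chunk, next, &todo, list) {
		list_del(&chunk->list);
		kfree(chunk);
	}
}
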
@@ -877,14 +906,13 @@ void free_percpu(void *ptr)
 
 	pcpu_free_area(chunk, off);
 
-	/* the chunk became fully free, kill one if there are other free ones */
+	/* if there are more than one fully free chunks, wake up grim reaper */
 	if (chunk->free_size == pcpu_unit_size) {
 		struct pcpu_chunk *pos;
 
-		list_for_each_entry(pos,
-				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
+		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 			if (pos != chunk) {
-				pcpu_kill_chunk(pos);
+				schedule_work(&pcpu_reclaim_work);
 				break;
 			}
 	}
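
The final hunk only triggers the reclaimer: when a chunk becomes fully free, the fully-free slot is scanned and the work item is scheduled only if a second free chunk exists, so one free chunk always stays cached for the next allocation. A sketch of that trigger shape, reusing the hypothetical demo_* names from the sketches above:

/*
 * Free-path trigger: defer reclaim only when a second fully free
 * chunk exists. Hypothetical names; demo_reclaim_work would be
 * declared as in the first sketch.
 */
static void demo_note_fully_free(struct demo_chunk *chunk)
{
	struct demo_chunk *pos;

	/* called with demo_mutex held, mirroring free_percpu() */
	list_for_each_entry(pos, &demo_free_chunks, list)
		if (pos != chunk) {
			/* schedule_work() never sleeps, so it is safe here */
			schedule_work(&demo_reclaim_work);
			break;
		}
}

Deferring the actual teardown to the work item keeps the free path fast while still bounding the number of idle fully free chunks.
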