Diffstat (limited to 'include/linux/list_lru.h')

 include/linux/list_lru.h | 82 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 15 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index f3434533fbf8..2a6b9947aaa3 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -9,6 +9,9 @@
 
 #include <linux/list.h>
 #include <linux/nodemask.h>
+#include <linux/shrinker.h>
+
+struct mem_cgroup;
 
 /* list_lru_walk_cb has to always return one of those */
 enum lru_status {
@@ -21,24 +24,45 @@ enum lru_status
 				   internally, but has to return locked. */
 };
 
-struct list_lru_node {
-	spinlock_t		lock;
+struct list_lru_one {
 	struct list_head	list;
-	/* kept as signed so we can catch imbalance bugs */
+	/* may become negative during memcg reparenting */
 	long			nr_items;
+};
+
+struct list_lru_memcg {
+	/* array of per cgroup lists, indexed by memcg_cache_id */
+	struct list_lru_one	*lru[0];
+};
+
+struct list_lru_node {
+	/* protects all lists on the node, including per cgroup */
+	spinlock_t		lock;
+	/* global list, used for the root cgroup in cgroup aware lrus */
+	struct list_lru_one	lru;
+#ifdef CONFIG_MEMCG_KMEM
+	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
+	struct list_lru_memcg	*memcg_lrus;
+#endif
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
 	struct list_lru_node	*node;
-	nodemask_t		active_nodes;
+#ifdef CONFIG_MEMCG_KMEM
+	struct list_head	list;
+#endif
 };
 
 void list_lru_destroy(struct list_lru *lru);
-int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);
-static inline int list_lru_init(struct list_lru *lru)
-{
-	return list_lru_init_key(lru, NULL);
-}
+int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+		    struct lock_class_key *key);
+
+#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
+#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
+#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)
+
+int memcg_update_all_list_lrus(int num_memcgs);
+void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
 
 /**
  * list_lru_add: add an element to the lru list's tail
@@ -72,32 +96,48 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item);
 bool list_lru_del(struct list_lru *lru, struct list_head *item);
 
 /**
- * list_lru_count_node: return the number of objects currently held by @lru
+ * list_lru_count_one: return the number of objects currently held by @lru
  * @lru: the lru pointer.
  * @nid: the node id to count from.
+ * @memcg: the cgroup to count from.
  *
  * Always return a non-negative number, 0 for empty lists. There is no
  * guarantee that the list is not updated while the count is being computed.
  * Callers that want such a guarantee need to provide an outer lock.
  */
+unsigned long list_lru_count_one(struct list_lru *lru,
+				 int nid, struct mem_cgroup *memcg);
 unsigned long list_lru_count_node(struct list_lru *lru, int nid);
+
+static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
+						  struct shrink_control *sc)
+{
+	return list_lru_count_one(lru, sc->nid, sc->memcg);
+}
+
 static inline unsigned long list_lru_count(struct list_lru *lru)
 {
 	long count = 0;
 	int nid;
 
-	for_each_node_mask(nid, lru->active_nodes)
+	for_each_node_state(nid, N_NORMAL_MEMORY)
 		count += list_lru_count_node(lru, nid);
 
 	return count;
 }
 
-typedef enum lru_status
-(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
+void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
+void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
+			   struct list_head *head);
+
+typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
+		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+
 /**
- * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
  * @lru: the lru pointer.
  * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
  * @isolate: callback function that is resposible for deciding what to do with
  *           the item currently being scanned
  * @cb_arg: opaque type that will be passed to @isolate
@@ -115,18 +155,30 @@ typedef enum lru_status
  *
  * Return value: the number of objects effectively removed from the LRU.
  */
+unsigned long list_lru_walk_one(struct list_lru *lru,
+				int nid, struct mem_cgroup *memcg,
+				list_lru_walk_cb isolate, void *cb_arg,
+				unsigned long *nr_to_walk);
 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 				 list_lru_walk_cb isolate, void *cb_arg,
 				 unsigned long *nr_to_walk);
 
 static inline unsigned long
+list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
+		     list_lru_walk_cb isolate, void *cb_arg)
+{
+	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
+				 &sc->nr_to_scan);
+}
+
+static inline unsigned long
 list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 	      void *cb_arg, unsigned long nr_to_walk)
 {
 	long isolated = 0;
 	int nid;
 
-	for_each_node_mask(nid, lru->active_nodes) {
+	for_each_node_state(nid, N_NORMAL_MEMORY) {
 		isolated += list_lru_walk_node(lru, nid, isolate,
 					       cb_arg, &nr_to_walk);
 		if (nr_to_walk <= 0)
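
The new init flavours are easiest to see from a caller's perspective. Below is a minimal sketch, not part of this patch, of a module that opts in to the cgroup-aware behaviour; all my_cache_* names are hypothetical:

#include <linux/list_lru.h>
#include <linux/module.h>

/* hypothetical cache-wide lru: one list per node, and per memcg */
static struct list_lru my_cache_lru;

static int __init my_cache_init(void)
{
	/*
	 * Cgroup-aware variant: objects added with list_lru_add() are
	 * accounted per node and per memcg. Plain list_lru_init()
	 * would keep the old per-node-only layout.
	 */
	return list_lru_init_memcg(&my_cache_lru);
}

static void __exit my_cache_exit(void)
{
	list_lru_destroy(&my_cache_lru);
}

module_init(my_cache_init);
module_exit(my_cache_exit);
MODULE_LICENSE("GPL");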
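The walk callback now receives the struct list_lru_one actually being walked, so a callback detaches items through the new list_lru_isolate()/list_lru_isolate_move() helpers, which keep nr_items consistent, rather than open-coding list_del(). A hedged sketch of such a callback; struct my_object and the dispose-list convention are hypothetical:

struct my_object {
	struct list_head lru;	/* linked into my_cache_lru */
	/* ... payload ... */
};

static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *list,
				  spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/* move off the per-node/per-memcg list onto a private
	   dispose list; runs under *lock, held by the walker */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}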
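Finally, list_lru_shrink_count() and list_lru_shrink_walk() exist so a shrinker can forward sc->nid and sc->memcg without unpacking the shrink_control itself. A sketch of the resulting shrinker glue, reusing the hypothetical names above (SHRINKER_MEMCG_AWARE is assumed from the companion per-memcg shrinker patches in the same series):

static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	return list_lru_shrink_count(&my_cache_lru, sc);
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_shrink_walk(&my_cache_lru, sc, my_isolate, &dispose);

	/* free the isolated objects outside the lru spinlock */
	while (!list_empty(&dispose)) {
		struct my_object *obj;

		obj = list_first_entry(&dispose, struct my_object, lru);
		list_del_init(&obj->lru);
		kfree(obj);
	}
	return freed;
}

static struct shrinker my_shrinker = {
	.count_objects	= my_shrink_count,
	.scan_objects	= my_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
	/* pairs with list_lru_init_memcg() above */
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

my_cache_init() would additionally call register_shrinker(&my_shrinker); the kfree() above assumes the objects came from kmalloc(), where a real cache would use its own free path.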
