Diffstat (limited to 'include/linux/lru_cache.h')
-rw-r--r--   include/linux/lru_cache.h   71
1 file changed, 26 insertions(+), 45 deletions(-)
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 4019013c659..7a71ffad037 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -52,8 +52,8 @@ We replicate IO (more or less synchronously) to local and remote disk.
 
 For crash recovery after replication node failure,
   we need to resync all regions that have been target of in-flight WRITE IO
- (in use, or "hot", regions), as we don't know whether or not those WRITEs
- have made it to stable storage.
+ (in use, or "hot", regions), as we don't know wether or not those WRITEs have
+ made it to stable storage.
 
  To avoid a "full resync", we need to persistently track these regions.
 
@@ -166,11 +166,9 @@ struct lc_element {
 	/* if we want to track a larger set of objects,
 	 * it needs to become arch independend u64 */
 	unsigned lc_number;
+
 	/* special label when on free list */
 #define LC_FREE (~0U)
-
-	/* for pending changes */
-	unsigned lc_new_number;
 };
 
 struct lru_cache {
@@ -178,7 +176,6 @@ struct lru_cache {
 	struct list_head lru;
 	struct list_head free;
 	struct list_head in_use;
-	struct list_head to_be_changed;
 
 	/* the pre-created kmem cache to allocate the objects from */
 	struct kmem_cache *lc_cache;
@@ -189,7 +186,7 @@ struct lru_cache {
 	size_t element_off;
 
 	/* number of elements (indices) */
-	unsigned int nr_elements;
+	unsigned int  nr_elements;
 	/* Arbitrary limit on maximum tracked objects. Practical limit is much
 	 * lower due to allocation failures, probably. For typical use cases,
 	 * nr_elements should be a few thousand at most.
@@ -197,19 +194,18 @@ struct lru_cache {
 	 * 8 high bits of .lc_index to be overloaded with flags in the future. */
 #define LC_MAX_ACTIVE (1<<24)
 
-	/* allow to accumulate a few (index:label) changes,
-	 * but no more than max_pending_changes */
-	unsigned int max_pending_changes;
-	/* number of elements currently on to_be_changed list */
-	unsigned int pending_changes;
-
 	/* statistics */
-	unsigned used; /* number of elements currently on in_use list */
-	unsigned long hits, misses, starving, locked, changed;
+	unsigned used; /* number of lelements currently on in_use list */
+	unsigned long hits, misses, starving, dirty, changed;
 
 	/* see below: flag-bits for lru_cache */
 	unsigned long flags;
 
+	/* when changing the label of an index element */
+	unsigned int new_number;
+
+	/* for paranoia when changing the label of an index element */
+	struct lc_element *changing_element;
 
 	void *lc_private;
 	const char *name;
@@ -225,15 +221,10 @@ enum {
 	/* debugging aid, to catch concurrent access early.
 	 * user needs to guarantee exclusive access by proper locking! */
 	__LC_PARANOIA,
-
-	/* annotate that the set is "dirty", possibly accumulating further
-	 * changes, until a transaction is finally triggered */
+	/* if we need to change the set, but currently there is a changing
+	 * transaction pending, we are "dirty", and must deferr further
+	 * changing requests */
 	__LC_DIRTY,
-
-	/* Locked, no further changes allowed.
-	 * Also used to serialize changing transactions. */
-	__LC_LOCKED,
-
 	/* if we need to change the set, but currently there is no free nor
 	 * unused element available, we are "starving", and must not give out
 	 * further references, to guarantee that eventually some refcnt will
@@ -245,11 +236,9 @@ enum {
 };
 #define LC_PARANOIA (1<<__LC_PARANOIA)
 #define LC_DIRTY (1<<__LC_DIRTY)
-#define LC_LOCKED (1<<__LC_LOCKED)
 #define LC_STARVING (1<<__LC_STARVING)
 
 extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
-		unsigned max_pending_changes,
 		unsigned e_count, size_t e_size, size_t e_off);
 extern void lc_reset(struct lru_cache *lc);
 extern void lc_destroy(struct lru_cache *lc);
@@ -260,7 +249,7 @@ extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
 extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
-extern void lc_committed(struct lru_cache *lc);
+extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
 
 struct seq_file;
 extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
@@ -269,40 +258,32 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
 		void (*detail) (struct seq_file *, struct lc_element *));
 
 /**
- * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
+ * lc_try_lock - can be used to stop lc_get() from changing the tracked set
  * @lc: the lru cache to operate on
  *
- * Allows (expects) the set to be "dirty". Note that the reference counts and
- * order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * Note that the reference counts and order on the active and lru lists may
+ * still change. Returns true if we acquired the lock.
  */
-static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
+static inline int lc_try_lock(struct lru_cache *lc)
 {
-	return !test_and_set_bit(__LC_LOCKED, &lc->flags);
+	return !test_and_set_bit(__LC_DIRTY, &lc->flags);
 }
 
 /**
- * lc_try_lock - variant to stop lc_get() from changing the tracked set
- * @lc: the lru cache to operate on
- *
- * Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
- * lock, which means there are no pending changes, and any further attempt to
- * change the set will not succeed until the next lc_unlock().
- */
-extern int lc_try_lock(struct lru_cache *lc);
-
-/**
  * lc_unlock - unlock @lc, allow lc_get() to change the set again
  * @lc: the lru cache to operate on
  */
 static inline void lc_unlock(struct lru_cache *lc)
 {
 	clear_bit(__LC_DIRTY, &lc->flags);
-	clear_bit_unlock(__LC_LOCKED, &lc->flags);
+	smp_mb__after_clear_bit();
 }
 
-extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
+static inline int lc_is_used(struct lru_cache *lc, unsigned int enr)
+{
+	struct lc_element *e = lc_find(lc, enr);
+	return e && e->refcnt;
+}
 
 #define lc_entry(ptr, type, member) \
 	container_of(ptr, type, member)
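
For context, a minimal hypothetical sketch of how a caller might drive the interface as it looks after this patch (the pre-transaction API with lc_changed()), loosely following the pattern of DRBD's activity log. This is not part of the patch; the names my_extent, my_cache, my_lru, my_lock and my_write_label_to_disk() are illustrative assumptions, not symbols from this header.

#include <linux/lru_cache.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

/* Illustrative user object embedding an lc_element (hypothetical). */
struct my_extent {
	struct lc_element lce;
	/* ... per-extent state ... */
};

static struct kmem_cache *my_cache;	/* backing slab for my_extent objects */
static struct lru_cache *my_lru;	/* the tracked "hot" set */
static DEFINE_SPINLOCK(my_lock);	/* caller-provided exclusive access, as the header requires */

/* Hypothetical stand-in for the caller's own on-disk metadata update. */
static void my_write_label_to_disk(unsigned int index, unsigned int label)
{
	/* e.g. persist the (index: label) record and flush it */
}

static int my_init(void)
{
	my_cache = kmem_cache_create("my_extent", sizeof(struct my_extent), 0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* Note: with this version of the API, lc_create() takes no
	 * max_pending_changes argument; only the element count, size and
	 * offset of the embedded lc_element are passed. */
	my_lru = lc_create("my_lru", my_cache, 61,
			   sizeof(struct my_extent),
			   offsetof(struct my_extent, lce));
	if (!my_lru) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}

/* Mark extent number 'enr' as hot.  Returns the element with its refcnt
 * raised, or NULL if the cache is starving/dirty and the caller must retry. */
static struct lc_element *my_mark_hot(unsigned int enr)
{
	struct lc_element *e;

	spin_lock_irq(&my_lock);
	e = lc_get(my_lru, enr);
	if (e && e->lc_number != enr) {
		/* lc_get() handed back a recyclable element still carrying
		 * its old label; the set is "dirty" until the new label has
		 * been persisted and lc_changed() commits it. */
		spin_unlock_irq(&my_lock);
		my_write_label_to_disk(e->lc_index, enr);
		spin_lock_irq(&my_lock);
		lc_changed(my_lru, e);
	}
	spin_unlock_irq(&my_lock);
	return e;
}

/* Drop the reference taken by my_mark_hot(). */
static void my_done(struct lc_element *e)
{
	spin_lock_irq(&my_lock);
	lc_put(my_lru, e);
	spin_unlock_irq(&my_lock);
}

As the restored __LC_DIRTY comment describes, only one label change can be pending at a time in this interface: lc_get() may return an element whose lc_number still holds the previous label, and the caller has to persist the change and call lc_changed() before further changes to the tracked set are accepted.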