author		Lars Ellenberg <lars.ellenberg@linbit.com>	2011-02-21 07:21:01 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2011-10-14 10:47:45 -0400
commit		46a15bc3ec425b546d140581c28192ab7877ddc4 (patch)
tree		b7a8940d3c26129336429e61f831dfd5b5de30c7 /include
parent		45dfffebd08c1445493bfa8f0ec05b38714b9b2d (diff)
lru_cache: allow multiple changes per transaction
Allow multiple changes to the active set of elements in lru_cache.
The only current user of lru_cache, drbd, is driving this generalisation.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/lru_cache.h	68
1 file changed, 44 insertions(+), 24 deletions(-)
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 4cceafb0732d..cbafae40c649 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -166,9 +166,11 @@ struct lc_element {
 	/* if we want to track a larger set of objects,
 	 * it needs to become arch independend u64 */
 	unsigned lc_number;
-
 	/* special label when on free list */
 #define LC_FREE (~0U)
+
+	/* for pending changes */
+	unsigned lc_new_number;
 };
 
 struct lru_cache {
@@ -176,6 +178,7 @@ struct lru_cache {
 	struct list_head lru;
 	struct list_head free;
 	struct list_head in_use;
+	struct list_head to_be_changed;
 
 	/* the pre-created kmem cache to allocate the objects from */
 	struct kmem_cache *lc_cache;
@@ -186,7 +189,7 @@ struct lru_cache {
 	size_t element_off;
 
 	/* number of elements (indices) */
-	unsigned int  nr_elements;
+	unsigned int nr_elements;
 	/* Arbitrary limit on maximum tracked objects. Practical limit is much
 	 * lower due to allocation failures, probably. For typical use cases,
 	 * nr_elements should be a few thousand at most.
@@ -194,18 +197,19 @@ struct lru_cache {
 	 * 8 high bits of .lc_index to be overloaded with flags in the future. */
 #define LC_MAX_ACTIVE (1<<24)
 
+	/* allow to accumulate a few (index:label) changes,
+	 * but no more than max_pending_changes */
+	unsigned int max_pending_changes;
+	/* number of elements currently on to_be_changed list */
+	unsigned int pending_changes;
+
 	/* statistics */
-	unsigned used; /* number of lelements currently on in_use list */
-	unsigned long hits, misses, starving, dirty, changed;
+	unsigned used; /* number of elements currently on in_use list */
+	unsigned long hits, misses, starving, locked, changed;
 
 	/* see below: flag-bits for lru_cache */
 	unsigned long flags;
 
-	/* when changing the label of an index element */
-	unsigned int new_number;
-
-	/* for paranoia when changing the label of an index element */
-	struct lc_element *changing_element;
 
 	void *lc_private;
 	const char *name;
@@ -221,10 +225,15 @@ enum {
 	/* debugging aid, to catch concurrent access early.
 	 * user needs to guarantee exclusive access by proper locking! */
 	__LC_PARANOIA,
-	/* if we need to change the set, but currently there is a changing
-	 * transaction pending, we are "dirty", and must deferr further
-	 * changing requests */
+
+	/* annotate that the set is "dirty", possibly accumulating further
+	 * changes, until a transaction is finally triggered */
 	__LC_DIRTY,
+
+	/* Locked, no further changes allowed.
+	 * Also used to serialize changing transactions. */
+	__LC_LOCKED,
+
 	/* if we need to change the set, but currently there is no free nor
 	 * unused element available, we are "starving", and must not give out
 	 * further references, to guarantee that eventually some refcnt will
@@ -236,9 +245,11 @@ enum {
 };
 #define LC_PARANOIA (1<<__LC_PARANOIA)
 #define LC_DIRTY (1<<__LC_DIRTY)
+#define LC_LOCKED (1<<__LC_LOCKED)
 #define LC_STARVING (1<<__LC_STARVING)
 
 extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+		unsigned max_pending_changes,
 		unsigned e_count, size_t e_size, size_t e_off);
 extern void lc_reset(struct lru_cache *lc);
 extern void lc_destroy(struct lru_cache *lc);
@@ -249,7 +260,7 @@ extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
 extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
-extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
+extern void lc_committed(struct lru_cache *lc);
 
 struct seq_file;
 extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
@@ -258,31 +269,40 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
 		void (*detail) (struct seq_file *, struct lc_element *));
 
 /**
- * lc_try_lock - can be used to stop lc_get() from changing the tracked set
+ * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
  * @lc: the lru cache to operate on
  *
- * Note that the reference counts and order on the active and lru lists may
- * still change. Returns true if we acquired the lock.
+ * Allows (expects) the set to be "dirty". Note that the reference counts and
+ * order on the active and lru lists may still change. Used to serialize
+ * changing transactions. Returns true if we aquired the lock.
  */
-static inline int lc_try_lock(struct lru_cache *lc)
+static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
 {
-	return !test_and_set_bit(__LC_DIRTY, &lc->flags);
+	return !test_and_set_bit(__LC_LOCKED, &lc->flags);
 }
 
 /**
+ * lc_try_lock - variant to stop lc_get() from changing the tracked set
+ * @lc: the lru cache to operate on
+ *
+ * Note that the reference counts and order on the active and lru lists may
+ * still change. Only works on a "clean" set. Returns true if we aquired the
+ * lock, which means there are no pending changes, and any further attempt to
+ * change the set will not succeed until the next lc_unlock().
+ */
+extern int lc_try_lock(struct lru_cache *lc);
+
+/**
  * lc_unlock - unlock @lc, allow lc_get() to change the set again
  * @lc: the lru cache to operate on
  */
 static inline void lc_unlock(struct lru_cache *lc)
 {
-	clear_bit_unlock(__LC_DIRTY, &lc->flags);
+	clear_bit(__LC_DIRTY, &lc->flags);
+	clear_bit_unlock(__LC_LOCKED, &lc->flags);
 }
 
-static inline int lc_is_used(struct lru_cache *lc, unsigned int enr)
-{
-	struct lc_element *e = lc_find(lc, enr);
-	return e && e->refcnt;
-}
+extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
 
 #define lc_entry(ptr, type, member) \
 	container_of(ptr, type, member)
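
For orientation, here is an illustrative caller-side sketch (C, not part of this patch) of how the new multi-change API might be used: accumulate label changes with lc_get(), serialize the transaction with lc_try_lock_for_transaction(), persist the queued changes, then apply them with lc_committed() and release with lc_unlock(). The spinlock, the enrs[] array and write_transaction() are hypothetical stand-ins for the caller's own infrastructure; drbd's real usage lives in its activity-log code.

#include <linux/lru_cache.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the caller's persistence step; in drbd this
 * would be an on-disk activity-log transaction covering all queued
 * (index:label) changes on lc->to_be_changed. */
static void write_transaction(struct lru_cache *lc)
{
	/* ... persist the pending changes ... */
}

static void example_batch_update(struct lru_cache *lc, spinlock_t *lock,
				 const unsigned int *enrs, unsigned int count)
{
	unsigned int i;

	/* Accumulate up to max_pending_changes label changes.  lc_get()
	 * marks the set "dirty" and queues new labels on to_be_changed;
	 * it returns NULL when starving or when the pending-change limit
	 * is hit, in which case a real caller would wait and retry. */
	spin_lock(lock);
	for (i = 0; i < count; i++)
		(void)lc_get(lc, enrs[i]);
	spin_unlock(lock);

	if (lc->pending_changes && lc_try_lock_for_transaction(lc)) {
		write_transaction(lc);

		spin_lock(lock);
		lc_committed(lc);	/* apply all queued label changes */
		spin_unlock(lock);

		lc_unlock(lc);		/* clears LC_DIRTY and LC_LOCKED */
	}
}

A real caller would keep the struct lc_element pointers returned by lc_get() and drop them with lc_put() once the I/O covered by each element has completed.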