author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 16:39:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 16:39:11 -0500
commit		9228ff90387e276ad67b10c0eb525c9d6a57d5e9 (patch)
tree		e7c87b68daba7cf7ca4c342c6b52165bd78fbe16 /include/linux/lru_cache.h
parent		9360b53661a2c7754517b2925580055bacc8ec38 (diff)
parent		d2ec180c23a5a1bfe34d8638b0342a47c00cf70f (diff)
Merge branch 'for-3.8/drivers' of git://git.kernel.dk/linux-block
Pull block driver update from Jens Axboe:
"Now that the core bits are in, here are the driver bits for 3.8. The
branch contains:
- A huge pile of drbd bits that were dumped from the 3.7 merge
  window. Following that, it was made perfectly clear both that
  there will be no more over-the-wall pulls and how the situation
  on individual pulls can be improved.
- A few cleanups from Akinobu Mita for drbd and cciss.
- Queue improvement for loop from Lukas. This grew into adding a
  generic interface for waiting/checking an event with a specific
  lock held, allowing this to be pulled out of md; now loop and
  drbd are using it too (see the usage sketch after this list).
- A few fixes for xen back/front block driver from Roger Pau Monne.
- Partition improvements from Stephen Warren, allowing partition
  UUID to be used as an identifier."
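The wait_event_lock_irq() interface mentioned above lets a caller sleep
on a wait queue while holding a spinlock: the macro drops the lock (and
re-enables interrupts) around the actual sleep and reacquires it before
every re-check of the condition. A minimal sketch of the calling
pattern; the queue_lock/queue_wait/queue_depth names are hypothetical,
not from the tree:

	#include <linux/wait.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(queue_lock);
	static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
	static unsigned int queue_depth;	/* protected by queue_lock */

	static void consume_one(void)
	{
		spin_lock_irq(&queue_lock);
		/* Sleeps with queue_lock dropped, retakes it before each
		 * re-check; returns with the lock held and the condition
		 * true. */
		wait_event_lock_irq(queue_wait, queue_depth > 0, queue_lock);
		queue_depth--;
		spin_unlock_irq(&queue_lock);
	}

	static void produce_one(void)
	{
		spin_lock_irq(&queue_lock);
		queue_depth++;
		spin_unlock_irq(&queue_lock);
		wake_up(&queue_wait);
	}

This is the pattern that used to live in md and is now shared by loop
and drbd.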
* 'for-3.8/drivers' of git://git.kernel.dk/linux-block: (609 commits)
drbd: update Kconfig to match current dependencies
drbd: Fix drbdsetup wait-connect, wait-sync etc... commands
drbd: close race between drbd_set_role and drbd_connect
drbd: respect no-md-barriers setting also when changed online via disk-options
drbd: Remove obsolete check
drbd: fixup after wait_event_lock_irq() addition to generic code
loop: Limit the number of requests in the bio list
wait: add wait_event_lock_irq() interface
xen-blkfront: free allocated page
xen-blkback: move free persistent grants code
block: partition: msdos: provide UUIDs for partitions
init: reduce PARTUUID min length to 1 from 36
block: store partition_meta_info.uuid as a string
cciss: use check_signature()
cciss: cleanup bitops usage
drbd: use copy_highpage
drbd: if the replication link breaks during handshake, keep retrying
drbd: check return of kmalloc in receive_uuids
drbd: Broadcast sync progress no more often than once per second
drbd: don't try to clear bits once the disk has failed
...
Diffstat (limited to 'include/linux/lru_cache.h')
-rw-r--r--	include/linux/lru_cache.h	67
1 file changed, 43 insertions(+), 24 deletions(-)
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index cafc7f99e124..4019013c6593 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -166,9 +166,11 @@ struct lc_element {
 	/* if we want to track a larger set of objects,
 	 * it needs to become arch independend u64 */
 	unsigned lc_number;
-
 	/* special label when on free list */
 #define LC_FREE	(~0U)
+
+	/* for pending changes */
+	unsigned lc_new_number;
 };
 
 struct lru_cache {
@@ -176,6 +178,7 @@ struct lru_cache {
 	struct list_head lru;
 	struct list_head free;
 	struct list_head in_use;
+	struct list_head to_be_changed;
 
 	/* the pre-created kmem cache to allocate the objects from */
 	struct kmem_cache *lc_cache;
@@ -186,7 +189,7 @@ struct lru_cache {
 	size_t element_off;
 
 	/* number of elements (indices) */
-	unsigned int  nr_elements;
+	unsigned int nr_elements;
 	/* Arbitrary limit on maximum tracked objects. Practical limit is much
 	 * lower due to allocation failures, probably. For typical use cases,
 	 * nr_elements should be a few thousand at most.
@@ -194,18 +197,19 @@ struct lru_cache {
 	 * 8 high bits of .lc_index to be overloaded with flags in the future. */
 #define LC_MAX_ACTIVE	(1<<24)
 
+	/* allow to accumulate a few (index:label) changes,
+	 * but no more than max_pending_changes */
+	unsigned int max_pending_changes;
+	/* number of elements currently on to_be_changed list */
+	unsigned int pending_changes;
+
 	/* statistics */
-	unsigned used; /* number of lelements currently on in_use list */
-	unsigned long hits, misses, starving, dirty, changed;
+	unsigned used; /* number of elements currently on in_use list */
+	unsigned long hits, misses, starving, locked, changed;
 
 	/* see below: flag-bits for lru_cache */
 	unsigned long flags;
 
-	/* when changing the label of an index element */
-	unsigned int new_number;
-
-	/* for paranoia when changing the label of an index element */
-	struct lc_element *changing_element;
 
 	void *lc_private;
 	const char *name;
@@ -221,10 +225,15 @@ enum {
 	/* debugging aid, to catch concurrent access early.
 	 * user needs to guarantee exclusive access by proper locking! */
 	__LC_PARANOIA,
-	/* if we need to change the set, but currently there is a changing
-	 * transaction pending, we are "dirty", and must deferr further
-	 * changing requests */
+
+	/* annotate that the set is "dirty", possibly accumulating further
+	 * changes, until a transaction is finally triggered */
 	__LC_DIRTY,
+
+	/* Locked, no further changes allowed.
+	 * Also used to serialize changing transactions. */
+	__LC_LOCKED,
+
 	/* if we need to change the set, but currently there is no free nor
 	 * unused element available, we are "starving", and must not give out
 	 * further references, to guarantee that eventually some refcnt will
@@ -236,9 +245,11 @@ enum {
 };
 #define LC_PARANOIA (1<<__LC_PARANOIA)
 #define LC_DIRTY (1<<__LC_DIRTY)
+#define LC_LOCKED (1<<__LC_LOCKED)
 #define LC_STARVING (1<<__LC_STARVING)
 
 extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+		unsigned max_pending_changes,
 		unsigned e_count, size_t e_size, size_t e_off);
 extern void lc_reset(struct lru_cache *lc);
 extern void lc_destroy(struct lru_cache *lc);
@@ -249,7 +260,7 @@ extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
 extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
-extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
+extern void lc_committed(struct lru_cache *lc);
 
 struct seq_file;
 extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
@@ -258,32 +269,40 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
 		void (*detail) (struct seq_file *, struct lc_element *));
 
 /**
- * lc_try_lock - can be used to stop lc_get() from changing the tracked set
+ * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
  * @lc: the lru cache to operate on
  *
- * Note that the reference counts and order on the active and lru lists may
- * still change. Returns true if we acquired the lock.
+ * Allows (expects) the set to be "dirty". Note that the reference counts and
+ * order on the active and lru lists may still change. Used to serialize
+ * changing transactions. Returns true if we aquired the lock.
  */
-static inline int lc_try_lock(struct lru_cache *lc)
+static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
 {
-	return !test_and_set_bit(__LC_DIRTY, &lc->flags);
+	return !test_and_set_bit(__LC_LOCKED, &lc->flags);
 }
 
 /**
+ * lc_try_lock - variant to stop lc_get() from changing the tracked set
+ * @lc: the lru cache to operate on
+ *
+ * Note that the reference counts and order on the active and lru lists may
+ * still change. Only works on a "clean" set. Returns true if we aquired the
+ * lock, which means there are no pending changes, and any further attempt to
+ * change the set will not succeed until the next lc_unlock().
+ */
+extern int lc_try_lock(struct lru_cache *lc);
+
+/**
  * lc_unlock - unlock @lc, allow lc_get() to change the set again
  * @lc: the lru cache to operate on
  */
 static inline void lc_unlock(struct lru_cache *lc)
 {
 	clear_bit(__LC_DIRTY, &lc->flags);
-	smp_mb__after_clear_bit();
+	clear_bit_unlock(__LC_LOCKED, &lc->flags);
 }
 
-static inline int lc_is_used(struct lru_cache *lc, unsigned int enr)
-{
-	struct lc_element *e = lc_find(lc, enr);
-	return e && e->refcnt;
-}
+extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
 
 #define lc_entry(ptr, type, member) \
 	container_of(ptr, type, member)
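Taken together, the new fields and declarations describe a transactional
API: lc_get() parks a recycled element on to_be_changed with its future
label in lc_new_number, the set becomes "dirty", and nothing is relabeled
in memory until the caller persists the batch and calls lc_committed().
A rough sketch of the intended calling pattern, inferred from the
declarations above; the hot_extent names and sizes are hypothetical, and
all lc_* calls rely on the caller's own exclusive locking:

	#include <linux/lru_cache.h>
	#include <linux/slab.h>

	/* Tracked objects embed a struct lc_element; its offset is the
	 * e_off argument of lc_create(). */
	struct hot_extent {
		struct lc_element lce;
		/* ... per-extent payload ... */
	};

	static struct kmem_cache *extent_cache;
	static struct lru_cache *extents;

	static int hot_extents_init(void)
	{
		extent_cache = kmem_cache_create("hot_extent",
				sizeof(struct hot_extent), 0, 0, NULL);
		if (!extent_cache)
			return -ENOMEM;

		/* Track 64 extents; allow at most 8 (index:label)
		 * changes to accumulate per transaction. */
		extents = lc_create("hot-extents", extent_cache,
				8 /* max_pending_changes */, 64,
				sizeof(struct hot_extent),
				offsetof(struct hot_extent, lce));
		if (!extents) {
			kmem_cache_destroy(extent_cache);
			return -ENOMEM;
		}
		return 0;
	}

	/* Take a reference on extent @enr, committing any pending
	 * label changes first.  Caller serializes all lc_* calls. */
	static struct lc_element *hot_extent_get(unsigned int enr)
	{
		struct lc_element *e = lc_get(extents, enr);

		if (e && e->lc_number != enr) {
			/* e was recycled and now sits on to_be_changed
			 * with e->lc_new_number == enr.  A real user
			 * (drbd's activity log) would write the batched
			 * changes to stable storage at this point. */
			lc_committed(extents);
		}
		return e;	/* NULL: starving, or too many pending changes */
	}

When the caller is done, lc_put() drops the reference; once the refcnt
reaches zero the element moves back onto the lru list, making it
eligible for relabeling by a later lc_get().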