diff options
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/fscache-cache.h | 40 | ||||
| -rw-r--r-- | include/linux/fscache.h | 27 | ||||
| -rw-r--r-- | include/linux/slow-work.h | 72 |
3 files changed, 135 insertions, 4 deletions
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 84d3532dd3ea..7be0c6fbe880 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
| @@ -91,6 +91,8 @@ struct fscache_operation { | |||
| 91 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ | 91 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ |
| 92 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ | 92 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ |
| 93 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ | 93 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ |
| 94 | #define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */ | ||
| 95 | #define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */ | ||
| 94 | 96 | ||
| 95 | atomic_t usage; | 97 | atomic_t usage; |
| 96 | unsigned debug_id; /* debugging ID */ | 98 | unsigned debug_id; /* debugging ID */ |
| @@ -102,6 +104,16 @@ struct fscache_operation { | |||
| 102 | 104 | ||
| 103 | /* operation releaser */ | 105 | /* operation releaser */ |
| 104 | fscache_operation_release_t release; | 106 | fscache_operation_release_t release; |
| 107 | |||
| 108 | #ifdef CONFIG_SLOW_WORK_PROC | ||
| 109 | const char *name; /* operation name */ | ||
| 110 | const char *state; /* operation state */ | ||
| 111 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) | ||
| 112 | #define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0) | ||
| 113 | #else | ||
| 114 | #define fscache_set_op_name(OP, N) do { } while(0) | ||
| 115 | #define fscache_set_op_state(OP, S) do { } while(0) | ||
| 116 | #endif | ||
| 105 | }; | 117 | }; |
| 106 | 118 | ||
| 107 | extern atomic_t fscache_op_debug_id; | 119 | extern atomic_t fscache_op_debug_id; |
| @@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, | |||
| 125 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | 137 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); |
| 126 | op->release = release; | 138 | op->release = release; |
| 127 | INIT_LIST_HEAD(&op->pend_link); | 139 | INIT_LIST_HEAD(&op->pend_link); |
| 140 | fscache_set_op_state(op, "Init"); | ||
| 128 | } | 141 | } |
| 129 | 142 | ||
| 130 | /** | 143 | /** |
| @@ -221,8 +234,10 @@ struct fscache_cache_ops { | |||
| 221 | struct fscache_object *(*alloc_object)(struct fscache_cache *cache, | 234 | struct fscache_object *(*alloc_object)(struct fscache_cache *cache, |
| 222 | struct fscache_cookie *cookie); | 235 | struct fscache_cookie *cookie); |
| 223 | 236 | ||
| 224 | /* look up the object for a cookie */ | 237 | /* look up the object for a cookie |
| 225 | void (*lookup_object)(struct fscache_object *object); | 238 | * - return -ETIMEDOUT to be requeued |
| 239 | */ | ||
| 240 | int (*lookup_object)(struct fscache_object *object); | ||
| 226 | 241 | ||
| 227 | /* finished looking up */ | 242 | /* finished looking up */ |
| 228 | void (*lookup_complete)(struct fscache_object *object); | 243 | void (*lookup_complete)(struct fscache_object *object); |
| @@ -297,12 +312,14 @@ struct fscache_cookie { | |||
| 297 | atomic_t usage; /* number of users of this cookie */ | 312 | atomic_t usage; /* number of users of this cookie */ |
| 298 | atomic_t n_children; /* number of children of this cookie */ | 313 | atomic_t n_children; /* number of children of this cookie */ |
| 299 | spinlock_t lock; | 314 | spinlock_t lock; |
| 315 | spinlock_t stores_lock; /* lock on page store tree */ | ||
| 300 | struct hlist_head backing_objects; /* object(s) backing this file/index */ | 316 | struct hlist_head backing_objects; /* object(s) backing this file/index */ |
| 301 | const struct fscache_cookie_def *def; /* definition */ | 317 | const struct fscache_cookie_def *def; /* definition */ |
| 302 | struct fscache_cookie *parent; /* parent of this entry */ | 318 | struct fscache_cookie *parent; /* parent of this entry */ |
| 303 | void *netfs_data; /* back pointer to netfs */ | 319 | void *netfs_data; /* back pointer to netfs */ |
| 304 | struct radix_tree_root stores; /* pages to be stored on this cookie */ | 320 | struct radix_tree_root stores; /* pages to be stored on this cookie */ |
| 305 | #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ | 321 | #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ |
| 322 | #define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */ | ||
| 306 | 323 | ||
| 307 | unsigned long flags; | 324 | unsigned long flags; |
| 308 | #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ | 325 | #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ |
| @@ -337,6 +354,7 @@ struct fscache_object { | |||
| 337 | FSCACHE_OBJECT_RECYCLING, /* retiring object */ | 354 | FSCACHE_OBJECT_RECYCLING, /* retiring object */ |
| 338 | FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ | 355 | FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ |
| 339 | FSCACHE_OBJECT_DEAD, /* object is now dead */ | 356 | FSCACHE_OBJECT_DEAD, /* object is now dead */ |
| 357 | FSCACHE_OBJECT__NSTATES | ||
| 340 | } state; | 358 | } state; |
| 341 | 359 | ||
| 342 | int debug_id; /* debugging ID */ | 360 | int debug_id; /* debugging ID */ |
| @@ -345,6 +363,7 @@ struct fscache_object { | |||
| 345 | int n_obj_ops; /* number of object ops outstanding on object */ | 363 | int n_obj_ops; /* number of object ops outstanding on object */ |
| 346 | int n_in_progress; /* number of ops in progress */ | 364 | int n_in_progress; /* number of ops in progress */ |
| 347 | int n_exclusive; /* number of exclusive ops queued */ | 365 | int n_exclusive; /* number of exclusive ops queued */ |
| 366 | atomic_t n_reads; /* number of read ops in progress */ | ||
| 348 | spinlock_t lock; /* state and operations lock */ | 367 | spinlock_t lock; /* state and operations lock */ |
| 349 | 368 | ||
| 350 | unsigned long lookup_jif; /* time at which lookup started */ | 369 | unsigned long lookup_jif; /* time at which lookup started */ |
| @@ -358,6 +377,7 @@ struct fscache_object { | |||
| 358 | #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ | 377 | #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ |
| 359 | #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ | 378 | #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ |
| 360 | #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ | 379 | #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ |
| 380 | #define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/ | ||
| 361 | 381 | ||
| 362 | unsigned long flags; | 382 | unsigned long flags; |
| 363 | #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ | 383 | #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ |
| @@ -373,7 +393,11 @@ struct fscache_object { | |||
| 373 | struct list_head dependents; /* FIFO of dependent objects */ | 393 | struct list_head dependents; /* FIFO of dependent objects */ |
| 374 | struct list_head dep_link; /* link in parent's dependents list */ | 394 | struct list_head dep_link; /* link in parent's dependents list */ |
| 375 | struct list_head pending_ops; /* unstarted operations on this object */ | 395 | struct list_head pending_ops; /* unstarted operations on this object */ |
| 396 | #ifdef CONFIG_FSCACHE_OBJECT_LIST | ||
| 397 | struct rb_node objlist_link; /* link in global object list */ | ||
| 398 | #endif | ||
| 376 | pgoff_t store_limit; /* current storage limit */ | 399 | pgoff_t store_limit; /* current storage limit */ |
| 400 | loff_t store_limit_l; /* current storage limit */ | ||
| 377 | }; | 401 | }; |
| 378 | 402 | ||
| 379 | extern const char *fscache_object_states[]; | 403 | extern const char *fscache_object_states[]; |
| @@ -383,6 +407,10 @@ extern const char *fscache_object_states[]; | |||
| 383 | (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ | 407 | (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ |
| 384 | (obj)->state < FSCACHE_OBJECT_DYING) | 408 | (obj)->state < FSCACHE_OBJECT_DYING) |
| 385 | 409 | ||
| 410 | #define fscache_object_is_dead(obj) \ | ||
| 411 | (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ | ||
| 412 | (obj)->state >= FSCACHE_OBJECT_DYING) | ||
| 413 | |||
| 386 | extern const struct slow_work_ops fscache_object_slow_work_ops; | 414 | extern const struct slow_work_ops fscache_object_slow_work_ops; |
| 387 | 415 | ||
| 388 | /** | 416 | /** |
| @@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object, | |||
| 414 | object->events = object->event_mask = 0; | 442 | object->events = object->event_mask = 0; |
| 415 | object->flags = 0; | 443 | object->flags = 0; |
| 416 | object->store_limit = 0; | 444 | object->store_limit = 0; |
| 445 | object->store_limit_l = 0; | ||
| 417 | object->cache = cache; | 446 | object->cache = cache; |
| 418 | object->cookie = cookie; | 447 | object->cookie = cookie; |
| 419 | object->parent = NULL; | 448 | object->parent = NULL; |
| @@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object, | |||
| 422 | extern void fscache_object_lookup_negative(struct fscache_object *object); | 451 | extern void fscache_object_lookup_negative(struct fscache_object *object); |
| 423 | extern void fscache_obtained_object(struct fscache_object *object); | 452 | extern void fscache_obtained_object(struct fscache_object *object); |
| 424 | 453 | ||
| 454 | #ifdef CONFIG_FSCACHE_OBJECT_LIST | ||
| 455 | extern void fscache_object_destroy(struct fscache_object *object); | ||
| 456 | #else | ||
| 457 | #define fscache_object_destroy(object) do {} while(0) | ||
| 458 | #endif | ||
| 459 | |||
| 425 | /** | 460 | /** |
| 426 | * fscache_object_destroyed - Note destruction of an object in a cache | 461 | * fscache_object_destroyed - Note destruction of an object in a cache |
| 427 | * @cache: The cache from which the object came | 462 | * @cache: The cache from which the object came |
| @@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object) | |||
| 460 | static inline | 495 | static inline |
| 461 | void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) | 496 | void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) |
| 462 | { | 497 | { |
| 498 | object->store_limit_l = i_size; | ||
| 463 | object->store_limit = i_size >> PAGE_SHIFT; | 499 | object->store_limit = i_size >> PAGE_SHIFT; |
| 464 | if (i_size & ~PAGE_MASK) | 500 | if (i_size & ~PAGE_MASK) |
| 465 | object->store_limit++; | 501 | object->store_limit++; |
diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 6d8ee466e0a0..595ce49288b7 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h | |||
| @@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); | |||
| 202 | extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); | 202 | extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); |
| 203 | extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); | 203 | extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); |
| 204 | extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); | 204 | extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); |
| 205 | extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *, | ||
| 206 | gfp_t); | ||
| 205 | 207 | ||
| 206 | /** | 208 | /** |
| 207 | * fscache_register_netfs - Register a filesystem as desiring caching services | 209 | * fscache_register_netfs - Register a filesystem as desiring caching services |
| @@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie, | |||
| 615 | __fscache_wait_on_page_write(cookie, page); | 617 | __fscache_wait_on_page_write(cookie, page); |
| 616 | } | 618 | } |
| 617 | 619 | ||
| 620 | /** | ||
| 621 | * fscache_maybe_release_page - Consider releasing a page, cancelling a store | ||
| 622 | * @cookie: The cookie representing the cache object | ||
| 623 | * @page: The netfs page that is being cached. | ||
| 624 | * @gfp: The gfp flags passed to releasepage() | ||
| 625 | * | ||
| 626 | * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's | ||
| 627 | * releasepage() call. A storage request on the page may be cancelled if it is | ||
| 628 | * not currently being processed. | ||
| 629 | * | ||
| 630 | * The function returns true if the page no longer has a storage request on it, | ||
| 631 | * and false if a storage request is left in place. If true is returned, the | ||
| 632 | * page will have been passed to fscache_uncache_page(). If false is returned | ||
| 633 | * the page cannot be freed yet. | ||
| 634 | */ | ||
| 635 | static inline | ||
| 636 | bool fscache_maybe_release_page(struct fscache_cookie *cookie, | ||
| 637 | struct page *page, | ||
| 638 | gfp_t gfp) | ||
| 639 | { | ||
| 640 | if (fscache_cookie_valid(cookie) && PageFsCache(page)) | ||
| 641 | return __fscache_maybe_release_page(cookie, page, gfp); | ||
| 642 | return false; | ||
| 643 | } | ||
| 644 | |||
| 618 | #endif /* _LINUX_FSCACHE_H */ | 645 | #endif /* _LINUX_FSCACHE_H */ |
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index b65c8881f07a..5035a2691739 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h | |||
| @@ -17,13 +17,20 @@ | |||
| 17 | #ifdef CONFIG_SLOW_WORK | 17 | #ifdef CONFIG_SLOW_WORK |
| 18 | 18 | ||
| 19 | #include <linux/sysctl.h> | 19 | #include <linux/sysctl.h> |
| 20 | #include <linux/timer.h> | ||
| 20 | 21 | ||
| 21 | struct slow_work; | 22 | struct slow_work; |
| 23 | #ifdef CONFIG_SLOW_WORK_PROC | ||
| 24 | struct seq_file; | ||
| 25 | #endif | ||
| 22 | 26 | ||
| 23 | /* | 27 | /* |
| 24 | * The operations used to support slow work items | 28 | * The operations used to support slow work items |
| 25 | */ | 29 | */ |
| 26 | struct slow_work_ops { | 30 | struct slow_work_ops { |
| 31 | /* owner */ | ||
| 32 | struct module *owner; | ||
| 33 | |||
| 27 | /* get a ref on a work item | 34 | /* get a ref on a work item |
| 28 | * - return 0 if successful, -ve if not | 35 | * - return 0 if successful, -ve if not |
| 29 | */ | 36 | */ |
| @@ -34,6 +41,11 @@ struct slow_work_ops { | |||
| 34 | 41 | ||
| 35 | /* execute a work item */ | 42 | /* execute a work item */ |
| 36 | void (*execute)(struct slow_work *work); | 43 | void (*execute)(struct slow_work *work); |
| 44 | |||
| 45 | #ifdef CONFIG_SLOW_WORK_PROC | ||
| 46 | /* describe a work item for /proc */ | ||
| 47 | void (*desc)(struct slow_work *work, struct seq_file *m); | ||
| 48 | #endif | ||
| 37 | }; | 49 | }; |
| 38 | 50 | ||
| 39 | /* | 51 | /* |
| @@ -42,13 +54,24 @@ struct slow_work_ops { | |||
| 42 | * queued | 54 | * queued |
| 43 | */ | 55 | */ |
| 44 | struct slow_work { | 56 | struct slow_work { |
| 57 | struct module *owner; /* the owning module */ | ||
| 45 | unsigned long flags; | 58 | unsigned long flags; |
| 46 | #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ | 59 | #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ |
| 47 | #define SLOW_WORK_EXECUTING 1 /* item currently executing */ | 60 | #define SLOW_WORK_EXECUTING 1 /* item currently executing */ |
| 48 | #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ | 61 | #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ |
| 49 | #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ | 62 | #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ |
| 63 | #define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ | ||
| 64 | #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ | ||
| 50 | const struct slow_work_ops *ops; /* operations table for this item */ | 65 | const struct slow_work_ops *ops; /* operations table for this item */ |
| 51 | struct list_head link; /* link in queue */ | 66 | struct list_head link; /* link in queue */ |
| 67 | #ifdef CONFIG_SLOW_WORK_PROC | ||
| 68 | struct timespec mark; /* jiffies at which queued or exec begun */ | ||
| 69 | #endif | ||
| 70 | }; | ||
| 71 | |||
| 72 | struct delayed_slow_work { | ||
| 73 | struct slow_work work; | ||
| 74 | struct timer_list timer; | ||
| 52 | }; | 75 | }; |
| 53 | 76 | ||
| 54 | /** | 77 | /** |
| @@ -67,6 +90,20 @@ static inline void slow_work_init(struct slow_work *work, | |||
| 67 | } | 90 | } |
| 68 | 91 | ||
| 69 | /** | 92 | /** |
| 93 | * delayed_slow_work_init - Initialise a delayed slow work item | ||
| 94 | * @dwork: The delayed work item to initialise | ||
| 95 | * @ops: The operations to use to handle the slow work item | ||
| 96 | * | ||
| 97 | * Initialise a delayed slow work item. | ||
| 98 | */ | ||
| 99 | static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, | ||
| 100 | const struct slow_work_ops *ops) | ||
| 101 | { | ||
| 102 | init_timer(&dwork->timer); | ||
| 103 | slow_work_init(&dwork->work, ops); | ||
| 104 | } | ||
| 105 | |||
| 106 | /** | ||
| 70 | * vslow_work_init - Initialise a very slow work item | 107 | * vslow_work_init - Initialise a very slow work item |
| 71 | * @work: The work item to initialise | 108 | * @work: The work item to initialise |
| 72 | * @ops: The operations to use to handle the slow work item | 109 | * @ops: The operations to use to handle the slow work item |
| @@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work, | |||
| 83 | INIT_LIST_HEAD(&work->link); | 120 | INIT_LIST_HEAD(&work->link); |
| 84 | } | 121 | } |
| 85 | 122 | ||
| 123 | /** | ||
| 124 | * slow_work_is_queued - Determine if a slow work item is on the work queue | ||
| 125 | * @work: The work item to test | ||
| 126 | * | ||
| 127 | * Determine if the specified slow-work item is on the work queue. This | ||
| 128 | * returns true if it is actually on the queue. | ||
| 129 | * | ||
| 130 | * If the item is executing and has been marked for requeue when execution | ||
| 131 | * finishes, then false will be returned. | ||
| 132 | * | ||
| 133 | * Anyone wishing to wait for completion of execution can wait on the | ||
| 134 | * SLOW_WORK_EXECUTING bit. | ||
| 135 | */ | ||
| 136 | static inline bool slow_work_is_queued(struct slow_work *work) | ||
| 137 | { | ||
| 138 | unsigned long flags = work->flags; | ||
| 139 | return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); | ||
| 140 | } | ||
| 141 | |||
| 86 | extern int slow_work_enqueue(struct slow_work *work); | 142 | extern int slow_work_enqueue(struct slow_work *work); |
| 87 | extern int slow_work_register_user(void); | 143 | extern void slow_work_cancel(struct slow_work *work); |
| 88 | extern void slow_work_unregister_user(void); | 144 | extern int slow_work_register_user(struct module *owner); |
| 145 | extern void slow_work_unregister_user(struct module *owner); | ||
| 146 | |||
| 147 | extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, | ||
| 148 | unsigned long delay); | ||
| 149 | |||
| 150 | static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) | ||
| 151 | { | ||
| 152 | slow_work_cancel(&dwork->work); | ||
| 153 | } | ||
| 154 | |||
| 155 | extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, | ||
| 156 | signed long *_timeout); | ||
| 89 | 157 | ||
| 90 | #ifdef CONFIG_SYSCTL | 158 | #ifdef CONFIG_SYSCTL |
| 91 | extern ctl_table slow_work_sysctls[]; | 159 | extern ctl_table slow_work_sysctls[]; |
