author		David Howells <dhowells@redhat.com>	2012-12-20 16:52:36 -0500
committer	David Howells <dhowells@redhat.com>	2012-12-20 17:04:07 -0500
commit		ef778e7ae67cd426c30cad43378b908f5eb0bad5 (patch)
tree		4893f19487cb99e8ec0eb835ec4391d952641a9c /fs/fscache/stats.c
parent		9f10523f891928330b7529da54c1a3cc65180b1a (diff)
FS-Cache: Provide proper invalidation
Provide a proper invalidation method rather than relying on the netfs retiring the cookie it has and getting a new one.  The problem with this is that it isn't easy for the netfs to make sure that it has completed/cancelled all its outstanding storage and retrieval operations on the cookie it is retiring.

Instead, have the cache provide an invalidation method that will cancel or wait for all currently outstanding operations before invalidating the cache, and will cause new operations to queue up behind that.  Whilst invalidation is in progress, some requests will be rejected until the cache can stack a barrier on the operation queue to cause new operations to be deferred behind it.

Signed-off-by: David Howells <dhowells@redhat.com>
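The netfs-facing side of this change lives elsewhere in the series (not in stats.c), but a short sketch may help. The inode wrapper and function below are hypothetical; fscache_invalidate() and fscache_wait_on_invalidate() are assumed to be the entry points this series introduces for the netfs to request invalidation rather than retiring the cookie:

/*
 * Hypothetical netfs-side sketch (not part of this patch): ask FS-Cache
 * to invalidate the data backing a cookie instead of relinquishing the
 * cookie and acquiring a new one.
 */
#include <linux/fs.h>
#include <linux/fscache.h>

struct my_netfs_inode {			/* illustrative wrapper only */
	struct inode		vfs_inode;
	struct fscache_cookie	*fscache;
};

static void my_netfs_zap_cache(struct my_netfs_inode *ni)
{
	if (!ni->fscache)
		return;

	/* Queue an invalidation behind any operations already outstanding
	 * on this cookie; new operations will be deferred behind it. */
	fscache_invalidate(ni->fscache);

	/* Optionally wait until the cache has actually performed it. */
	fscache_wait_on_invalidate(ni->fscache);
}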
Diffstat (limited to 'fs/fscache/stats.c')
-rw-r--r--	fs/fscache/stats.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 4765190d537f..51cdaee14109 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -80,6 +80,9 @@ atomic_t fscache_n_acquires_ok;
 atomic_t fscache_n_acquires_nobufs;
 atomic_t fscache_n_acquires_oom;
 
+atomic_t fscache_n_invalidates;
+atomic_t fscache_n_invalidates_run;
+
 atomic_t fscache_n_updates;
 atomic_t fscache_n_updates_null;
 atomic_t fscache_n_updates_run;
@@ -112,6 +115,7 @@ atomic_t fscache_n_cop_alloc_object;
 atomic_t fscache_n_cop_lookup_object;
 atomic_t fscache_n_cop_lookup_complete;
 atomic_t fscache_n_cop_grab_object;
+atomic_t fscache_n_cop_invalidate_object;
 atomic_t fscache_n_cop_update_object;
 atomic_t fscache_n_cop_drop_object;
 atomic_t fscache_n_cop_put_object;
@@ -168,6 +172,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_object_created),
 		   atomic_read(&fscache_n_object_lookups_timed_out));
 
+	seq_printf(m, "Invals : n=%u run=%u\n",
+		   atomic_read(&fscache_n_invalidates),
+		   atomic_read(&fscache_n_invalidates_run));
+
 	seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
 		   atomic_read(&fscache_n_updates),
 		   atomic_read(&fscache_n_updates_null),
@@ -246,7 +254,8 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_cop_lookup_object),
 		   atomic_read(&fscache_n_cop_lookup_complete),
 		   atomic_read(&fscache_n_cop_grab_object));
-	seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+	seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+		   atomic_read(&fscache_n_cop_invalidate_object),
 		   atomic_read(&fscache_n_cop_update_object),
 		   atomic_read(&fscache_n_cop_drop_object),
 		   atomic_read(&fscache_n_cop_put_object),
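For context, a hedged sketch of how the three new counters would be driven by the rest of the patch: fscache_stat() is FS-Cache's internal helper (fs/fscache/internal.h) that increments an atomic_t counter when CONFIG_FSCACHE_STATS is enabled. None of these call sites are in stats.c, so the grouping below is illustrative only and the function is hypothetical:

/*
 * Hypothetical illustration (not part of this file): where the new
 * invalidation statistics would be accounted.
 */
#include "internal.h"	/* fscache_stat() and the extern counter declarations */

static void example_invalidation_accounting(void)
{
	/* When a netfs asks for a cookie to be invalidated ("Invals : n="). */
	fscache_stat(&fscache_n_invalidates);

	/* When the object state machine actually runs the invalidation
	 * ("Invals : run="). */
	fscache_stat(&fscache_n_invalidates_run);

	/* Around the call into the cache backend's invalidate operation
	 * ("CacheOp: inv="). */
	fscache_stat(&fscache_n_cop_invalidate_object);
}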