diff options
author | Sage Weil <sage@newdream.net> | 2010-02-09 18:24:44 -0500 |
---|---|---|
committer | Sage Weil <sage@newdream.net> | 2010-02-11 14:48:54 -0500 |
commit | 3c6f6b79a64db7f1c7abf09d693db3b0066784fb (patch) | |
tree | e17b4cd848212b0556180331280044dd1526b742 /fs/ceph | |
parent | 6a026589ba333185c466c906376fe022a27a53f9 (diff) |
ceph: cleanup async writeback, truncation, invalidate helpers
Grab inode ref in helper. Make work functions static, with consistent
naming.
Signed-off-by: Sage Weil <sage@newdream.net>
Diffstat (limited to 'fs/ceph')
-rw-r--r-- | fs/ceph/addr.c | 3 | ||||
-rw-r--r-- | fs/ceph/caps.c | 25 | ||||
-rw-r--r-- | fs/ceph/inode.c | 61 | ||||
-rw-r--r-- | fs/ceph/super.h | 19 |
4 files changed, 65 insertions, 43 deletions
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 89c5ff3b59d5..71f5ad1c1e26 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -947,8 +947,7 @@ retry_locked: | |||
947 | */ | 947 | */ |
948 | snapc = ceph_get_snap_context((void *)page->private); | 948 | snapc = ceph_get_snap_context((void *)page->private); |
949 | unlock_page(page); | 949 | unlock_page(page); |
950 | if (ceph_queue_writeback(inode)) | 950 | ceph_queue_writeback(inode); |
951 | igrab(inode); | ||
952 | wait_event_interruptible(ci->i_cap_wq, | 951 | wait_event_interruptible(ci->i_cap_wq, |
953 | context_is_writeable_or_written(inode, snapc)); | 952 | context_is_writeable_or_written(inode, snapc)); |
954 | ceph_put_snap_context(snapc); | 953 | ceph_put_snap_context(snapc); |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 7f4841cd3a2b..68ee78109224 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -1602,8 +1602,7 @@ ack: | |||
1602 | spin_unlock(&inode->i_lock); | 1602 | spin_unlock(&inode->i_lock); |
1603 | 1603 | ||
1604 | if (queue_invalidate) | 1604 | if (queue_invalidate) |
1605 | if (ceph_queue_page_invalidation(inode)) | 1605 | ceph_queue_invalidate(inode); |
1606 | igrab(inode); | ||
1607 | 1606 | ||
1608 | if (session && drop_session_lock) | 1607 | if (session && drop_session_lock) |
1609 | mutex_unlock(&session->s_mutex); | 1608 | mutex_unlock(&session->s_mutex); |
@@ -2178,7 +2177,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2178 | int wake = 0; | 2177 | int wake = 0; |
2179 | int writeback = 0; | 2178 | int writeback = 0; |
2180 | int revoked_rdcache = 0; | 2179 | int revoked_rdcache = 0; |
2181 | int invalidate_async = 0; | 2180 | int queue_invalidate = 0; |
2182 | int tried_invalidate = 0; | 2181 | int tried_invalidate = 0; |
2183 | int ret; | 2182 | int ret; |
2184 | 2183 | ||
@@ -2205,7 +2204,7 @@ restart: | |||
2205 | /* there were locked pages.. invalidate later | 2204 | /* there were locked pages.. invalidate later |
2206 | in a separate thread. */ | 2205 | in a separate thread. */ |
2207 | if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { | 2206 | if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { |
2208 | invalidate_async = 1; | 2207 | queue_invalidate = 1; |
2209 | ci->i_rdcache_revoking = ci->i_rdcache_gen; | 2208 | ci->i_rdcache_revoking = ci->i_rdcache_gen; |
2210 | } | 2209 | } |
2211 | } else { | 2210 | } else { |
@@ -2319,21 +2318,15 @@ restart: | |||
2319 | } | 2318 | } |
2320 | 2319 | ||
2321 | spin_unlock(&inode->i_lock); | 2320 | spin_unlock(&inode->i_lock); |
2322 | if (writeback) { | 2321 | if (writeback) |
2323 | /* | 2322 | /* |
2324 | * queue inode for writeback: we can't actually call | 2323 | * queue inode for writeback: we can't actually call |
2325 | * filemap_write_and_wait, etc. from message handler | 2324 | * filemap_write_and_wait, etc. from message handler |
2326 | * context. | 2325 | * context. |
2327 | */ | 2326 | */ |
2328 | dout("queueing %p for writeback\n", inode); | 2327 | ceph_queue_writeback(inode); |
2329 | if (ceph_queue_writeback(inode)) | 2328 | if (queue_invalidate) |
2330 | igrab(inode); | 2329 | ceph_queue_invalidate(inode); |
2331 | } | ||
2332 | if (invalidate_async) { | ||
2333 | dout("queueing %p for page invalidation\n", inode); | ||
2334 | if (ceph_queue_page_invalidation(inode)) | ||
2335 | igrab(inode); | ||
2336 | } | ||
2337 | if (wake) | 2330 | if (wake) |
2338 | wake_up(&ci->i_cap_wq); | 2331 | wake_up(&ci->i_cap_wq); |
2339 | return reply; | 2332 | return reply; |
@@ -2479,9 +2472,7 @@ static void handle_cap_trunc(struct inode *inode, | |||
2479 | spin_unlock(&inode->i_lock); | 2472 | spin_unlock(&inode->i_lock); |
2480 | 2473 | ||
2481 | if (queue_trunc) | 2474 | if (queue_trunc) |
2482 | if (queue_work(ceph_client(inode->i_sb)->trunc_wq, | 2475 | ceph_queue_vmtruncate(inode); |
2483 | &ci->i_vmtruncate_work)) | ||
2484 | igrab(inode); | ||
2485 | } | 2476 | } |
2486 | 2477 | ||
2487 | /* | 2478 | /* |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index af85f2de2f7c..58bdff09c2c1 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -28,7 +28,9 @@ | |||
28 | 28 | ||
29 | static const struct inode_operations ceph_symlink_iops; | 29 | static const struct inode_operations ceph_symlink_iops; |
30 | 30 | ||
31 | static void ceph_inode_invalidate_pages(struct work_struct *work); | 31 | static void ceph_invalidate_work(struct work_struct *work); |
32 | static void ceph_writeback_work(struct work_struct *work); | ||
33 | static void ceph_vmtruncate_work(struct work_struct *work); | ||
32 | 34 | ||
33 | /* | 35 | /* |
34 | * find or create an inode, given the ceph ino number | 36 | * find or create an inode, given the ceph ino number |
@@ -357,8 +359,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) | |||
357 | INIT_LIST_HEAD(&ci->i_snap_realm_item); | 359 | INIT_LIST_HEAD(&ci->i_snap_realm_item); |
358 | INIT_LIST_HEAD(&ci->i_snap_flush_item); | 360 | INIT_LIST_HEAD(&ci->i_snap_flush_item); |
359 | 361 | ||
360 | INIT_WORK(&ci->i_wb_work, ceph_inode_writeback); | 362 | INIT_WORK(&ci->i_wb_work, ceph_writeback_work); |
361 | INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages); | 363 | INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); |
362 | 364 | ||
363 | INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); | 365 | INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); |
364 | 366 | ||
@@ -675,9 +677,7 @@ no_change: | |||
675 | 677 | ||
676 | /* queue truncate if we saw i_size decrease */ | 678 | /* queue truncate if we saw i_size decrease */ |
677 | if (queue_trunc) | 679 | if (queue_trunc) |
678 | if (queue_work(ceph_client(inode->i_sb)->trunc_wq, | 680 | ceph_queue_vmtruncate(inode); |
679 | &ci->i_vmtruncate_work)) | ||
680 | igrab(inode); | ||
681 | 681 | ||
682 | /* populate frag tree */ | 682 | /* populate frag tree */ |
683 | /* FIXME: move me up, if/when version reflects fragtree changes */ | 683 | /* FIXME: move me up, if/when version reflects fragtree changes */ |
@@ -1243,7 +1243,18 @@ int ceph_inode_set_size(struct inode *inode, loff_t size) | |||
1243 | * Write back inode data in a worker thread. (This can't be done | 1243 | * Write back inode data in a worker thread. (This can't be done |
1244 | * in the message handler context.) | 1244 | * in the message handler context.) |
1245 | */ | 1245 | */ |
1246 | void ceph_inode_writeback(struct work_struct *work) | 1246 | void ceph_queue_writeback(struct inode *inode) |
1247 | { | ||
1248 | if (queue_work(ceph_inode_to_client(inode)->wb_wq, | ||
1249 | &ceph_inode(inode)->i_wb_work)) { | ||
1250 | dout("ceph_queue_writeback %p\n", inode); | ||
1251 | igrab(inode); | ||
1252 | } else { | ||
1253 | dout("ceph_queue_writeback %p failed\n", inode); | ||
1254 | } | ||
1255 | } | ||
1256 | |||
1257 | static void ceph_writeback_work(struct work_struct *work) | ||
1247 | { | 1258 | { |
1248 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, | 1259 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, |
1249 | i_wb_work); | 1260 | i_wb_work); |
@@ -1255,10 +1266,24 @@ void ceph_inode_writeback(struct work_struct *work) | |||
1255 | } | 1266 | } |
1256 | 1267 | ||
1257 | /* | 1268 | /* |
1269 | * queue an async invalidation | ||
1270 | */ | ||
1271 | void ceph_queue_invalidate(struct inode *inode) | ||
1272 | { | ||
1273 | if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, | ||
1274 | &ceph_inode(inode)->i_pg_inv_work)) { | ||
1275 | dout("ceph_queue_invalidate %p\n", inode); | ||
1276 | igrab(inode); | ||
1277 | } else { | ||
1278 | dout("ceph_queue_invalidate %p failed\n", inode); | ||
1279 | } | ||
1280 | } | ||
1281 | |||
1282 | /* | ||
1258 | * Invalidate inode pages in a worker thread. (This can't be done | 1283 | * Invalidate inode pages in a worker thread. (This can't be done |
1259 | * in the message handler context.) | 1284 | * in the message handler context.) |
1260 | */ | 1285 | */ |
1261 | static void ceph_inode_invalidate_pages(struct work_struct *work) | 1286 | static void ceph_invalidate_work(struct work_struct *work) |
1262 | { | 1287 | { |
1263 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, | 1288 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, |
1264 | i_pg_inv_work); | 1289 | i_pg_inv_work); |
@@ -1307,7 +1332,7 @@ out: | |||
1307 | * | 1332 | * |
1308 | * We also truncate in a separate thread as well. | 1333 | * We also truncate in a separate thread as well. |
1309 | */ | 1334 | */ |
1310 | void ceph_vmtruncate_work(struct work_struct *work) | 1335 | static void ceph_vmtruncate_work(struct work_struct *work) |
1311 | { | 1336 | { |
1312 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, | 1337 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, |
1313 | i_vmtruncate_work); | 1338 | i_vmtruncate_work); |
@@ -1321,6 +1346,24 @@ void ceph_vmtruncate_work(struct work_struct *work) | |||
1321 | } | 1346 | } |
1322 | 1347 | ||
1323 | /* | 1348 | /* |
1349 | * Queue an async vmtruncate. If we fail to queue work, we will handle | ||
1350 | * the truncation the next time we call __ceph_do_pending_vmtruncate. | ||
1351 | */ | ||
1352 | void ceph_queue_vmtruncate(struct inode *inode) | ||
1353 | { | ||
1354 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
1355 | |||
1356 | if (queue_work(ceph_client(inode->i_sb)->trunc_wq, | ||
1357 | &ci->i_vmtruncate_work)) { | ||
1358 | dout("ceph_queue_vmtruncate %p\n", inode); | ||
1359 | igrab(inode); | ||
1360 | } else { | ||
1361 | dout("ceph_queue_vmtruncate %p failed, pending=%d\n", | ||
1362 | inode, ci->i_truncate_pending); | ||
1363 | } | ||
1364 | } | ||
1365 | |||
1366 | /* | ||
1324 | * called with i_mutex held. | 1367 | * called with i_mutex held. |
1325 | * | 1368 | * |
1326 | * Make sure any pending truncation is applied before doing anything | 1369 | * Make sure any pending truncation is applied before doing anything |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3930fb685f0b..b2adfccbab98 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -573,18 +573,6 @@ static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb) | |||
573 | return (struct ceph_client *)sb->s_fs_info; | 573 | return (struct ceph_client *)sb->s_fs_info; |
574 | } | 574 | } |
575 | 575 | ||
576 | static inline int ceph_queue_writeback(struct inode *inode) | ||
577 | { | ||
578 | return queue_work(ceph_inode_to_client(inode)->wb_wq, | ||
579 | &ceph_inode(inode)->i_wb_work); | ||
580 | } | ||
581 | |||
582 | static inline int ceph_queue_page_invalidation(struct inode *inode) | ||
583 | { | ||
584 | return queue_work(ceph_inode_to_client(inode)->pg_inv_wq, | ||
585 | &ceph_inode(inode)->i_pg_inv_work); | ||
586 | } | ||
587 | |||
588 | 576 | ||
589 | /* | 577 | /* |
590 | * we keep buffered readdir results attached to file->private_data | 578 | * we keep buffered readdir results attached to file->private_data |
@@ -772,10 +760,11 @@ extern int ceph_readdir_prepopulate(struct ceph_mds_request *req, | |||
772 | extern int ceph_inode_holds_cap(struct inode *inode, int mask); | 760 | extern int ceph_inode_holds_cap(struct inode *inode, int mask); |
773 | 761 | ||
774 | extern int ceph_inode_set_size(struct inode *inode, loff_t size); | 762 | extern int ceph_inode_set_size(struct inode *inode, loff_t size); |
775 | extern void ceph_inode_writeback(struct work_struct *work); | ||
776 | extern void ceph_vmtruncate_work(struct work_struct *work); | ||
777 | extern void __ceph_do_pending_vmtruncate(struct inode *inode); | 763 | extern void __ceph_do_pending_vmtruncate(struct inode *inode); |
778 | extern void __ceph_queue_vmtruncate(struct inode *inode); | 764 | extern void ceph_queue_vmtruncate(struct inode *inode); |
765 | |||
766 | extern void ceph_queue_invalidate(struct inode *inode); | ||
767 | extern void ceph_queue_writeback(struct inode *inode); | ||
779 | 768 | ||
780 | extern int ceph_do_getattr(struct inode *inode, int mask); | 769 | extern int ceph_do_getattr(struct inode *inode, int mask); |
781 | extern int ceph_permission(struct inode *inode, int mask); | 770 | extern int ceph_permission(struct inode *inode, int mask); |