Diffstat (limited to 'fs/ceph/addr.c')
-rw-r--r--  fs/ceph/addr.c  44
1 file changed, 19 insertions(+), 25 deletions(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index a9005d862ed4..efbc604001c8 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 
 	/* dirty the head */
 	spin_lock(&inode->i_lock);
-	if (ci->i_wrbuffer_ref_head == 0)
+	if (ci->i_head_snapc == NULL)
 		ci->i_head_snapc = ceph_get_snap_context(snapc);
 	++ci->i_wrbuffer_ref_head;
 	if (ci->i_wrbuffer_ref == 0)
@@ -105,13 +105,7 @@ static int ceph_set_page_dirty(struct page *page)
 	spin_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(!PageUptodate(page));
-
-		if (mapping_cap_account_dirty(mapping)) {
-			__inc_zone_page_state(page, NR_FILE_DIRTY);
-			__inc_bdi_stat(mapping->backing_dev_info,
-					BDI_RECLAIMABLE);
-			task_io_account_write(PAGE_CACHE_SIZE);
-		}
+		account_page_dirtied(page, page->mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 
@@ -274,7 +268,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
 	int rc = 0;
 	struct page **pages;
-	struct pagevec pvec;
 	loff_t offset;
 	u64 len;
 
@@ -297,8 +290,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 	if (rc < 0)
 		goto out;
 
-	/* set uptodate and add to lru in pagevec-sized chunks */
-	pagevec_init(&pvec, 0);
 	for (; !list_empty(page_list) && len > 0;
 	     rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) {
 		struct page *page =
@@ -312,7 +303,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 			zero_user_segment(page, s, PAGE_CACHE_SIZE);
 		}
 
-		if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+					  GFP_NOFS)) {
 			page_cache_release(page);
 			dout("readpages %p add_to_page_cache failed %p\n",
 			     inode, page);
@@ -323,10 +315,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 		unlock_page(page);
-		if (pagevec_add(&pvec, page) == 0)
-			pagevec_lru_add_file(&pvec);   /* add to lru */
+		page_cache_release(page);
 	}
-	pagevec_lru_add_file(&pvec);
 	rc = 0;
 
 out:
@@ -356,7 +346,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
 			break;
 		}
 	}
-	if (!snapc && ci->i_head_snapc) {
+	if (!snapc && ci->i_wrbuffer_ref_head) {
 		snapc = ceph_get_snap_context(ci->i_head_snapc);
 		dout(" head snapc %p has %d dirty pages\n",
 		     snapc, ci->i_wrbuffer_ref_head);
@@ -421,8 +411,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	if (i_size < page_off + len)
 		len = i_size - page_off;
 
-	dout("writepage %p page %p index %lu on %llu~%u\n",
-	     inode, page, page->index, page_off, len);
+	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
+	     inode, page, page->index, page_off, len, snapc);
 
 	writeback_stat = atomic_long_inc_return(&client->writeback_count);
 	if (writeback_stat >
@@ -557,7 +547,7 @@ static void writepages_finish(struct ceph_osd_request *req,
 		 * page truncation thread, possibly losing some data that
 		 * raced its way in
 		 */
-		if ((issued & CEPH_CAP_FILE_CACHE) == 0)
+		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
 			generic_error_remove_page(inode->i_mapping, page);
 
 		unlock_page(page);
@@ -568,7 +558,7 @@ static void writepages_finish(struct ceph_osd_request *req,
 	ceph_release_pages(req->r_pages, req->r_num_pages);
 	if (req->r_pages_from_pool)
 		mempool_free(req->r_pages,
-			     ceph_client(inode->i_sb)->wb_pagevec_pool);
+			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
 	else
 		kfree(req->r_pages);
 	ceph_osdc_put_request(req);
@@ -776,7 +766,8 @@ get_more_pages:
 			/* ok */
 			if (locked_pages == 0) {
 				/* prepare async write request */
-				offset = page->index << PAGE_CACHE_SHIFT;
+				offset = (unsigned long long)page->index
+					<< PAGE_CACHE_SHIFT;
 				len = wsize;
 				req = ceph_osdc_new_request(&client->osdc,
 					    &ci->i_layout,
@@ -802,9 +793,12 @@ get_more_pages:
 			dout("%p will write page %p idx %lu\n",
 			     inode, page, page->index);
 
-			writeback_stat = atomic_long_inc_return(&client->writeback_count);
-			if (writeback_stat > CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) {
-				set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
+			writeback_stat =
+			       atomic_long_inc_return(&client->writeback_count);
+			if (writeback_stat > CONGESTION_ON_THRESH(
+				    client->mount_args->congestion_kb)) {
+				set_bdi_congested(&client->backing_dev_info,
+						  BLK_RW_ASYNC);
 			}
 
 			set_page_writeback(page);
@@ -1041,7 +1035,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 		*pagep = page;
 
 		dout("write_begin file %p inode %p page %p %d~%d\n", file,
-		     inode, page, (int)pos, (int)len);
+		     inode, page, (int)pos, (int)len);
 
 		r = ceph_update_writeable_page(file, pos, len, page);
 	} while (r == -EAGAIN);