author     Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
committer  Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
commit     336f5899d287f06d8329e208fc14ce50f7ec9698 (patch)
tree       9b762d450d5eb248a6ff8317badb7e223d93ed58 /fs
parent     a4ab2773205e8b94c18625455f85e3b6bb9d7ad6 (diff)
parent     db217dece3003df0841bacf9556b5c06aa097dae (diff)

Merge branch 'master' into export-slabh
Diffstat (limited to 'fs')
-rw-r--r--  fs/fat/namei_vfat.c     6
-rw-r--r--  fs/logfs/dev_bdev.c     9
-rw-r--r--  fs/logfs/dir.c          4
-rw-r--r--  fs/logfs/journal.c      7
-rw-r--r--  fs/logfs/logfs.h        1
-rw-r--r--  fs/logfs/readwrite.c   13
-rw-r--r--  fs/logfs/segment.c     54
-rw-r--r--  fs/logfs/super.c       15
-rw-r--r--  fs/proc/base.c          5
-rw-r--r--  fs/proc/task_mmu.c     87
-rw-r--r--  fs/reiserfs/super.c    10
11 files changed, 115 insertions, 96 deletions
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c1ef50154868..6fcc7e71fbaa 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
 {
         struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
         wchar_t *ip, *ext_start, *end, *name_start;
-        unsigned char base[9], ext[4], buf[8], *p;
+        unsigned char base[9], ext[4], buf[5], *p;
         unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
         int chl, chi;
         int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
                 return 0;
         }
 
-        i = jiffies & 0xffff;
+        i = jiffies;
         sz = (jiffies >> 16) & 0x7;
         if (baselen > 2) {
                 baselen = numtail2_baselen;
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
         name_res[baselen + 4] = '~';
         name_res[baselen + 5] = '1' + sz;
         while (1) {
-                sprintf(buf, "%04X", i);
+                snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
                 memcpy(&name_res[baselen], buf, 4);
                 if (vfat_find_form(dir, name_res) < 0)
                         break;
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 18e8c144c7f1..243c00071f76 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -81,6 +81,7 @@ static void writeseg_end_io(struct bio *bio, int err)
                 prefetchw(&bvec->bv_page->flags);
 
                 end_page_writeback(page);
+                page_cache_release(page);
         } while (bvec >= bio->bi_io_vec);
         bio_put(bio);
         if (atomic_dec_and_test(&super->s_pending_writes))
@@ -98,8 +99,10 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
         unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
         int i;
 
+        if (max_pages > BIO_MAX_PAGES)
+                max_pages = BIO_MAX_PAGES;
         bio = bio_alloc(GFP_NOFS, max_pages);
-        BUG_ON(!bio); /* FIXME: handle this */
+        BUG_ON(!bio);
 
         for (i = 0; i < nr_pages; i++) {
                 if (i >= max_pages) {
@@ -192,8 +195,10 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
         unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
         int i;
 
+        if (max_pages > BIO_MAX_PAGES)
+                max_pages = BIO_MAX_PAGES;
         bio = bio_alloc(GFP_NOFS, max_pages);
-        BUG_ON(!bio); /* FIXME: handle this */
+        BUG_ON(!bio);
 
         for (i = 0; i < nr_pages; i++) {
                 if (i >= max_pages) {
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index e1cb99566100..2396a85c0f55 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -303,12 +303,12 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
                                 (filler_t *)logfs_readpage, NULL);
                 if (IS_ERR(page))
                         return PTR_ERR(page);
-                dd = kmap_atomic(page, KM_USER0);
+                dd = kmap(page);
                 BUG_ON(dd->namelen == 0);
 
                 full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
                                 pos, be64_to_cpu(dd->ino), dd->type);
-                kunmap_atomic(dd, KM_USER0);
+                kunmap(page);
                 page_cache_release(page);
                 if (full)
                         break;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index f186043e862a..33bd260b8309 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -801,6 +801,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
 {
         struct logfs_super *super = logfs_super(sb);
         struct logfs_area *area = super->s_journal_area;
+        struct btree_head32 *head = &super->s_reserved_segments;
         u32 segno, ec;
         int i, err;
 
@@ -808,6 +809,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
         /* Drop old segments */
         journal_for_each(i)
                 if (super->s_journal_seg[i]) {
+                        btree_remove32(head, super->s_journal_seg[i]);
                         logfs_set_segment_unreserved(sb,
                                         super->s_journal_seg[i],
                                         super->s_journal_ec[i]);
@@ -820,8 +822,13 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
                 super->s_journal_seg[i] = segno;
                 super->s_journal_ec[i] = ec;
                 logfs_set_segment_reserved(sb, segno);
+                err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+                BUG_ON(err); /* mempool should prevent this */
+                err = logfs_erase_segment(sb, segno, 1);
+                BUG_ON(err); /* FIXME: remount-ro would be nicer */
         }
         /* Manually move journal_area */
+        freeseg(sb, area->a_segno);
         area->a_segno = super->s_journal_seg[0];
         area->a_is_open = 0;
         area->a_used_bytes = 0;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 129779431373..b84b0eec6024 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -587,6 +587,7 @@ void move_page_to_btree(struct page *page);
 int logfs_init_mapping(struct super_block *sb);
 void logfs_sync_area(struct logfs_area *area);
 void logfs_sync_segments(struct super_block *sb);
+void freeseg(struct super_block *sb, u32 segno);
 
 /* area handling */
 int logfs_init_areas(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index d5919af2c7a7..bff40253dfb2 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1595,7 +1595,6 @@ int logfs_delete(struct inode *inode, pgoff_t index,
         return ret;
 }
 
-/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
 int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
                 gc_level_t gc_level, long flags)
 {
@@ -1612,6 +1611,18 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
                 if (level != 0)
                         alloc_indirect_block(inode, page, 0);
                 err = logfs_write_buf(inode, page, flags);
+                if (!err && shrink_level(gc_level) == 0) {
+                        /* Rewrite cannot mark the inode dirty but has to
+                         * write it immediatly.
+                         * Q: Can't we just create an alias for the inode
+                         * instead? And if not, why not?
+                         */
+                        if (inode->i_ino == LOGFS_INO_MASTER)
+                                logfs_write_anchor(inode->i_sb);
+                        else {
+                                err = __logfs_write_inode(inode, flags);
+                        }
+                }
         }
         logfs_put_write_page(page);
         return err;
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 614d7a6fda2d..801a3a141625 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -94,50 +94,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
         } while (len);
 }
 
-/*
- * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
+static void pad_partial_page(struct logfs_area *area)
 {
         struct super_block *sb = area->a_sb;
-        struct logfs_super *super = logfs_super(sb);
         struct page *page;
         u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
         pgoff_t index = ofs >> PAGE_SHIFT;
         long offset = ofs & (PAGE_SIZE-1);
         u32 len = PAGE_SIZE - offset;
 
-        if (len == PAGE_SIZE) {
-                /* The math in this function can surely use some love */
-                len = 0;
-        }
-        if (len) {
-                BUG_ON(area->a_used_bytes >= super->s_segsize);
-
-                page = get_mapping_page(area->a_sb, index, 0);
+        if (len % PAGE_SIZE) {
+                page = get_mapping_page(sb, index, 0);
                 BUG_ON(!page); /* FIXME: reserve a pool */
                 memset(page_address(page) + offset, 0xff, len);
                 SetPagePrivate(page);
                 page_cache_release(page);
         }
+}
 
-        if (!final)
-                return;
+static void pad_full_pages(struct logfs_area *area)
+{
+        struct super_block *sb = area->a_sb;
+        struct logfs_super *super = logfs_super(sb);
+        u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+        u32 len = super->s_segsize - area->a_used_bytes;
+        pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+        pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+        struct page *page;
 
-        area->a_used_bytes += len;
-        for ( ; area->a_used_bytes < super->s_segsize;
-                        area->a_used_bytes += PAGE_SIZE) {
-                /* Memset another page */
-                index++;
-                page = get_mapping_page(area->a_sb, index, 0);
+        while (no_indizes) {
+                page = get_mapping_page(sb, index, 0);
                 BUG_ON(!page); /* FIXME: reserve a pool */
-                memset(page_address(page), 0xff, PAGE_SIZE);
+                SetPageUptodate(page);
+                memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
                 SetPagePrivate(page);
                 page_cache_release(page);
+                index++;
+                no_indizes--;
         }
 }
 
 /*
+ * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
+ * Also make sure we allocate (and memset) all pages for final writeout.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+        pad_partial_page(area);
+        if (final)
+                pad_full_pages(area);
+}
+
+/*
  * We have to be careful with the alias tree. Since lookup is done by bix,
  * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
  * indirect blocks. So always use it through accessor functions.
@@ -684,7 +692,7 @@ int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
         return 0;
 }
 
-static void freeseg(struct super_block *sb, u32 segno)
+void freeseg(struct super_block *sb, u32 segno)
 {
         struct logfs_super *super = logfs_super(sb);
         struct address_space *mapping = super->s_mapping_inode->i_mapping;
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 46990eafe052..b60bfac3263c 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -278,7 +278,7 @@ static int logfs_recover_sb(struct super_block *sb)
         }
         if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
                 printk(KERN_INFO"Superblocks don't match - fixing.\n");
-                return write_one_sb(sb, super->s_devops->find_last_sb);
+                return logfs_write_sb(sb);
         }
         /* If neither is valid now, something's wrong. Didn't we properly
          * check them before?!? */
@@ -290,6 +290,10 @@ static int logfs_make_writeable(struct super_block *sb)
 {
         int err;
 
+        err = logfs_open_segfile(sb);
+        if (err)
+                return err;
+
         /* Repair any broken superblock copies */
         err = logfs_recover_sb(sb);
         if (err)
@@ -300,10 +304,6 @@ static int logfs_make_writeable(struct super_block *sb)
         if (err)
                 return err;
 
-        err = logfs_open_segfile(sb);
-        if (err)
-                return err;
-
         /* Do one GC pass before any data gets dirtied */
         logfs_gc_pass(sb);
 
@@ -329,7 +329,7 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
 
         sb->s_root = d_alloc_root(rootdir);
         if (!sb->s_root)
-                goto fail;
+                goto fail2;
 
         super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
         if (!super->s_erase_page)
@@ -573,8 +573,7 @@ int logfs_get_sb_device(struct file_system_type *type, int flags,
         return 0;
 
 err1:
-        up_write(&sb->s_umount);
-        deactivate_super(sb);
+        deactivate_locked_super(sb);
         return err;
 err0:
         kfree(super);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9e82adc37b0c..7621db800a74 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -443,12 +443,13 @@ static const struct file_operations proc_lstats_operations = {
 unsigned long badness(struct task_struct *p, unsigned long uptime);
 static int proc_oom_score(struct task_struct *task, char *buffer)
 {
-        unsigned long points;
+        unsigned long points = 0;
         struct timespec uptime;
 
         do_posix_clock_monotonic_gettime(&uptime);
         read_lock(&tasklist_lock);
-        points = badness(task->group_leader, uptime.tv_sec);
+        if (pid_alive(task))
+                points = badness(task, uptime.tv_sec);
         read_unlock(&tasklist_lock);
         return sprintf(buffer, "%lu\n", points);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2d45889931f6..caf0337dff73 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -407,6 +407,7 @@ static int show_smap(struct seq_file *m, void *v)
 
         memset(&mss, 0, sizeof mss);
         mss.vma = vma;
+        /* mmap_sem is held in m_start */
         if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                 walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 
@@ -553,7 +554,8 @@ const struct file_operations proc_clear_refs_operations = {
 };
 
 struct pagemapread {
-        u64 __user *out, *end;
+        int pos, len;
+        u64 *buffer;
 };
 
 #define PM_ENTRY_BYTES sizeof(u64)
@@ -576,10 +578,8 @@ struct pagemapread {
 static int add_to_pagemap(unsigned long addr, u64 pfn,
                           struct pagemapread *pm)
 {
-        if (put_user(pfn, pm->out))
-                return -EFAULT;
-        pm->out++;
-        if (pm->out >= pm->end)
+        pm->buffer[pm->pos++] = pfn;
+        if (pm->pos >= pm->len)
                 return PM_END_OF_BUFFER;
         return 0;
 }
@@ -721,21 +721,20 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
  * determine which areas of memory are actually mapped and llseek to
  * skip over unmapped regions.
  */
+#define PAGEMAP_WALK_SIZE (PMD_SIZE)
 static ssize_t pagemap_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
 {
         struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-        struct page **pages, *page;
-        unsigned long uaddr, uend;
         struct mm_struct *mm;
         struct pagemapread pm;
-        int pagecount;
         int ret = -ESRCH;
         struct mm_walk pagemap_walk = {};
         unsigned long src;
         unsigned long svpfn;
         unsigned long start_vaddr;
         unsigned long end_vaddr;
+        int copied = 0;
 
         if (!task)
                 goto out;
@@ -758,35 +757,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
         if (!mm)
                 goto out_task;
 
-
-        uaddr = (unsigned long)buf & PAGE_MASK;
-        uend = (unsigned long)(buf + count);
-        pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-        ret = 0;
-        if (pagecount == 0)
-                goto out_mm;
-        pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+        pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+        pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
         ret = -ENOMEM;
-        if (!pages)
+        if (!pm.buffer)
                 goto out_mm;
 
-        down_read(&current->mm->mmap_sem);
-        ret = get_user_pages(current, current->mm, uaddr, pagecount,
-                             1, 0, pages, NULL);
-        up_read(&current->mm->mmap_sem);
-
-        if (ret < 0)
-                goto out_free;
-
-        if (ret != pagecount) {
-                pagecount = ret;
-                ret = -EFAULT;
-                goto out_pages;
-        }
-
-        pm.out = (u64 __user *)buf;
-        pm.end = (u64 __user *)(buf + count);
-
         pagemap_walk.pmd_entry = pagemap_pte_range;
         pagemap_walk.pte_hole = pagemap_pte_hole;
         pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -808,23 +784,36 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
          * user buffer is tracked in "pm", and the walk
          * will stop when we hit the end of the buffer.
          */
-        ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
-        if (ret == PM_END_OF_BUFFER)
-                ret = 0;
-        /* don't need mmap_sem for these, but this looks cleaner */
-        *ppos += (char __user *)pm.out - buf;
-        if (!ret)
-                ret = (char __user *)pm.out - buf;
-
-out_pages:
-        for (; pagecount; pagecount--) {
-                page = pages[pagecount-1];
-                if (!PageReserved(page))
-                        SetPageDirty(page);
-                page_cache_release(page);
+        ret = 0;
+        while (count && (start_vaddr < end_vaddr)) {
+                int len;
+                unsigned long end;
+
+                pm.pos = 0;
+                end = start_vaddr + PAGEMAP_WALK_SIZE;
+                /* overflow ? */
+                if (end < start_vaddr || end > end_vaddr)
+                        end = end_vaddr;
+                down_read(&mm->mmap_sem);
+                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+                up_read(&mm->mmap_sem);
+                start_vaddr = end;
+
+                len = min(count, PM_ENTRY_BYTES * pm.pos);
+                if (copy_to_user(buf, pm.buffer, len) < 0) {
+                        ret = -EFAULT;
+                        goto out_free;
+                }
+                copied += len;
+                buf += len;
+                count -= len;
         }
+        *ppos += copied;
+        if (!ret || ret == PM_END_OF_BUFFER)
+                ret = copied;
+
 out_free:
-        kfree(pages);
+        kfree(pm.buffer);
 out_mm:
         mmput(mm);
 out_task:
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d8fd90d83ab3..59125fb36d42 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
         save_mount_options(s, data);
 
         sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
-        if (!sbi) {
-                errval = -ENOMEM;
-                goto error_alloc;
-        }
+        if (!sbi)
+                return -ENOMEM;
         s->s_fs_info = sbi;
         /* Set default values for options: non-aggressive tails, RO on errors */
         REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
         return (0);
 
 error:
-        reiserfs_write_unlock(s);
-error_alloc:
         if (jinit_done) {        /* kill the commit thread, free journal ram */
                 journal_release_error(NULL, s);
         }
 
+        reiserfs_write_unlock(s);
+
         reiserfs_free_bitmap_cache(s);
         if (SB_BUFFER_WITH_SB(s))
                 brelse(SB_BUFFER_WITH_SB(s));