Diffstat (limited to 'fs')
-rw-r--r--  fs/jffs2/readinode.c         2
-rw-r--r--  fs/ntfs/super.c             25
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c 13
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  81
4 files changed, 28 insertions(+), 93 deletions(-)
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index e22de8397b74..d32ee9412cb9 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -567,7 +567,7 @@ static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
 			else BUG();
 		}
 	}
-	list->rb_node = NULL;
+	*list = RB_ROOT;
 }
 
 static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
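
The hunk above replaces an open-coded reset of the tree's root pointer with the
rbtree API's canonical empty-tree initializer. A minimal sketch of the idiom,
assuming <linux/rbtree.h>; the function name is illustrative, not from the patch:

#include <linux/rbtree.h>

/* RB_ROOT expands to an empty struct rb_root initializer, so assigning
 * it clears the whole struct instead of poking at its internals. */
static void example_reset_tree(struct rb_root *root)
{
	*root = RB_ROOT;	/* preferred over root->rb_node = NULL */
}
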
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1cf39dfaee7a..0de1db6cddbf 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -31,6 +31,7 @@
 #include <linux/vfs.h>
 #include <linux/moduleparam.h>
 #include <linux/smp_lock.h>
+#include <linux/bitmap.h>
 
 #include "sysctl.h"
 #include "logfile.h"
@@ -2458,7 +2459,6 @@ static void ntfs_put_super(struct super_block *sb)
 static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
 	s64 nr_free = vol->nr_clusters;
-	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index, max_index;
@@ -2477,7 +2477,8 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
 			max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned int i;
+		unsigned long *kaddr;
+
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2490,16 +2491,16 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page, KM_USER0);
 		/*
-		 * For each 4 bytes, subtract the number of set bits. If this
+		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
-			nr_free -= (s64)hweight32(kaddr[i]);
+		nr_free -= bitmap_weight(kaddr,
+				PAGE_CACHE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}
@@ -2538,7 +2539,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		s64 nr_free, const pgoff_t max_index)
 {
-	u32 *kaddr;
 	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index;
@@ -2548,7 +2548,8 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned int i;
+		unsigned long *kaddr;
+
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2561,16 +2562,16 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page, KM_USER0);
 		/*
-		 * For each 4 bytes, subtract the number of set bits. If this
+		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
-			nr_free -= (s64)hweight32(kaddr[i]);
+		nr_free -= bitmap_weight(kaddr,
+				PAGE_CACHE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}
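
Both loops above collapse a per-u32 hweight32() pass into a single
bitmap_weight() call, which is why kaddr's type changes to unsigned long *:
the <linux/bitmap.h> API works on unsigned long words. A minimal sketch under
those assumptions; the helper name is illustrative, not from the patch:

#include <linux/bitmap.h>
#include <linux/bitops.h>	/* BITS_PER_BYTE */
#include <linux/types.h>

/* Count the set bits in one page-sized chunk of an on-disk bitmap.
 * bitmap_weight(src, nbits) returns the number of set bits among the
 * first nbits of src. */
static s64 example_page_weight(const unsigned long *kaddr,
		unsigned long page_size)
{
	return bitmap_weight(kaddr, page_size * BITS_PER_BYTE);
}
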
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 9083357f9e44..99628508cb11 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -932,6 +932,9 @@ xfs_aops_discard_page(
 	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
 		goto out_invalidate;
 
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
 	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 		"page discard on page %p, inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
@@ -964,8 +967,10 @@ xfs_aops_discard_page(
 
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
-			"page discard failed delalloc mapping lookup.");
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard failed delalloc mapping lookup.");
+			}
 			break;
 		}
 		if (!nimaps) {
@@ -991,8 +996,10 @@ xfs_aops_discard_page(
 		ASSERT(!flist.xbf_count && !flist.xbf_first);
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
-			"page discard unable to remove delalloc mapping.");
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard unable to remove delalloc mapping.");
+			}
 			break;
 		}
 next_buffer:
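
All three xfs_aops.c hunks serve one goal: once the filesystem has been
forcibly shut down, page discard failures are expected, so the CE_ALERT
messages would only flood the log. A condensed sketch of the pattern, with
names taken from the patch context but the helper itself illustrative:

/* Emit the discard alert only when it is news, i.e. when the fs has
 * not already been forcibly shut down. */
static void example_report_discard_error(struct xfs_inode *ip)
{
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return;		/* expected after shutdown; stay quiet */
	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
			"page discard failed delalloc mapping lookup.");
}
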
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 6f76ba85f193..bd111b7e1daa 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -168,75 +168,6 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable. If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail. This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
-/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -337,7 +268,8 @@ xfs_buf_free(
 	uint		i;
 
 	if (xfs_buf_is_vmapped(bp))
-		free_address(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset,
+				bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -457,10 +389,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-				VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+				-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1955,9 +1885,6 @@ xfsbufd(
 			xfs_buf_iostrategy(bp);
 			count++;
 		}
-
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 
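
The xfs_buf.c hunks delete the driver-local vunmap batching (free_address()
and purge_addresses() with their a_list free list) in favour of the generic
vm_map_ram()/vm_unmap_ram() pair from <linux/vmalloc.h>, which batches lazy
unmaps internally. A minimal sketch of the replacement API as used here
(signatures as of this kernel era; the wrapper names are illustrative):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_map_pages(struct page **pages, unsigned int count)
{
	/* node -1 means no NUMA preference; lazy TLB flushing and unmap
	 * batching now happen inside the vmalloc layer, so callers no
	 * longer keep their own free list. */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void example_unmap_pages(void *addr, unsigned int count)
{
	/* count must match the corresponding vm_map_ram() call */
	vm_unmap_ram(addr, count);
}
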