about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAnton Altaparmakov <aia21@cantab.net>2005-09-12 10:43:03 -0400
committerAnton Altaparmakov <aia21@cantab.net>2005-09-12 10:43:03 -0400
commit89ecf38c7aee6eb3f6aaf40a6d196ddff4b6d4a8 (patch)
tree5ef720a9cb7fa8320e4a813ce53bcb82c2d42aff
parent5d46770f5f8bb0eff0a82596860958be13e7baf1 (diff)
NTFS: Mask out __GFP_HIGHMEM when doing kmalloc() in __ntfs_malloc() as it
otherwise causes a BUG(). Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
-rw-r--r--fs/ntfs/ChangeLog3
-rw-r--r--fs/ntfs/malloc.h2
2 files changed, 1 insertion, 4 deletions
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 7f4007242893..49eafbdb15c1 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -34,9 +34,6 @@ ToDo/Notes:
 	journals with two different restart pages. We sanity check both and
 	either use the only sane one or the more recent one of the two in the
 	case that both are valid.
-	- Modify fs/ntfs/malloc.h::ntfs_malloc_nofs() to do the kmalloc() based
-	  allocations with __GFP_HIGHMEM, analogous to how the vmalloc() based
-	  allocations are done.
 	- Add fs/ntfs/malloc.h::ntfs_malloc_nofs_nofail() which is analogous to
 	  ntfs_malloc_nofs() but it performs allocations with __GFP_NOFAIL and
 	  hence cannot fail.
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index 9994e019a3cf..3288bcc2c4aa 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -45,7 +45,7 @@ static inline void *__ntfs_malloc(unsigned long size,
 	if (likely(size <= PAGE_SIZE)) {
 		BUG_ON(!size);
 		/* kmalloc() has per-CPU caches so is faster for now. */
-		return kmalloc(PAGE_SIZE, gfp_mask);
+		return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
 		/* return (void *)__get_free_page(gfp_mask); */
 	}
 	if (likely(size >> PAGE_SHIFT < num_physpages))