Diffstat (limited to 'fs')

-rw-r--r--  fs/9p/Kconfig                |   5
-rw-r--r--  fs/9p/vfs_inode_dotl.c       |  11
-rw-r--r--  fs/Kconfig                   |  18
-rw-r--r--  fs/block_dev.c               |  17
-rw-r--r--  fs/ceph/addr.c               |   5
-rw-r--r--  fs/ceph/caps.c               |  61
-rw-r--r--  fs/ceph/dir.c                |   7
-rw-r--r--  fs/ceph/export.c             |  25
-rw-r--r--  fs/ceph/mds_client.c         |   7
-rw-r--r--  fs/ceph/mds_client.h         |   1
-rw-r--r--  fs/dcache.c                  |   8
-rw-r--r--  fs/drop_caches.c             |   5
-rw-r--r--  fs/exec.c                    |  12
-rw-r--r--  fs/fscache/operation.c       |  10
-rw-r--r--  fs/fscache/page.c            |  13
-rw-r--r--  fs/gfs2/glock.c              |   5
-rw-r--r--  fs/gfs2/quota.c              |  12
-rw-r--r--  fs/gfs2/quota.h              |   4
-rw-r--r--  fs/hugetlbfs/inode.c         |   4
-rw-r--r--  fs/inode.c                   |   9
-rw-r--r--  fs/mbcache.c                 |  10
-rw-r--r--  fs/ncpfs/inode.c             |   4
-rw-r--r--  fs/nfs/dir.c                 |   5
-rw-r--r--  fs/nfs/internal.h            |   2
-rw-r--r--  fs/partitions/check.c        |   8
-rw-r--r--  fs/proc/internal.h           |   8
-rw-r--r--  fs/proc/task_mmu.c           | 204
-rw-r--r--  fs/quota/dquot.c             |   5
-rw-r--r--  fs/splice.c                  |  33
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c   |   4
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c  |   5
-rw-r--r--  fs/xfs/quota/xfs_qm.c        |   6

32 files changed, 391 insertions, 142 deletions
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 814ac4e213a8..0a93dc1cb4ac 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -1,6 +1,6 @@
 config 9P_FS
-	tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
-	depends on INET && NET_9P && EXPERIMENTAL
+	tristate "Plan 9 Resource Sharing Support (9P2000)"
+	depends on INET && NET_9P
 	help
 	  If you say Y here, you will get experimental support for
 	  Plan 9 resource sharing via the 9P2000 protocol.
@@ -10,7 +10,6 @@ config 9P_FS
 	  If unsure, say N.
 
 if 9P_FS
-
 config 9P_FSCACHE
 	bool "Enable 9P client caching support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 82a7c38ddad0..691c78f58bef 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -259,7 +259,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
 		if (IS_ERR(inode_fid)) {
 			err = PTR_ERR(inode_fid);
 			mutex_unlock(&v9inode->v_mutex);
-			goto error;
+			goto err_clunk_old_fid;
 		}
 		v9inode->writeback_fid = (void *) inode_fid;
 	}
@@ -267,8 +267,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
 	/* Since we are opening a file, assign the open fid to the file */
 	filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
 	if (IS_ERR(filp)) {
-		p9_client_clunk(ofid);
-		return PTR_ERR(filp);
+		err = PTR_ERR(filp);
+		goto err_clunk_old_fid;
 	}
 	filp->private_data = ofid;
 #ifdef CONFIG_9P_FSCACHE
@@ -278,10 +278,11 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
 	return 0;
 
 error:
-	if (ofid)
-		p9_client_clunk(ofid);
 	if (fid)
 		p9_client_clunk(fid);
+err_clunk_old_fid:
+	if (ofid)
+		p9_client_clunk(ofid);
 	return err;
 }
 
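(Aside, not part of the patch: the relabelled exit path follows the usual kernel unwind convention, where labels are ordered so a jump releases only what was actually acquired, and error: now falls through into err_clunk_old_fid:. A minimal user-space sketch of the idiom, with hypothetical resources standing in for the two fids:)

#include <stdlib.h>

static int example(void)
{
	void *ofid, *fid;
	int err;

	ofid = malloc(16);		/* like ofid: acquired first */
	if (!ofid)
		return -1;
	fid = malloc(16);		/* like fid: acquired second */
	if (!fid) {
		err = -1;
		goto err_release_old;	/* only ofid exists yet */
	}
	/* ... work; a later failure would jump to "error" instead ... */
	free(fid);
	free(ofid);
	return 0;

/* error:  would free(fid) here, then fall through */
err_release_old:
	free(ofid);			/* later labels release less */
	return err;
}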
diff --git a/fs/Kconfig b/fs/Kconfig
index f3aa9b08b228..979992dcb386 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -121,9 +121,25 @@ config TMPFS
 
 	  See <file:Documentation/filesystems/tmpfs.txt> for details.
 
+config TMPFS_XATTR
+	bool "Tmpfs extended attributes"
+	depends on TMPFS
+	default n
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  Currently this enables support for the trusted.* and
+	  security.* namespaces.
+
+	  If unsure, say N.
+
+	  You need this for POSIX ACL support on tmpfs.
+
 config TMPFS_POSIX_ACL
 	bool "Tmpfs POSIX Access Control Lists"
-	depends on TMPFS
+	depends on TMPFS_XATTR
 	select GENERIC_ACL
 	help
 	  POSIX Access Control Lists (ACLs) support permissions for users and
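(As an illustration of what TMPFS_XATTR enables, not part of the patch: with the option on, a suitably privileged process can attach a trusted.* attribute to a tmpfs inode via setxattr(2). The path below is hypothetical.)

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	/* "/dev/shm/example" is a hypothetical file on a tmpfs mount;
	 * trusted.* needs CAP_SYS_ADMIN, security.* is also supported. */
	if (setxattr("/dev/shm/example", "trusted.tag", "v1", 2, 0) != 0)
		perror("setxattr");
	return 0;
}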
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bf9c7a720371..1f2b19978333 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1238,6 +1238,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 	res = __blkdev_get(bdev, mode, 0);
 
 	if (whole) {
+		struct gendisk *disk = whole->bd_disk;
+
 		/* finish claiming */
 		mutex_lock(&bdev->bd_mutex);
 		spin_lock(&bdev_lock);
@@ -1264,15 +1266,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 		spin_unlock(&bdev_lock);
 
 		/*
-		 * Block event polling for write claims.  Any write
-		 * holder makes the write_holder state stick until all
-		 * are released.  This is good enough and tracking
-		 * individual writeable reference is too fragile given
-		 * the way @mode is used in blkdev_get/put().
+		 * Block event polling for write claims if requested.  Any
+		 * write holder makes the write_holder state stick until
+		 * all are released.  This is good enough and tracking
+		 * individual writeable reference is too fragile given the
+		 * way @mode is used in blkdev_get/put().
 		 */
-		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+		if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
+		    !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
 			bdev->bd_write_holder = true;
-			disk_block_events(bdev->bd_disk);
+			disk_block_events(disk);
 		}
 
 		mutex_unlock(&bdev->bd_mutex);
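(A hedged sketch of the driver side, not taken from this patch: event blocking on exclusive write claims is now opt-in per disk, so a driver that wants the old behaviour would set the flag on its gendisk before registering it, roughly as below in probe context.)

	/* Opt this gendisk in to event blocking on exclusive write claims;
	 * without the flag, blkdev_get() now leaves event polling enabled. */
	disk->flags |= GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
	add_disk(disk);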
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 38b8ab554924..33da49dc3cc6 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -848,7 +848,8 @@ get_more_pages:
 		op->payload_len = cpu_to_le32(len);
 		req->r_request->hdr.data_len = cpu_to_le32(len);
 
-		ceph_osdc_start_request(&fsc->client->osdc, req, true);
+		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
+		BUG_ON(rc);
 		req = NULL;
 
 		/* continue? */
@@ -880,8 +881,6 @@ release_pvec_pages:
 out:
 	if (req)
 		ceph_osdc_put_request(req);
-	if (rc > 0)
-		rc = 0;  /* vfs expects us to return 0 */
 	ceph_put_snap_context(snapc);
 	dout("writepages done, rc = %d\n", rc);
 	return rc;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 2a5404c1c42f..1f72b00447c4 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -569,7 +569,8 @@ retry:
 		list_add_tail(&cap->session_caps, &session->s_caps);
 		session->s_nr_caps++;
 		spin_unlock(&session->s_cap_lock);
-	}
+	} else if (new_cap)
+		ceph_put_cap(mdsc, new_cap);
 
 	if (!ci->i_snap_realm) {
 		/*
@@ -2634,6 +2635,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 			      struct ceph_mds_session *session,
 			      int *open_target_sessions)
 {
+	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds = session->s_mds;
 	unsigned mseq = le32_to_cpu(ex->migrate_seq);
@@ -2670,6 +2672,19 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 			 * export targets, so that we get the matching IMPORT
 			 */
 			*open_target_sessions = 1;
+
+			/*
+			 * we can't flush dirty caps that we've seen the
+			 * EXPORT but no IMPORT for
+			 */
+			spin_lock(&mdsc->cap_dirty_lock);
+			if (!list_empty(&ci->i_dirty_item)) {
+				dout(" moving %p to cap_dirty_migrating\n",
+				     inode);
+				list_move(&ci->i_dirty_item,
+					  &mdsc->cap_dirty_migrating);
+			}
+			spin_unlock(&mdsc->cap_dirty_lock);
 		}
 		__ceph_remove_cap(cap);
 	}
@@ -2707,6 +2722,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
 		ci->i_cap_exporting_issued = 0;
 		ci->i_cap_exporting_mseq = 0;
 		ci->i_cap_exporting_mds = -1;
+
+		spin_lock(&mdsc->cap_dirty_lock);
+		if (!list_empty(&ci->i_dirty_item)) {
+			dout(" moving %p back to cap_dirty\n", inode);
+			list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
+		}
+		spin_unlock(&mdsc->cap_dirty_lock);
 	} else {
 		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
 		     inode, ci, mds, mseq);
@@ -2910,38 +2932,16 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
  */
 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
 {
-	struct ceph_inode_info *ci, *nci = NULL;
-	struct inode *inode, *ninode = NULL;
-	struct list_head *p, *n;
+	struct ceph_inode_info *ci;
+	struct inode *inode;
 
 	dout("flush_dirty_caps\n");
 	spin_lock(&mdsc->cap_dirty_lock);
-	list_for_each_safe(p, n, &mdsc->cap_dirty) {
-		if (nci) {
-			ci = nci;
-			inode = ninode;
-			ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
-			dout("flush_dirty_caps inode %p (was next inode)\n",
-			     inode);
-		} else {
-			ci = list_entry(p, struct ceph_inode_info,
-					i_dirty_item);
-			inode = igrab(&ci->vfs_inode);
-			BUG_ON(!inode);
-			dout("flush_dirty_caps inode %p\n", inode);
-		}
-		if (n != &mdsc->cap_dirty) {
-			nci = list_entry(n, struct ceph_inode_info,
-					 i_dirty_item);
-			ninode = igrab(&nci->vfs_inode);
-			BUG_ON(!ninode);
-			nci->i_ceph_flags |= CEPH_I_NOFLUSH;
-			dout("flush_dirty_caps next inode %p, noflush\n",
-			     ninode);
-		} else {
-			nci = NULL;
-			ninode = NULL;
-		}
+	while (!list_empty(&mdsc->cap_dirty)) {
+		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
+				      i_dirty_item);
+		inode = igrab(&ci->vfs_inode);
+		dout("flush_dirty_caps %p\n", inode);
 		spin_unlock(&mdsc->cap_dirty_lock);
 		if (inode) {
 			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
@@ -2951,6 +2951,7 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
 		spin_lock(&mdsc->cap_dirty_lock);
 	}
 	spin_unlock(&mdsc->cap_dirty_lock);
+	dout("flush_dirty_caps done\n");
 }
 
 /*
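(The rewritten ceph_flush_dirty_caps() is an instance of a common kernel idiom: take the first list entry under the spinlock, drop the lock to do work that may sleep, then re-take it; forward progress is guaranteed because the processing step removes the entry from the list. A generic sketch with hypothetical list, item, and process names:)

	spin_lock(&lock);
	while (!list_empty(&head)) {
		struct item *it = list_first_entry(&head, struct item, link);

		list_del_init(&it->link);	/* guarantees forward progress */
		spin_unlock(&lock);
		process(it);			/* may sleep; lock dropped */
		spin_lock(&lock);
	}
	spin_unlock(&lock);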
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1a867a3601ae..33729e822bb9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -360,7 +360,7 @@ more:
 	rinfo = &fi->last_readdir->r_reply_info;
 	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
 	     rinfo->dir_nr, off, fi->offset);
-	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
+	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
 		u64 pos = ceph_make_fpos(frag, off);
 		struct ceph_mds_reply_inode *in =
 			rinfo->dir_in[off - fi->offset].in;
@@ -1066,16 +1066,17 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
 	struct inode *inode = file->f_dentry->d_inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int left;
+	const int bufsize = 1024;
 
 	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
 		return -EISDIR;
 
 	if (!cf->dir_info) {
-		cf->dir_info = kmalloc(1024, GFP_NOFS);
+		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
 		if (!cf->dir_info)
 			return -ENOMEM;
 		cf->dir_info_len =
-			sprintf(cf->dir_info,
+			snprintf(cf->dir_info, bufsize,
 				"entries: %20lld\n"
 				" files: %20lld\n"
 				" subdirs: %20lld\n"
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e41056174bf8..a610d3d67488 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -86,6 +86,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
 static struct dentry *__fh_to_dentry(struct super_block *sb,
 				     struct ceph_nfs_fh *fh)
 {
+	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
 	struct inode *inode;
 	struct dentry *dentry;
 	struct ceph_vino vino;
@@ -95,8 +96,24 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
 	vino.ino = fh->ino;
 	vino.snap = CEPH_NOSNAP;
 	inode = ceph_find_inode(sb, vino);
-	if (!inode)
-		return ERR_PTR(-ESTALE);
+	if (!inode) {
+		struct ceph_mds_request *req;
+
+		req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
+					       USE_ANY_MDS);
+		if (IS_ERR(req))
+			return ERR_CAST(req);
+
+		req->r_ino1 = vino;
+		req->r_num_caps = 1;
+		err = ceph_mdsc_do_request(mdsc, NULL, req);
+		inode = req->r_target_inode;
+		if (inode)
+			igrab(inode);
+		ceph_mdsc_put_request(req);
+		if (!inode)
+			return ERR_PTR(-ESTALE);
+	}
 
 	dentry = d_obtain_alias(inode);
 	if (IS_ERR(dentry)) {
@@ -148,8 +165,10 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
 		snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash);
 		req->r_num_caps = 1;
 		err = ceph_mdsc_do_request(mdsc, NULL, req);
+		inode = req->r_target_inode;
+		if (inode)
+			igrab(inode);
 		ceph_mdsc_put_request(req);
-		inode = ceph_find_inode(sb, vino);
 		if (!inode)
 			return ERR_PTR(err ? err : -ESTALE);
 	}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d0fae4ce9ba5..79743d146be6 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -578,6 +578,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
 	if (dir) {
 		struct ceph_inode_info *ci = ceph_inode(dir);
 
+		ihold(dir);
 		spin_lock(&ci->i_unsafe_lock);
 		req->r_unsafe_dir = dir;
 		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
@@ -598,6 +599,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
 		spin_lock(&ci->i_unsafe_lock);
 		list_del_init(&req->r_unsafe_dir_item);
 		spin_unlock(&ci->i_unsafe_lock);
+
+		iput(req->r_unsafe_dir);
+		req->r_unsafe_dir = NULL;
 	}
 
 	ceph_mdsc_put_request(req);
@@ -2691,7 +2695,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 {
 	struct super_block *sb = mdsc->fsc->sb;
 	struct inode *inode;
-	struct ceph_inode_info *ci;
 	struct dentry *parent, *dentry;
 	struct ceph_dentry_info *di;
 	int mds = session->s_mds;
@@ -2728,7 +2731,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 		dout("handle_lease no inode %llx\n", vino.ino);
 		goto release;
 	}
-	ci = ceph_inode(inode);
 
 	/* dentry */
 	parent = d_find_alias(inode);
@@ -3002,6 +3004,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	spin_lock_init(&mdsc->snap_flush_lock);
 	mdsc->cap_flush_seq = 0;
 	INIT_LIST_HEAD(&mdsc->cap_dirty);
+	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
 	mdsc->num_cap_flushing = 0;
 	spin_lock_init(&mdsc->cap_dirty_lock);
 	init_waitqueue_head(&mdsc->cap_flushing_wq);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4e3a9cc0bba6..7d8a0d662d56 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -278,6 +278,7 @@ struct ceph_mds_client {
 
 	u64               cap_flush_seq;
 	struct list_head  cap_dirty;        /* inodes with dirty caps */
+	struct list_head  cap_dirty_migrating; /* ...that are migration... */
 	int               num_cap_flushing; /* # caps we are flushing */
 	spinlock_t        cap_dirty_lock;   /* protects above items */
 	wait_queue_head_t cap_flushing_wq;
diff --git a/fs/dcache.c b/fs/dcache.c
index 18b2a1f10ed8..37f72ee5bf7c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1220,7 +1220,7 @@ void shrink_dcache_parent(struct dentry * parent)
 EXPORT_SYMBOL(shrink_dcache_parent);
 
 /*
- * Scan `nr' dentries and return the number which remain.
+ * Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain.
  *
  * We need to avoid reentering the filesystem if the caller is performing a
  * GFP_NOFS allocation attempt.  One example deadlock is:
@@ -1231,8 +1231,12 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
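(This is the first of several conversions in this series from the old shrink(shrinker, nr, gfp_mask) callback to the consolidated struct shrink_control. A hedged sketch of the new calling convention as used above; the example_* helpers are hypothetical:)

	/* Sketch of a shrinker under the shrink_control API. */
	static int example_shrink(struct shrinker *s, struct shrink_control *sc)
	{
		if (sc->nr_to_scan == 0)
			return example_count();	/* just report cache size */
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;		/* avoid the GFP_NOFS deadlock */
		example_prune(sc->nr_to_scan);
		return example_count();
	}

	static struct shrinker example_shrinker = {
		.shrink	= example_shrink,
		.seeks	= DEFAULT_SEEKS,
	};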
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 98b77c89494c..c00e055b6282 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -40,9 +40,12 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
 static void drop_slab(void)
 {
 	int nr_objects;
+	struct shrink_control shrink = {
+		.gfp_mask = GFP_KERNEL,
+	};
 
 	do {
-		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
+		nr_objects = shrink_slab(&shrink, 1000, 1000);
 	} while (nr_objects > 10);
 }
 
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -200,7 +200,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {
-		ret = expand_stack_downwards(bprm->vma, pos);
+		ret = expand_downwards(bprm->vma, pos);
 		if (ret < 0)
 			return NULL;
 	}
@@ -600,7 +600,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	unsigned long length = old_end - old_start;
 	unsigned long new_start = old_start - shift;
 	unsigned long new_end = old_end - shift;
-	struct mmu_gather *tlb;
+	struct mmu_gather tlb;
 
 	BUG_ON(new_start > new_end);
 
@@ -626,12 +626,12 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		return -ENOMEM;
 
 	lru_add_drain();
-	tlb = tlb_gather_mmu(mm, 0);
+	tlb_gather_mmu(&tlb, mm, 0);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
 		 */
-		free_pgd_range(tlb, new_end, old_end, new_end,
+		free_pgd_range(&tlb, new_end, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : 0);
 	} else {
 		/*
@@ -640,10 +640,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * have constraints on va-space that make this illegal (IA64) -
 		 * for the others its just a little faster.
 		 */
-		free_pgd_range(tlb, old_start, old_end, new_end,
+		free_pgd_range(&tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : 0);
 	}
-	tlb_finish_mmu(tlb, new_end, old_end);
+	tlb_finish_mmu(&tlb, new_end, old_end);
 
 	/*
 	 * Shrink the vma to just the new range.  Always succeeds.
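(These hunks track an mm API change: tlb_gather_mmu() no longer returns a per-CPU mmu_gather pointer but initialises one the caller keeps on the stack. A minimal sketch of the new pattern as used by shift_arg_pages() above; the range variables are illustrative:)

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);		/* 0: not a full-mm teardown */
	free_pgd_range(&tlb, start, end, floor, ceiling);
	tlb_finish_mmu(&tlb, start, end);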
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 48a18f184d50..30afdfa7aec7 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -33,8 +33,6 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	_enter("{OBJ%x OP%x,%u}",
 	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
 
-	fscache_set_op_state(op, "EnQ");
-
 	ASSERT(list_empty(&op->pend_link));
 	ASSERT(op->processor != NULL);
 	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
@@ -66,8 +64,6 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
 static void fscache_run_op(struct fscache_object *object,
 			   struct fscache_operation *op)
 {
-	fscache_set_op_state(op, "Run");
-
 	object->n_in_progress++;
 	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
 		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -88,8 +84,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 
 	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
 
-	fscache_set_op_state(op, "SubmitX");
-
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
 	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
@@ -194,8 +188,6 @@ int fscache_submit_op(struct fscache_object *object,
 
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
-	fscache_set_op_state(op, "Submit");
-
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
 	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
@@ -335,8 +327,6 @@ void fscache_put_operation(struct fscache_operation *op)
 	if (!atomic_dec_and_test(&op->usage))
 		return;
 
-	fscache_set_op_state(op, "Put");
-
 	_debug("PUT OP");
 	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
 		BUG();
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 41c441c2058d..a2a5d19ece6a 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -155,11 +155,9 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
 	fscache_stat(&fscache_n_attr_changed_calls);
 
 	if (fscache_object_is_active(object)) {
-		fscache_set_op_state(op, "CallFS");
 		fscache_stat(&fscache_n_cop_attr_changed);
 		ret = object->cache->ops->attr_changed(object);
 		fscache_stat_d(&fscache_n_cop_attr_changed);
-		fscache_set_op_state(op, "Done");
 		if (ret < 0)
 			fscache_abort_object(object);
 	}
@@ -190,7 +188,6 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 
 	fscache_operation_init(op, fscache_attr_changed_op, NULL);
 	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
-	fscache_set_op_name(op, "Attr");
 
 	spin_lock(&cookie->lock);
 
@@ -257,7 +254,6 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 	op->context = context;
 	op->start_time = jiffies;
 	INIT_LIST_HEAD(&op->to_do);
-	fscache_set_op_name(&op->op, "Retr");
 	return op;
 }
 
@@ -368,7 +364,6 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
-	fscache_set_op_name(&op->op, "RetrRA1");
 
 	spin_lock(&cookie->lock);
 
@@ -487,7 +482,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 	op = fscache_alloc_retrieval(mapping, end_io_func, context);
 	if (!op)
 		return -ENOMEM;
-	fscache_set_op_name(&op->op, "RetrRAN");
 
 	spin_lock(&cookie->lock);
 
@@ -589,7 +583,6 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
 	if (!op)
 		return -ENOMEM;
-	fscache_set_op_name(&op->op, "RetrAL1");
 
 	spin_lock(&cookie->lock);
 
@@ -662,8 +655,6 @@ static void fscache_write_op(struct fscache_operation *_op)
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
 
-	fscache_set_op_state(&op->op, "GetPage");
-
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 
@@ -698,15 +689,12 @@ static void fscache_write_op(struct fscache_operation *_op)
 	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
-	fscache_set_op_state(&op->op, "Store");
 	fscache_stat(&fscache_n_store_pages);
 	fscache_stat(&fscache_n_cop_write_page);
 	ret = object->cache->ops->write_page(op, page);
 	fscache_stat_d(&fscache_n_cop_write_page);
-	fscache_set_op_state(&op->op, "EndWrite");
 	fscache_end_page_write(object, page);
 	if (ret < 0) {
-		fscache_set_op_state(&op->op, "Abort");
 		fscache_abort_object(object);
 	} else {
 		fscache_enqueue_operation(&op->op);
@@ -778,7 +766,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	fscache_operation_init(&op->op, fscache_write_op,
 			       fscache_release_write_op);
 	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
-	fscache_set_op_name(&op->op, "Write1");
 
 	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
 	if (ret < 0)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a2a6abbccc07..2792a790e50b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1346,11 +1346,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink,
+				    struct shrink_control *sc)
 {
 	struct gfs2_glock *gl;
 	int may_demote;
 	int nr_skipped = 0;
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 	LIST_HEAD(skipped);
 
 	if (nr == 0)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index e23d9864c418..42e8d23bc047 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -38,6 +38,7 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
@@ -77,19 +78,20 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
+	int nr_to_scan = sc->nr_to_scan;
 
-	if (nr == 0)
+	if (nr_to_scan == 0)
 		goto out;
 
-	if (!(gfp_mask & __GFP_FS))
+	if (!(sc->gfp_mask & __GFP_FS))
 		return -1;
 
 	spin_lock(&qd_lru_lock);
-	while (nr && !list_empty(&qd_lru_list)) {
+	while (nr_to_scan && !list_empty(&qd_lru_list)) {
 		qd = list_entry(qd_lru_list.next,
 				struct gfs2_quota_data, qd_reclaim);
 		sdp = qd->qd_gl->gl_sbd;
@@ -110,7 +112,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 		spin_unlock(&qd_lru_lock);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 		spin_lock(&qd_lru_lock);
-		nr--;
+		nr_to_scan--;
 	}
 	spin_unlock(&qd_lru_lock);
 
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index e7d236ca48bd..90bf1c302a98 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -12,6 +12,7 @@
 
 struct gfs2_inode;
 struct gfs2_sbd;
+struct shrink_control;
 
 #define NO_QUOTA_CHANGE ((u32)-1)
 
@@ -51,7 +52,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
-extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
+				 struct shrink_control *sc);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b9eeb1cd03ff..e7a035781b7d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 	pgoff = offset >> PAGE_SHIFT;
 
 	i_size_write(inode, offset);
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	if (!prio_tree_empty(&mapping->i_mmap))
 		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	truncate_hugepages(inode, offset);
 	return 0;
 }
diff --git a/fs/inode.c b/fs/inode.c
index 05f4fa521325..990d284877a1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -326,12 +326,11 @@ void address_space_init_once(struct address_space *mapping)
 	memset(mapping, 0, sizeof(*mapping));
 	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
 	spin_lock_init(&mapping->tree_lock);
-	spin_lock_init(&mapping->i_mmap_lock);
+	mutex_init(&mapping->i_mmap_mutex);
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
 	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-	mutex_init(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(address_space_init_once);
 
@@ -752,8 +751,12 @@ static void prune_icache(int nr_to_scan)
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
-static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
 	if (nr) {
 		/*
 		 * Nasty deadlock avoidance.  We may hold various FS locks,
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2f174be06555..8c32ef3ba88e 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -90,7 +90,8 @@ static DEFINE_SPINLOCK(mb_cache_spinlock);
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink,
+			      struct shrink_control *sc);
 
 static struct shrinker mb_cache_shrinker = {
 	.shrink = mb_cache_shrink_fn,
@@ -156,18 +157,19 @@ forget:
  * gets low.
  *
  * @shrink: (ignored)
- * @nr_to_scan: Number of objects to scan
- * @gfp_mask: (ignored)
+ * @sc: shrink_control passed from reclaim
  *
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(free_list);
 	struct mb_cache *cache;
 	struct mb_cache_entry *entry, *tmp;
 	int count = 0;
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 
 	mb_debug("trying to free %d entries", nr_to_scan);
 	spin_lock(&mb_cache_spinlock);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 0250e4ce4893..202f370526a7 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -461,7 +461,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 #endif
 	struct ncp_entry_info finfo;
 
-	data.wdog_pid = NULL;
+	memset(&data, 0, sizeof(data));
 	server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
 	if (!server)
 		return -ENOMEM;
@@ -496,7 +496,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 			struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
 
 			data.flags = md->flags;
-			data.int_flags = 0;
 			data.mounted_uid = md->mounted_uid;
 			data.wdog_pid = find_get_pid(md->wdog_pid);
 			data.ncp_fd = md->ncp_fd;
@@ -507,7 +506,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 			data.file_mode = md->file_mode;
 			data.dir_mode = md->dir_mode;
 			data.info_fd = -1;
-			data.mounted_vol[0] = 0;
 		}
 		break;
 	default:
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7237672216c8..424e47773a84 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2042,11 +2042,14 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink,
+			      struct shrink_control *sc)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 
 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
 		return (nr_to_scan == 0) ? 0 : -1;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ce118ce885dd..2df6ca7b5898 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -234,7 +234,7 @@ extern int nfs_init_client(struct nfs_client *clp,
 
 /* dir.c */
 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
-					int nr_to_scan, gfp_t gfp_mask);
+					struct shrink_control *sc);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index d545e97d99c3..8ed4d3433199 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,7 +255,11 @@ ssize_t part_discard_alignment_show(struct device *dev,
 					struct device_attribute *attr, char *buf)
 {
 	struct hd_struct *p = dev_to_part(dev);
-	return sprintf(buf, "%u\n", p->discard_alignment);
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return sprintf(buf, "%u\n",
+			queue_limit_discard_alignment(&disk->queue->limits,
+							p->start_sect));
 }
 
 ssize_t part_stat_show(struct device *dev,
@@ -449,8 +453,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 	p->start_sect = start;
 	p->alignment_offset =
 		queue_limit_alignment_offset(&disk->queue->limits, start);
-	p->discard_alignment =
-		queue_limit_discard_alignment(&disk->queue->limits, start);
 	p->nr_sects = len;
 	p->partno = partno;
 	p->policy = get_disk_ro(disk);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c03e8d3a3a5b..3763b436e69d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,6 +61,14 @@ extern const struct file_operations proc_pagemap_operations;
 extern const struct file_operations proc_net_operations;
 extern const struct inode_operations proc_net_inode_operations;
 
+struct proc_maps_private {
+	struct pid *pid;
+	struct task_struct *task;
+#ifdef CONFIG_MMU
+	struct vm_area_struct *tail_vma;
+#endif
+};
+
 void proc_init_inodecache(void);
 
 static inline struct pid *proc_pid(struct inode *inode)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 318d8654989b..2c9db29ea358 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -858,7 +858,192 @@ const struct file_operations proc_pagemap_operations = { | |||
858 | #endif /* CONFIG_PROC_PAGE_MONITOR */ | 858 | #endif /* CONFIG_PROC_PAGE_MONITOR */ |
859 | 859 | ||
860 | #ifdef CONFIG_NUMA | 860 | #ifdef CONFIG_NUMA |
861 | extern int show_numa_map(struct seq_file *m, void *v); | 861 | |
862 | struct numa_maps { | ||
863 | struct vm_area_struct *vma; | ||
864 | unsigned long pages; | ||
865 | unsigned long anon; | ||
866 | unsigned long active; | ||
867 | unsigned long writeback; | ||
868 | unsigned long mapcount_max; | ||
869 | unsigned long dirty; | ||
870 | unsigned long swapcache; | ||
871 | unsigned long node[MAX_NUMNODES]; | ||
872 | }; | ||
873 | |||
874 | struct numa_maps_private { | ||
875 | struct proc_maps_private proc_maps; | ||
876 | struct numa_maps md; | ||
877 | }; | ||
878 | |||
879 | static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty) | ||
880 | { | ||
881 | int count = page_mapcount(page); | ||
882 | |||
883 | md->pages++; | ||
884 | if (pte_dirty || PageDirty(page)) | ||
885 | md->dirty++; | ||
886 | |||
887 | if (PageSwapCache(page)) | ||
888 | md->swapcache++; | ||
889 | |||
890 | if (PageActive(page) || PageUnevictable(page)) | ||
891 | md->active++; | ||
892 | |||
893 | if (PageWriteback(page)) | ||
894 | md->writeback++; | ||
895 | |||
896 | if (PageAnon(page)) | ||
897 | md->anon++; | ||
898 | |||
899 | if (count > md->mapcount_max) | ||
900 | md->mapcount_max = count; | ||
901 | |||
902 | md->node[page_to_nid(page)]++; | ||
903 | } | ||
904 | |||
905 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | ||
906 | unsigned long end, struct mm_walk *walk) | ||
907 | { | ||
908 | struct numa_maps *md; | ||
909 | spinlock_t *ptl; | ||
910 | pte_t *orig_pte; | ||
911 | pte_t *pte; | ||
912 | |||
913 | md = walk->private; | ||
914 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); | ||
915 | do { | ||
916 | struct page *page; | ||
917 | int nid; | ||
918 | |||
919 | if (!pte_present(*pte)) | ||
920 | continue; | ||
921 | |||
922 | page = vm_normal_page(md->vma, addr, *pte); | ||
923 | if (!page) | ||
924 | continue; | ||
925 | |||
926 | if (PageReserved(page)) | ||
927 | continue; | ||
928 | |||
929 | nid = page_to_nid(page); | ||
930 | if (!node_isset(nid, node_states[N_HIGH_MEMORY])) | ||
931 | continue; | ||
932 | |||
933 | gather_stats(page, md, pte_dirty(*pte)); | ||
934 | |||
935 | } while (pte++, addr += PAGE_SIZE, addr != end); | ||
936 | pte_unmap_unlock(orig_pte, ptl); | ||
937 | return 0; | ||
938 | } | ||
939 | #ifdef CONFIG_HUGETLB_PAGE | ||
940 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, | ||
941 | unsigned long addr, unsigned long end, struct mm_walk *walk) | ||
942 | { | ||
943 | struct numa_maps *md; | ||
944 | struct page *page; | ||
945 | |||
946 | if (pte_none(*pte)) | ||
947 | return 0; | ||
948 | |||
949 | page = pte_page(*pte); | ||
950 | if (!page) | ||
951 | return 0; | ||
952 | |||
953 | md = walk->private; | ||
954 | gather_stats(page, md, pte_dirty(*pte)); | ||
955 | return 0; | ||
956 | } | ||
957 | |||
958 | #else | ||
959 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, | ||
960 | unsigned long addr, unsigned long end, struct mm_walk *walk) | ||
961 | { | ||
962 | return 0; | ||
963 | } | ||
964 | #endif | ||
965 | |||
966 | /* | ||
967 | * Display pages allocated per node and memory policy via /proc. | ||
968 | */ | ||
969 | static int show_numa_map(struct seq_file *m, void *v) | ||
970 | { | ||
971 | struct numa_maps_private *numa_priv = m->private; | ||
972 | struct proc_maps_private *proc_priv = &numa_priv->proc_maps; | ||
973 | struct vm_area_struct *vma = v; | ||
974 | struct numa_maps *md = &numa_priv->md; | ||
975 | struct file *file = vma->vm_file; | ||
976 | struct mm_struct *mm = vma->vm_mm; | ||
977 | struct mm_walk walk = {}; | ||
978 | struct mempolicy *pol; | ||
979 | int n; | ||
980 | char buffer[50]; | ||
981 | |||
982 | if (!mm) | ||
983 | return 0; | ||
984 | |||
985 | /* Ensure we start with an empty set of numa_maps statistics. */ | ||
986 | memset(md, 0, sizeof(*md)); | ||
987 | |||
988 | md->vma = vma; | ||
989 | |||
990 | walk.hugetlb_entry = gather_hugetbl_stats; | ||
991 | walk.pmd_entry = gather_pte_stats; | ||
992 | walk.private = md; | ||
993 | walk.mm = mm; | ||
994 | |||
995 | pol = get_vma_policy(proc_priv->task, vma, vma->vm_start); | ||
996 | mpol_to_str(buffer, sizeof(buffer), pol, 0); | ||
997 | mpol_cond_put(pol); | ||
998 | |||
999 | seq_printf(m, "%08lx %s", vma->vm_start, buffer); | ||
1000 | |||
1001 | if (file) { | ||
1002 | seq_printf(m, " file="); | ||
1003 | seq_path(m, &file->f_path, "\n\t= "); | ||
1004 | } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { | ||
1005 | seq_printf(m, " heap"); | ||
1006 | } else if (vma->vm_start <= mm->start_stack && | ||
1007 | vma->vm_end >= mm->start_stack) { | ||
1008 | seq_printf(m, " stack"); | ||
1009 | } | ||
1010 | |||
1011 | walk_page_range(vma->vm_start, vma->vm_end, &walk); | ||
1012 | |||
1013 | if (!md->pages) | ||
1014 | goto out; | ||
1015 | |||
1016 | if (md->anon) | ||
1017 | seq_printf(m, " anon=%lu", md->anon); | ||
1018 | |||
1019 | if (md->dirty) | ||
1020 | seq_printf(m, " dirty=%lu", md->dirty); | ||
1021 | |||
1022 | if (md->pages != md->anon && md->pages != md->dirty) | ||
1023 | seq_printf(m, " mapped=%lu", md->pages); | ||
1024 | |||
1025 | if (md->mapcount_max > 1) | ||
1026 | seq_printf(m, " mapmax=%lu", md->mapcount_max); | ||
1027 | |||
1028 | if (md->swapcache) | ||
1029 | seq_printf(m, " swapcache=%lu", md->swapcache); | ||
1030 | |||
1031 | if (md->active < md->pages && !is_vm_hugetlb_page(vma)) | ||
1032 | seq_printf(m, " active=%lu", md->active); | ||
1033 | |||
1034 | if (md->writeback) | ||
1035 | seq_printf(m, " writeback=%lu", md->writeback); | ||
1036 | |||
1037 | for_each_node_state(n, N_HIGH_MEMORY) | ||
1038 | if (md->node[n]) | ||
1039 | seq_printf(m, " N%d=%lu", n, md->node[n]); | ||
1040 | out: | ||
1041 | seq_putc(m, '\n'); | ||
1042 | |||
1043 | if (m->count < m->size) | ||
1044 | m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0; | ||
1045 | return 0; | ||
1046 | } | ||
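For reference, a line emitted by show_numa_map() looks like the following (a hypothetical VMA with illustrative values; every field after the policy string is printed only when non-zero):

        7f2a5e4d6000 default file=/lib/libc-2.13.so mapped=14 mapmax=173 N0=10 N1=4

i.e. the VMA start address (%08lx), the mempolicy string from mpol_to_str(), an optional file/heap/stack tag, the populated counters, and finally one N<node>=<pages> entry per node that holds pages from this mapping.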
862 | 1047 | ||
863 | static const struct seq_operations proc_pid_numa_maps_op = { | 1048 | static const struct seq_operations proc_pid_numa_maps_op = { |
864 | .start = m_start, | 1049 | .start = m_start, |
@@ -869,7 +1054,20 @@ static const struct seq_operations proc_pid_numa_maps_op = { | |||
869 | 1054 | ||
870 | static int numa_maps_open(struct inode *inode, struct file *file) | 1055 | static int numa_maps_open(struct inode *inode, struct file *file) |
871 | { | 1056 | { |
872 | return do_maps_open(inode, file, &proc_pid_numa_maps_op); | 1057 | struct numa_maps_private *priv; |
1058 | int ret = -ENOMEM; | ||
1059 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
1060 | if (priv) { | ||
1061 | priv->proc_maps.pid = proc_pid(inode); | ||
1062 | ret = seq_open(file, &proc_pid_numa_maps_op); | ||
1063 | if (!ret) { | ||
1064 | struct seq_file *m = file->private_data; | ||
1065 | m->private = priv; | ||
1066 | } else { | ||
1067 | kfree(priv); | ||
1068 | } | ||
1069 | } | ||
1070 | return ret; | ||
873 | } | 1071 | } |
874 | 1072 | ||
875 | const struct file_operations proc_numa_maps_operations = { | 1073 | const struct file_operations proc_numa_maps_operations = { |
@@ -878,4 +1076,4 @@ const struct file_operations proc_numa_maps_operations = { | |||
878 | .llseek = seq_lseek, | 1076 | .llseek = seq_lseek, |
879 | .release = seq_release_private, | 1077 | .release = seq_release_private, |
880 | }; | 1078 | }; |
881 | #endif | 1079 | #endif /* CONFIG_NUMA */ |
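Note that the kzalloc() in numa_maps_open() needs no matching kfree() on the normal close path: proc_numa_maps_operations keeps .release = seq_release_private, which frees m->private before tearing down the seq_file. Its core, per fs/seq_file.c (reproduced from memory, so treat as a sketch):

        int seq_release_private(struct inode *inode, struct file *file)
        {
                struct seq_file *seq = file->private_data;

                /* free the numa_maps_private hung off the seq_file */
                kfree(seq->private);
                seq->private = NULL;
                return seq_release(inode, file);
        }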
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index d3c032f5fa0a..5b572c89e6c4 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -691,8 +691,11 @@ static void prune_dqcache(int count) | |||
691 | * This is called from kswapd when we think we need some | 691 | * This is called from kswapd when we think we need some |
692 | * more memory | 692 | * more memory |
693 | */ | 693 | */ |
694 | static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) | 694 | static int shrink_dqcache_memory(struct shrinker *shrink, |
695 | struct shrink_control *sc) | ||
695 | { | 696 | { |
697 | int nr = sc->nr_to_scan; | ||
698 | |||
696 | if (nr) { | 699 | if (nr) { |
697 | spin_lock(&dq_list_lock); | 700 | spin_lock(&dq_list_lock); |
698 | prune_dqcache(nr); | 701 | prune_dqcache(nr); |
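This hunk, like the xfs conversions below, tracks the shrinker API change made earlier in this series: ->shrink() callbacks now take a struct shrink_control instead of separate nr/gfp arguments. The struct, as added to include/linux/mm.h (comment wording approximate), is simply:

        struct shrink_control {
                gfp_t gfp_mask;

                /* How many slab objects shrinker() should scan
                 * and try to reclaim */
                unsigned long nr_to_scan;
        };

Packing the request into one struct means future fields can be added without touching every filesystem's callback signature again.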
diff --git a/fs/splice.c b/fs/splice.c index 50a5d978da16..aa866d309695 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -162,6 +162,14 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = { | |||
162 | .get = generic_pipe_buf_get, | 162 | .get = generic_pipe_buf_get, |
163 | }; | 163 | }; |
164 | 164 | ||
165 | static void wakeup_pipe_readers(struct pipe_inode_info *pipe) | ||
166 | { | ||
167 | smp_mb(); | ||
168 | if (waitqueue_active(&pipe->wait)) | ||
169 | wake_up_interruptible(&pipe->wait); | ||
170 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | ||
171 | } | ||
172 | |||
165 | /** | 173 | /** |
166 | * splice_to_pipe - fill passed data into a pipe | 174 | * splice_to_pipe - fill passed data into a pipe |
167 | * @pipe: pipe to fill | 175 | * @pipe: pipe to fill |
@@ -247,12 +255,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, | |||
247 | 255 | ||
248 | pipe_unlock(pipe); | 256 | pipe_unlock(pipe); |
249 | 257 | ||
250 | if (do_wakeup) { | 258 | if (do_wakeup) |
251 | smp_mb(); | 259 | wakeup_pipe_readers(pipe); |
252 | if (waitqueue_active(&pipe->wait)) | ||
253 | wake_up_interruptible(&pipe->wait); | ||
254 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | ||
255 | } | ||
256 | 260 | ||
257 | while (page_nr < spd_pages) | 261 | while (page_nr < spd_pages) |
258 | spd->spd_release(spd, page_nr++); | 262 | spd->spd_release(spd, page_nr++); |
@@ -1892,12 +1896,9 @@ retry: | |||
1892 | /* | 1896 | /* |
1893 | * If we put data in the output pipe, wakeup any potential readers. | 1897 | * If we put data in the output pipe, wakeup any potential readers. |
1894 | */ | 1898 | */ |
1895 | if (ret > 0) { | 1899 | if (ret > 0) |
1896 | smp_mb(); | 1900 | wakeup_pipe_readers(opipe); |
1897 | if (waitqueue_active(&opipe->wait)) | 1901 | |
1898 | wake_up_interruptible(&opipe->wait); | ||
1899 | kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN); | ||
1900 | } | ||
1901 | if (input_wakeup) | 1902 | if (input_wakeup) |
1902 | wakeup_pipe_writers(ipipe); | 1903 | wakeup_pipe_writers(ipipe); |
1903 | 1904 | ||
@@ -1976,12 +1977,8 @@ static int link_pipe(struct pipe_inode_info *ipipe, | |||
1976 | /* | 1977 | /* |
1977 | * If we put data in the output pipe, wakeup any potential readers. | 1978 | * If we put data in the output pipe, wakeup any potential readers. |
1978 | */ | 1979 | */ |
1979 | if (ret > 0) { | 1980 | if (ret > 0) |
1980 | smp_mb(); | 1981 | wakeup_pipe_readers(opipe); |
1981 | if (waitqueue_active(&opipe->wait)) | ||
1982 | wake_up_interruptible(&opipe->wait); | ||
1983 | kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN); | ||
1984 | } | ||
1985 | 1982 | ||
1986 | return ret; | 1983 | return ret; |
1987 | } | 1984 | } |
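The smp_mb() that moved into wakeup_pipe_readers() pairs with the sleep side in fs/pipe.c: a reader queues itself on pipe->wait and only then re-checks the pipe, so the writer's buffer updates must be visible before it tests waitqueue_active(), or a sleeper can be missed. The waiter side, simplified from pipe_wait() (a sketch, not verbatim):

        void pipe_wait(struct pipe_inode_info *pipe)
        {
                DEFINE_WAIT(wait);

                /* Ordered against the writer's smp_mb() by the
                 * barriers implied in prepare_to_wait(). */
                prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
                pipe_unlock(pipe);
                schedule();
                finish_wait(&pipe->wait, &wait);
                pipe_lock(pipe);
        }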
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 52b2b5da566e..5e68099db2a5 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -1422,12 +1422,12 @@ restart: | |||
1422 | int | 1422 | int |
1423 | xfs_buftarg_shrink( | 1423 | xfs_buftarg_shrink( |
1424 | struct shrinker *shrink, | 1424 | struct shrinker *shrink, |
1425 | int nr_to_scan, | 1425 | struct shrink_control *sc) |
1426 | gfp_t mask) | ||
1427 | { | 1426 | { |
1428 | struct xfs_buftarg *btp = container_of(shrink, | 1427 | struct xfs_buftarg *btp = container_of(shrink, |
1429 | struct xfs_buftarg, bt_shrinker); | 1428 | struct xfs_buftarg, bt_shrinker); |
1430 | struct xfs_buf *bp; | 1429 | struct xfs_buf *bp; |
1430 | int nr_to_scan = sc->nr_to_scan; | ||
1431 | LIST_HEAD(dispose); | 1431 | LIST_HEAD(dispose); |
1432 | 1432 | ||
1433 | if (!nr_to_scan) | 1433 | if (!nr_to_scan) |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index cb1bb2080e44..8ecad5ff9f9b 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -1032,13 +1032,14 @@ xfs_reclaim_inodes( | |||
1032 | static int | 1032 | static int |
1033 | xfs_reclaim_inode_shrink( | 1033 | xfs_reclaim_inode_shrink( |
1034 | struct shrinker *shrink, | 1034 | struct shrinker *shrink, |
1035 | int nr_to_scan, | 1035 | struct shrink_control *sc) |
1036 | gfp_t gfp_mask) | ||
1037 | { | 1036 | { |
1038 | struct xfs_mount *mp; | 1037 | struct xfs_mount *mp; |
1039 | struct xfs_perag *pag; | 1038 | struct xfs_perag *pag; |
1040 | xfs_agnumber_t ag; | 1039 | xfs_agnumber_t ag; |
1041 | int reclaimable; | 1040 | int reclaimable; |
1041 | int nr_to_scan = sc->nr_to_scan; | ||
1042 | gfp_t gfp_mask = sc->gfp_mask; | ||
1042 | 1043 | ||
1043 | mp = container_of(shrink, struct xfs_mount, m_inode_shrink); | 1044 | mp = container_of(shrink, struct xfs_mount, m_inode_shrink); |
1044 | if (nr_to_scan) { | 1045 | if (nr_to_scan) { |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 69228aa8605a..b94dace4e785 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -60,7 +60,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); | |||
60 | 60 | ||
61 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); | 61 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); |
62 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); | 62 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); |
63 | STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); | 63 | STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); |
64 | 64 | ||
65 | static struct shrinker xfs_qm_shaker = { | 65 | static struct shrinker xfs_qm_shaker = { |
66 | .shrink = xfs_qm_shake, | 66 | .shrink = xfs_qm_shake, |
@@ -2009,10 +2009,10 @@ xfs_qm_shake_freelist( | |||
2009 | STATIC int | 2009 | STATIC int |
2010 | xfs_qm_shake( | 2010 | xfs_qm_shake( |
2011 | struct shrinker *shrink, | 2011 | struct shrinker *shrink, |
2012 | int nr_to_scan, | 2012 | struct shrink_control *sc) |
2013 | gfp_t gfp_mask) | ||
2014 | { | 2013 | { |
2015 | int ndqused, nfree, n; | 2014 | int ndqused, nfree, n; |
2015 | gfp_t gfp_mask = sc->gfp_mask; | ||
2016 | 2016 | ||
2017 | if (!kmem_shake_allow(gfp_mask)) | 2017 | if (!kmem_shake_allow(gfp_mask)) |
2018 | return 0; | 2018 | return 0; |
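Taken together, the quota and xfs hunks all follow the same mechanical recipe: unpack nr_to_scan (and gfp_mask where needed) from the shrink_control at the top of the callback. A minimal sketch of a shrinker against the new API (my_shrink, my_count, and my_scan are illustrative names, not from this patch):

        static int my_shrink(struct shrinker *shrink, struct shrink_control *sc)
        {
                if (!sc->nr_to_scan)
                        return my_count();      /* query pass: report count only */
                my_scan(sc->nr_to_scan, sc->gfp_mask);
                return my_count();              /* objects remaining in the cache */
        }

        static struct shrinker my_shrinker = {
                .shrink = my_shrink,
                .seeks  = DEFAULT_SEEKS,
        };

        /* register_shrinker(&my_shrinker) at init,
         * unregister_shrinker(&my_shrinker) at teardown. */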