Diffstat (limited to 'fs')

-rw-r--r--   fs/btrfs/ctree.c               129
-rw-r--r--   fs/btrfs/ioctl.c                20
-rw-r--r--   fs/ceph/auth_x.c                 3
-rw-r--r--   fs/ceph/mds_client.c            35
-rw-r--r--   fs/ceph/mds_client.h             1
-rw-r--r--   fs/ceph/messenger.c             71
-rw-r--r--   fs/ceph/osdmap.c                 1
-rw-r--r--   fs/cifs/cifsfs.c                 6
-rw-r--r--   fs/cifs/dns_resolve.c           69
-rw-r--r--   fs/cifs/dns_resolve.h            4
-rw-r--r--   fs/dcache.c                      2
-rw-r--r--   fs/gfs2/glock.c                  2
-rw-r--r--   fs/gfs2/quota.c                  2
-rw-r--r--   fs/gfs2/quota.h                  2
-rw-r--r--   fs/inode.c                       2
-rw-r--r--   fs/jffs2/xattr.c                 2
-rw-r--r--   fs/mbcache.c                     5
-rw-r--r--   fs/nfs/dir.c                     2
-rw-r--r--   fs/nfs/internal.h                3
-rw-r--r--   fs/quota/dquot.c                 2
-rw-r--r--   fs/ubifs/shrinker.c              2
-rw-r--r--   fs/ubifs/ubifs.h                 2
-rw-r--r--   fs/xfs/linux-2.6/xfs_buf.c       5
-rw-r--r--   fs/xfs/linux-2.6/xfs_super.c     2
-rw-r--r--   fs/xfs/linux-2.6/xfs_sync.c    130
-rw-r--r--   fs/xfs/linux-2.6/xfs_sync.h      2
-rw-r--r--   fs/xfs/linux-2.6/xfs_trace.h     3
-rw-r--r--   fs/xfs/quota/xfs_qm.c            7
-rw-r--r--   fs/xfs/xfs_mount.h               2

29 files changed, 376 insertions(+), 142 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0d1d966b0fe4..c3df14ce2cc2 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2304,12 +2304,17 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root, | |||
2304 | return ret; | 2304 | return ret; |
2305 | } | 2305 | } |
2306 | 2306 | ||
2307 | /* | ||
2308 | * min slot controls the lowest index we're willing to push to the | ||
2309 | * right. We'll push up to and including min_slot, but no lower | ||
2310 | */ | ||
2307 | static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, | 2311 | static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, |
2308 | struct btrfs_root *root, | 2312 | struct btrfs_root *root, |
2309 | struct btrfs_path *path, | 2313 | struct btrfs_path *path, |
2310 | int data_size, int empty, | 2314 | int data_size, int empty, |
2311 | struct extent_buffer *right, | 2315 | struct extent_buffer *right, |
2312 | int free_space, u32 left_nritems) | 2316 | int free_space, u32 left_nritems, |
2317 | u32 min_slot) | ||
2313 | { | 2318 | { |
2314 | struct extent_buffer *left = path->nodes[0]; | 2319 | struct extent_buffer *left = path->nodes[0]; |
2315 | struct extent_buffer *upper = path->nodes[1]; | 2320 | struct extent_buffer *upper = path->nodes[1]; |
@@ -2327,7 +2332,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, | |||
2327 | if (empty) | 2332 | if (empty) |
2328 | nr = 0; | 2333 | nr = 0; |
2329 | else | 2334 | else |
2330 | nr = 1; | 2335 | nr = max_t(u32, 1, min_slot); |
2331 | 2336 | ||
2332 | if (path->slots[0] >= left_nritems) | 2337 | if (path->slots[0] >= left_nritems) |
2333 | push_space += data_size; | 2338 | push_space += data_size; |
@@ -2469,10 +2474,14 @@ out_unlock: | |||
2469 | * | 2474 | * |
2470 | * returns 1 if the push failed because the other node didn't have enough | 2475 | * returns 1 if the push failed because the other node didn't have enough |
2471 | * room, 0 if everything worked out and < 0 if there were major errors. | 2476 | * room, 0 if everything worked out and < 0 if there were major errors. |
2477 | * | ||
2478 | * this will push starting from min_slot to the end of the leaf. It won't | ||
2479 | * push any slot lower than min_slot | ||
2472 | */ | 2480 | */ |
2473 | static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root | 2481 | static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root |
2474 | *root, struct btrfs_path *path, int data_size, | 2482 | *root, struct btrfs_path *path, |
2475 | int empty) | 2483 | int min_data_size, int data_size, |
2484 | int empty, u32 min_slot) | ||
2476 | { | 2485 | { |
2477 | struct extent_buffer *left = path->nodes[0]; | 2486 | struct extent_buffer *left = path->nodes[0]; |
2478 | struct extent_buffer *right; | 2487 | struct extent_buffer *right; |
@@ -2514,8 +2523,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root | |||
2514 | if (left_nritems == 0) | 2523 | if (left_nritems == 0) |
2515 | goto out_unlock; | 2524 | goto out_unlock; |
2516 | 2525 | ||
2517 | return __push_leaf_right(trans, root, path, data_size, empty, | 2526 | return __push_leaf_right(trans, root, path, min_data_size, empty, |
2518 | right, free_space, left_nritems); | 2527 | right, free_space, left_nritems, min_slot); |
2519 | out_unlock: | 2528 | out_unlock: |
2520 | btrfs_tree_unlock(right); | 2529 | btrfs_tree_unlock(right); |
2521 | free_extent_buffer(right); | 2530 | free_extent_buffer(right); |
@@ -2525,12 +2534,17 @@ out_unlock: | |||
2525 | /* | 2534 | /* |
2526 | * push some data in the path leaf to the left, trying to free up at | 2535 | * push some data in the path leaf to the left, trying to free up at |
2527 | * least data_size bytes. returns zero if the push worked, nonzero otherwise | 2536 | * least data_size bytes. returns zero if the push worked, nonzero otherwise |
2537 | * | ||
2538 | * max_slot can put a limit on how far into the leaf we'll push items. The | ||
2539 | * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the | ||
2540 | * items | ||
2528 | */ | 2541 | */ |
2529 | static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, | 2542 | static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, |
2530 | struct btrfs_root *root, | 2543 | struct btrfs_root *root, |
2531 | struct btrfs_path *path, int data_size, | 2544 | struct btrfs_path *path, int data_size, |
2532 | int empty, struct extent_buffer *left, | 2545 | int empty, struct extent_buffer *left, |
2533 | int free_space, int right_nritems) | 2546 | int free_space, u32 right_nritems, |
2547 | u32 max_slot) | ||
2534 | { | 2548 | { |
2535 | struct btrfs_disk_key disk_key; | 2549 | struct btrfs_disk_key disk_key; |
2536 | struct extent_buffer *right = path->nodes[0]; | 2550 | struct extent_buffer *right = path->nodes[0]; |
@@ -2549,9 +2563,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, | |||
2549 | slot = path->slots[1]; | 2563 | slot = path->slots[1]; |
2550 | 2564 | ||
2551 | if (empty) | 2565 | if (empty) |
2552 | nr = right_nritems; | 2566 | nr = min(right_nritems, max_slot); |
2553 | else | 2567 | else |
2554 | nr = right_nritems - 1; | 2568 | nr = min(right_nritems - 1, max_slot); |
2555 | 2569 | ||
2556 | for (i = 0; i < nr; i++) { | 2570 | for (i = 0; i < nr; i++) { |
2557 | item = btrfs_item_nr(right, i); | 2571 | item = btrfs_item_nr(right, i); |
@@ -2712,10 +2726,14 @@ out: | |||
2712 | /* | 2726 | /* |
2713 | * push some data in the path leaf to the left, trying to free up at | 2727 | * push some data in the path leaf to the left, trying to free up at |
2714 | * least data_size bytes. returns zero if the push worked, nonzero otherwise | 2728 | * least data_size bytes. returns zero if the push worked, nonzero otherwise |
2729 | * | ||
2730 | * max_slot can put a limit on how far into the leaf we'll push items. The | ||
2731 | * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the | ||
2732 | * items | ||
2715 | */ | 2733 | */ |
2716 | static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root | 2734 | static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root |
2717 | *root, struct btrfs_path *path, int data_size, | 2735 | *root, struct btrfs_path *path, int min_data_size, |
2718 | int empty) | 2736 | int data_size, int empty, u32 max_slot) |
2719 | { | 2737 | { |
2720 | struct extent_buffer *right = path->nodes[0]; | 2738 | struct extent_buffer *right = path->nodes[0]; |
2721 | struct extent_buffer *left; | 2739 | struct extent_buffer *left; |
@@ -2761,8 +2779,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root | |||
2761 | goto out; | 2779 | goto out; |
2762 | } | 2780 | } |
2763 | 2781 | ||
2764 | return __push_leaf_left(trans, root, path, data_size, | 2782 | return __push_leaf_left(trans, root, path, min_data_size, |
2765 | empty, left, free_space, right_nritems); | 2783 | empty, left, free_space, right_nritems, |
2784 | max_slot); | ||
2766 | out: | 2785 | out: |
2767 | btrfs_tree_unlock(left); | 2786 | btrfs_tree_unlock(left); |
2768 | free_extent_buffer(left); | 2787 | free_extent_buffer(left); |
@@ -2855,6 +2874,64 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans, | |||
2855 | } | 2874 | } |
2856 | 2875 | ||
2857 | /* | 2876 | /* |
2877 | * double splits happen when we need to insert a big item in the middle | ||
2878 | * of a leaf. A double split can leave us with 3 mostly empty leaves: | ||
2879 | * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] | ||
2880 | * A B C | ||
2881 | * | ||
2882 | * We avoid this by trying to push the items on either side of our target | ||
2883 | * into the adjacent leaves. If all goes well we can avoid the double split | ||
2884 | * completely. | ||
2885 | */ | ||
2886 | static noinline int push_for_double_split(struct btrfs_trans_handle *trans, | ||
2887 | struct btrfs_root *root, | ||
2888 | struct btrfs_path *path, | ||
2889 | int data_size) | ||
2890 | { | ||
2891 | int ret; | ||
2892 | int progress = 0; | ||
2893 | int slot; | ||
2894 | u32 nritems; | ||
2895 | |||
2896 | slot = path->slots[0]; | ||
2897 | |||
2898 | /* | ||
2899 | * try to push all the items after our slot into the | ||
2900 | * right leaf | ||
2901 | */ | ||
2902 | ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot); | ||
2903 | if (ret < 0) | ||
2904 | return ret; | ||
2905 | |||
2906 | if (ret == 0) | ||
2907 | progress++; | ||
2908 | |||
2909 | nritems = btrfs_header_nritems(path->nodes[0]); | ||
2910 | /* | ||
2911 | * our goal is to get our slot at the start or end of a leaf. If | ||
2912 | * we've done so we're done | ||
2913 | */ | ||
2914 | if (path->slots[0] == 0 || path->slots[0] == nritems) | ||
2915 | return 0; | ||
2916 | |||
2917 | if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) | ||
2918 | return 0; | ||
2919 | |||
2920 | /* try to push all the items before our slot into the next leaf */ | ||
2921 | slot = path->slots[0]; | ||
2922 | ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot); | ||
2923 | if (ret < 0) | ||
2924 | return ret; | ||
2925 | |||
2926 | if (ret == 0) | ||
2927 | progress++; | ||
2928 | |||
2929 | if (progress) | ||
2930 | return 0; | ||
2931 | return 1; | ||
2932 | } | ||
2933 | |||
2934 | /* | ||
2858 | * split the path's leaf in two, making sure there is at least data_size | 2935 | * split the path's leaf in two, making sure there is at least data_size |
2859 | * available for the resulting leaf level of the path. | 2936 | * available for the resulting leaf level of the path. |
2860 | * | 2937 | * |
@@ -2876,6 +2953,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, | |||
2876 | int wret; | 2953 | int wret; |
2877 | int split; | 2954 | int split; |
2878 | int num_doubles = 0; | 2955 | int num_doubles = 0; |
2956 | int tried_avoid_double = 0; | ||
2879 | 2957 | ||
2880 | l = path->nodes[0]; | 2958 | l = path->nodes[0]; |
2881 | slot = path->slots[0]; | 2959 | slot = path->slots[0]; |
@@ -2884,12 +2962,14 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, | |||
2884 | return -EOVERFLOW; | 2962 | return -EOVERFLOW; |
2885 | 2963 | ||
2886 | /* first try to make some room by pushing left and right */ | 2964 | /* first try to make some room by pushing left and right */ |
2887 | if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) { | 2965 | if (data_size) { |
2888 | wret = push_leaf_right(trans, root, path, data_size, 0); | 2966 | wret = push_leaf_right(trans, root, path, data_size, |
2967 | data_size, 0, 0); | ||
2889 | if (wret < 0) | 2968 | if (wret < 0) |
2890 | return wret; | 2969 | return wret; |
2891 | if (wret) { | 2970 | if (wret) { |
2892 | wret = push_leaf_left(trans, root, path, data_size, 0); | 2971 | wret = push_leaf_left(trans, root, path, data_size, |
2972 | data_size, 0, (u32)-1); | ||
2893 | if (wret < 0) | 2973 | if (wret < 0) |
2894 | return wret; | 2974 | return wret; |
2895 | } | 2975 | } |
@@ -2923,6 +3003,8 @@ again: | |||
2923 | if (mid != nritems && | 3003 | if (mid != nritems && |
2924 | leaf_space_used(l, mid, nritems - mid) + | 3004 | leaf_space_used(l, mid, nritems - mid) + |
2925 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { | 3005 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { |
3006 | if (data_size && !tried_avoid_double) | ||
3007 | goto push_for_double; | ||
2926 | split = 2; | 3008 | split = 2; |
2927 | } | 3009 | } |
2928 | } | 3010 | } |
@@ -2939,6 +3021,8 @@ again: | |||
2939 | if (mid != nritems && | 3021 | if (mid != nritems && |
2940 | leaf_space_used(l, mid, nritems - mid) + | 3022 | leaf_space_used(l, mid, nritems - mid) + |
2941 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { | 3023 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { |
3024 | if (data_size && !tried_avoid_double) | ||
3025 | goto push_for_double; | ||
2942 | split = 2 ; | 3026 | split = 2 ; |
2943 | } | 3027 | } |
2944 | } | 3028 | } |
@@ -3019,6 +3103,13 @@ again: | |||
3019 | } | 3103 | } |
3020 | 3104 | ||
3021 | return ret; | 3105 | return ret; |
3106 | |||
3107 | push_for_double: | ||
3108 | push_for_double_split(trans, root, path, data_size); | ||
3109 | tried_avoid_double = 1; | ||
3110 | if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) | ||
3111 | return 0; | ||
3112 | goto again; | ||
3022 | } | 3113 | } |
3023 | 3114 | ||
3024 | static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, | 3115 | static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, |
@@ -3915,13 +4006,15 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
3915 | extent_buffer_get(leaf); | 4006 | extent_buffer_get(leaf); |
3916 | 4007 | ||
3917 | btrfs_set_path_blocking(path); | 4008 | btrfs_set_path_blocking(path); |
3918 | wret = push_leaf_left(trans, root, path, 1, 1); | 4009 | wret = push_leaf_left(trans, root, path, 1, 1, |
4010 | 1, (u32)-1); | ||
3919 | if (wret < 0 && wret != -ENOSPC) | 4011 | if (wret < 0 && wret != -ENOSPC) |
3920 | ret = wret; | 4012 | ret = wret; |
3921 | 4013 | ||
3922 | if (path->nodes[0] == leaf && | 4014 | if (path->nodes[0] == leaf && |
3923 | btrfs_header_nritems(leaf)) { | 4015 | btrfs_header_nritems(leaf)) { |
3924 | wret = push_leaf_right(trans, root, path, 1, 1); | 4016 | wret = push_leaf_right(trans, root, path, 1, |
4017 | 1, 1, 0); | ||
3925 | if (wret < 0 && wret != -ENOSPC) | 4018 | if (wret < 0 && wret != -ENOSPC) |
3926 | ret = wret; | 4019 | ret = wret; |
3927 | } | 4020 | } |
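The min_slot/max_slot arguments added above bound how much of a leaf each push may move, which is what lets push_for_double_split() expose the target slot at a leaf edge instead of splitting twice. The toy program below (plain C, not btrfs code; the array model and names are illustrative only) shows the boundary semantics: a right push moves every slot at or after min_slot into the right neighbour, so the target item ends up at the end of the left leaf.

#include <stdio.h>

/* move items[min_slot..n-1] from leaf into the front of the right neighbour */
static int push_right(int *leaf, int *n, int *right, int *rn, int min_slot)
{
	int moved = *n - min_slot;

	if (moved <= 0)
		return 1;			/* nothing to push */
	for (int i = *rn - 1; i >= 0; i--)	/* make room at the front */
		right[i + moved] = right[i];
	for (int i = 0; i < moved; i++)
		right[i] = leaf[min_slot + i];
	*rn += moved;
	*n = min_slot;
	return 0;
}

int main(void)
{
	int leaf[16]  = { 10, 11, 12, 13, 14, 15 };
	int right[16] = { 20, 21 };
	int n = 6, rn = 2;

	/* target slot is 3: pushing slots 3..5 right leaves the target at the
	 * end of this leaf, so no double split is needed to make room there */
	push_right(leaf, &n, right, &rn, 3);
	printf("leaf now holds %d items, right neighbour holds %d\n", n, rn);
	return 0;
}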
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4dbaf89b1337..9254b3d58dbe 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1458,7 +1458,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1458 | */ | 1458 | */ |
1459 | 1459 | ||
1460 | /* the destination must be opened for writing */ | 1460 | /* the destination must be opened for writing */ |
1461 | if (!(file->f_mode & FMODE_WRITE)) | 1461 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) |
1462 | return -EINVAL; | 1462 | return -EINVAL; |
1463 | 1463 | ||
1464 | ret = mnt_want_write(file->f_path.mnt); | 1464 | ret = mnt_want_write(file->f_path.mnt); |
@@ -1511,7 +1511,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1511 | 1511 | ||
1512 | /* determine range to clone */ | 1512 | /* determine range to clone */ |
1513 | ret = -EINVAL; | 1513 | ret = -EINVAL; |
1514 | if (off >= src->i_size || off + len > src->i_size) | 1514 | if (off + len > src->i_size || off + len < off) |
1515 | goto out_unlock; | 1515 | goto out_unlock; |
1516 | if (len == 0) | 1516 | if (len == 0) |
1517 | olen = len = src->i_size - off; | 1517 | olen = len = src->i_size - off; |
@@ -1578,6 +1578,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1578 | u64 disko = 0, diskl = 0; | 1578 | u64 disko = 0, diskl = 0; |
1579 | u64 datao = 0, datal = 0; | 1579 | u64 datao = 0, datal = 0; |
1580 | u8 comp; | 1580 | u8 comp; |
1581 | u64 endoff; | ||
1581 | 1582 | ||
1582 | size = btrfs_item_size_nr(leaf, slot); | 1583 | size = btrfs_item_size_nr(leaf, slot); |
1583 | read_extent_buffer(leaf, buf, | 1584 | read_extent_buffer(leaf, buf, |
@@ -1712,9 +1713,18 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1712 | btrfs_release_path(root, path); | 1713 | btrfs_release_path(root, path); |
1713 | 1714 | ||
1714 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 1715 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
1715 | if (new_key.offset + datal > inode->i_size) | 1716 | |
1716 | btrfs_i_size_write(inode, | 1717 | /* |
1717 | new_key.offset + datal); | 1718 | * we round up to the block size at eof when |
1719 | * determining which extents to clone above, | ||
1720 | * but shouldn't round up the file size | ||
1721 | */ | ||
1722 | endoff = new_key.offset + datal; | ||
1723 | if (endoff > off+olen) | ||
1724 | endoff = off+olen; | ||
1725 | if (endoff > inode->i_size) | ||
1726 | btrfs_i_size_write(inode, endoff); | ||
1727 | |||
1718 | BTRFS_I(inode)->flags = BTRFS_I(src)->flags; | 1728 | BTRFS_I(inode)->flags = BTRFS_I(src)->flags; |
1719 | ret = btrfs_update_inode(trans, root, inode); | 1729 | ret = btrfs_update_inode(trans, root, inode); |
1720 | BUG_ON(ret); | 1730 | BUG_ON(ret); |
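The new range check above guards against both reading past i_size and unsigned wrap-around in off + len, and the endoff clamp keeps a block-rounded clone from inflating the destination's visible size. A standalone sketch of the same arithmetic (illustrative code, not the ioctl itself):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* reject ranges that wrap or run past the source size; len == 0 means
 * "clone to EOF" in the ioctl and is handled before this check */
static bool clone_range_valid(uint64_t off, uint64_t len, uint64_t src_size)
{
	return off + len >= off && off + len <= src_size;
}

/* the cloned extent may be rounded up to a block, but the destination size
 * must not grow past the bytes actually requested */
static uint64_t clone_end_offset(uint64_t key_offset, uint64_t datal,
				 uint64_t off, uint64_t olen)
{
	uint64_t endoff = key_offset + datal;

	if (endoff > off + olen)
		endoff = off + olen;
	return endoff;
}

int main(void)
{
	assert(!clone_range_valid(UINT64_MAX - 10, 100, 1 << 20));	/* wraps */
	assert(clone_range_valid(4096, 8192, 1 << 20));
	assert(clone_end_offset(0, 8192, 0, 5000) == 5000);		/* clamped */
	return 0;
}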
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index 3fe49042d8ad..6d44053ecff1 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -613,6 +613,9 @@ static void ceph_x_destroy(struct ceph_auth_client *ac) | |||
613 | remove_ticket_handler(ac, th); | 613 | remove_ticket_handler(ac, th); |
614 | } | 614 | } |
615 | 615 | ||
616 | if (xi->auth_authorizer.buf) | ||
617 | ceph_buffer_put(xi->auth_authorizer.buf); | ||
618 | |||
616 | kfree(ac->private); | 619 | kfree(ac->private); |
617 | ac->private = NULL; | 620 | ac->private = NULL; |
618 | } | 621 | } |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 3ab79f6c4ce8..416c08d315db 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1514,6 +1514,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |||
1514 | ceph_encode_filepath(&p, end, ino1, path1); | 1514 | ceph_encode_filepath(&p, end, ino1, path1); |
1515 | ceph_encode_filepath(&p, end, ino2, path2); | 1515 | ceph_encode_filepath(&p, end, ino2, path2); |
1516 | 1516 | ||
1517 | /* make note of release offset, in case we need to replay */ | ||
1518 | req->r_request_release_offset = p - msg->front.iov_base; | ||
1519 | |||
1517 | /* cap releases */ | 1520 | /* cap releases */ |
1518 | releases = 0; | 1521 | releases = 0; |
1519 | if (req->r_inode_drop) | 1522 | if (req->r_inode_drop) |
@@ -1580,6 +1583,32 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, | |||
1580 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, | 1583 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
1581 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | 1584 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); |
1582 | 1585 | ||
1586 | if (req->r_got_unsafe) { | ||
1587 | /* | ||
1588 | * Replay. Do not regenerate message (and rebuild | ||
1589 | * paths, etc.); just use the original message. | ||
1590 | * Rebuilding paths will break for renames because | ||
1591 | * d_move mangles the src name. | ||
1592 | */ | ||
1593 | msg = req->r_request; | ||
1594 | rhead = msg->front.iov_base; | ||
1595 | |||
1596 | flags = le32_to_cpu(rhead->flags); | ||
1597 | flags |= CEPH_MDS_FLAG_REPLAY; | ||
1598 | rhead->flags = cpu_to_le32(flags); | ||
1599 | |||
1600 | if (req->r_target_inode) | ||
1601 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | ||
1602 | |||
1603 | rhead->num_retry = req->r_attempts - 1; | ||
1604 | |||
1605 | /* remove cap/dentry releases from message */ | ||
1606 | rhead->num_releases = 0; | ||
1607 | msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset); | ||
1608 | msg->front.iov_len = req->r_request_release_offset; | ||
1609 | return 0; | ||
1610 | } | ||
1611 | |||
1583 | if (req->r_request) { | 1612 | if (req->r_request) { |
1584 | ceph_msg_put(req->r_request); | 1613 | ceph_msg_put(req->r_request); |
1585 | req->r_request = NULL; | 1614 | req->r_request = NULL; |
@@ -1601,13 +1630,9 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, | |||
1601 | rhead->flags = cpu_to_le32(flags); | 1630 | rhead->flags = cpu_to_le32(flags); |
1602 | rhead->num_fwd = req->r_num_fwd; | 1631 | rhead->num_fwd = req->r_num_fwd; |
1603 | rhead->num_retry = req->r_attempts - 1; | 1632 | rhead->num_retry = req->r_attempts - 1; |
1633 | rhead->ino = 0; | ||
1604 | 1634 | ||
1605 | dout(" r_locked_dir = %p\n", req->r_locked_dir); | 1635 | dout(" r_locked_dir = %p\n", req->r_locked_dir); |
1606 | |||
1607 | if (req->r_target_inode && req->r_got_unsafe) | ||
1608 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | ||
1609 | else | ||
1610 | rhead->ino = 0; | ||
1611 | return 0; | 1636 | return 0; |
1612 | } | 1637 | } |
1613 | 1638 | ||
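The replay path above reuses the original request message and simply truncates it back to r_request_release_offset so stale cap releases are not resent. A toy illustration of that record-the-offset-then-truncate pattern (plain C; field and function names are made up for the example):

#include <stdio.h>
#include <string.h>

struct msg {
	char	buf[128];
	size_t	len;
	size_t	release_offset;		/* like r_request_release_offset */
};

static void build(struct msg *m)
{
	m->len = 0;
	m->len += snprintf(m->buf + m->len, sizeof(m->buf) - m->len, "path=/a/b;");
	m->release_offset = m->len;	/* the optional trailer starts here */
	m->len += snprintf(m->buf + m->len, sizeof(m->buf) - m->len, "releases=3;");
}

static void prepare_replay(struct msg *m)
{
	m->len = m->release_offset;	/* drop the now-stale releases */
	m->buf[m->len] = '\0';
}

int main(void)
{
	struct msg m;

	build(&m);
	printf("original: %s\n", m.buf);
	prepare_replay(&m);
	printf("replay:   %s\n", m.buf);	/* "path=/a/b;" only */
	return 0;
}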
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index b292fa42a66d..952410c60d09 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -188,6 +188,7 @@ struct ceph_mds_request { | |||
188 | int r_old_inode_drop, r_old_inode_unless; | 188 | int r_old_inode_drop, r_old_inode_unless; |
189 | 189 | ||
190 | struct ceph_msg *r_request; /* original request */ | 190 | struct ceph_msg *r_request; /* original request */ |
191 | int r_request_release_offset; | ||
191 | struct ceph_msg *r_reply; | 192 | struct ceph_msg *r_reply; |
192 | struct ceph_mds_reply_info_parsed r_reply_info; | 193 | struct ceph_mds_reply_info_parsed r_reply_info; |
193 | int r_err; | 194 | int r_err; |
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 9ad43a310a41..15167b2daa55 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -43,7 +43,8 @@ static void ceph_fault(struct ceph_connection *con); | |||
43 | * nicely render a sockaddr as a string. | 43 | * nicely render a sockaddr as a string. |
44 | */ | 44 | */ |
45 | #define MAX_ADDR_STR 20 | 45 | #define MAX_ADDR_STR 20 |
46 | static char addr_str[MAX_ADDR_STR][40]; | 46 | #define MAX_ADDR_STR_LEN 60 |
47 | static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN]; | ||
47 | static DEFINE_SPINLOCK(addr_str_lock); | 48 | static DEFINE_SPINLOCK(addr_str_lock); |
48 | static int last_addr_str; | 49 | static int last_addr_str; |
49 | 50 | ||
@@ -52,7 +53,6 @@ const char *pr_addr(const struct sockaddr_storage *ss) | |||
52 | int i; | 53 | int i; |
53 | char *s; | 54 | char *s; |
54 | struct sockaddr_in *in4 = (void *)ss; | 55 | struct sockaddr_in *in4 = (void *)ss; |
55 | unsigned char *quad = (void *)&in4->sin_addr.s_addr; | ||
56 | struct sockaddr_in6 *in6 = (void *)ss; | 56 | struct sockaddr_in6 *in6 = (void *)ss; |
57 | 57 | ||
58 | spin_lock(&addr_str_lock); | 58 | spin_lock(&addr_str_lock); |
@@ -64,25 +64,13 @@ const char *pr_addr(const struct sockaddr_storage *ss) | |||
64 | 64 | ||
65 | switch (ss->ss_family) { | 65 | switch (ss->ss_family) { |
66 | case AF_INET: | 66 | case AF_INET: |
67 | sprintf(s, "%u.%u.%u.%u:%u", | 67 | snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr, |
68 | (unsigned int)quad[0], | 68 | (unsigned int)ntohs(in4->sin_port)); |
69 | (unsigned int)quad[1], | ||
70 | (unsigned int)quad[2], | ||
71 | (unsigned int)quad[3], | ||
72 | (unsigned int)ntohs(in4->sin_port)); | ||
73 | break; | 69 | break; |
74 | 70 | ||
75 | case AF_INET6: | 71 | case AF_INET6: |
76 | sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u", | 72 | snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr, |
77 | in6->sin6_addr.s6_addr16[0], | 73 | (unsigned int)ntohs(in6->sin6_port)); |
78 | in6->sin6_addr.s6_addr16[1], | ||
79 | in6->sin6_addr.s6_addr16[2], | ||
80 | in6->sin6_addr.s6_addr16[3], | ||
81 | in6->sin6_addr.s6_addr16[4], | ||
82 | in6->sin6_addr.s6_addr16[5], | ||
83 | in6->sin6_addr.s6_addr16[6], | ||
84 | in6->sin6_addr.s6_addr16[7], | ||
85 | (unsigned int)ntohs(in6->sin6_port)); | ||
86 | break; | 74 | break; |
87 | 75 | ||
88 | default: | 76 | default: |
@@ -215,12 +203,13 @@ static void set_sock_callbacks(struct socket *sock, | |||
215 | */ | 203 | */ |
216 | static struct socket *ceph_tcp_connect(struct ceph_connection *con) | 204 | static struct socket *ceph_tcp_connect(struct ceph_connection *con) |
217 | { | 205 | { |
218 | struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr; | 206 | struct sockaddr_storage *paddr = &con->peer_addr.in_addr; |
219 | struct socket *sock; | 207 | struct socket *sock; |
220 | int ret; | 208 | int ret; |
221 | 209 | ||
222 | BUG_ON(con->sock); | 210 | BUG_ON(con->sock); |
223 | ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | 211 | ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, |
212 | IPPROTO_TCP, &sock); | ||
224 | if (ret) | 213 | if (ret) |
225 | return ERR_PTR(ret); | 214 | return ERR_PTR(ret); |
226 | con->sock = sock; | 215 | con->sock = sock; |
@@ -234,7 +223,8 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con) | |||
234 | 223 | ||
235 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); | 224 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); |
236 | 225 | ||
237 | ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK); | 226 | ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), |
227 | O_NONBLOCK); | ||
238 | if (ret == -EINPROGRESS) { | 228 | if (ret == -EINPROGRESS) { |
239 | dout("connect %s EINPROGRESS sk_state = %u\n", | 229 | dout("connect %s EINPROGRESS sk_state = %u\n", |
240 | pr_addr(&con->peer_addr.in_addr), | 230 | pr_addr(&con->peer_addr.in_addr), |
@@ -1009,19 +999,32 @@ int ceph_parse_ips(const char *c, const char *end, | |||
1009 | struct sockaddr_in *in4 = (void *)ss; | 999 | struct sockaddr_in *in4 = (void *)ss; |
1010 | struct sockaddr_in6 *in6 = (void *)ss; | 1000 | struct sockaddr_in6 *in6 = (void *)ss; |
1011 | int port; | 1001 | int port; |
1002 | char delim = ','; | ||
1003 | |||
1004 | if (*p == '[') { | ||
1005 | delim = ']'; | ||
1006 | p++; | ||
1007 | } | ||
1012 | 1008 | ||
1013 | memset(ss, 0, sizeof(*ss)); | 1009 | memset(ss, 0, sizeof(*ss)); |
1014 | if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, | 1010 | if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, |
1015 | ',', &ipend)) { | 1011 | delim, &ipend)) |
1016 | ss->ss_family = AF_INET; | 1012 | ss->ss_family = AF_INET; |
1017 | } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr, | 1013 | else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr, |
1018 | ',', &ipend)) { | 1014 | delim, &ipend)) |
1019 | ss->ss_family = AF_INET6; | 1015 | ss->ss_family = AF_INET6; |
1020 | } else { | 1016 | else |
1021 | goto bad; | 1017 | goto bad; |
1022 | } | ||
1023 | p = ipend; | 1018 | p = ipend; |
1024 | 1019 | ||
1020 | if (delim == ']') { | ||
1021 | if (*p != ']') { | ||
1022 | dout("missing matching ']'\n"); | ||
1023 | goto bad; | ||
1024 | } | ||
1025 | p++; | ||
1026 | } | ||
1027 | |||
1025 | /* port? */ | 1028 | /* port? */ |
1026 | if (p < end && *p == ':') { | 1029 | if (p < end && *p == ':') { |
1027 | port = 0; | 1030 | port = 0; |
@@ -1055,7 +1058,7 @@ int ceph_parse_ips(const char *c, const char *end, | |||
1055 | return 0; | 1058 | return 0; |
1056 | 1059 | ||
1057 | bad: | 1060 | bad: |
1058 | pr_err("parse_ips bad ip '%s'\n", c); | 1061 | pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); |
1059 | return -EINVAL; | 1062 | return -EINVAL; |
1060 | } | 1063 | } |
1061 | 1064 | ||
@@ -2015,20 +2018,20 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) | |||
2015 | { | 2018 | { |
2016 | mutex_lock(&con->mutex); | 2019 | mutex_lock(&con->mutex); |
2017 | if (!list_empty(&msg->list_head)) { | 2020 | if (!list_empty(&msg->list_head)) { |
2018 | dout("con_revoke %p msg %p\n", con, msg); | 2021 | dout("con_revoke %p msg %p - was on queue\n", con, msg); |
2019 | list_del_init(&msg->list_head); | 2022 | list_del_init(&msg->list_head); |
2020 | ceph_msg_put(msg); | 2023 | ceph_msg_put(msg); |
2021 | msg->hdr.seq = 0; | 2024 | msg->hdr.seq = 0; |
2022 | if (con->out_msg == msg) { | 2025 | } |
2023 | ceph_msg_put(con->out_msg); | 2026 | if (con->out_msg == msg) { |
2024 | con->out_msg = NULL; | 2027 | dout("con_revoke %p msg %p - was sending\n", con, msg); |
2025 | } | 2028 | con->out_msg = NULL; |
2026 | if (con->out_kvec_is_msg) { | 2029 | if (con->out_kvec_is_msg) { |
2027 | con->out_skip = con->out_kvec_bytes; | 2030 | con->out_skip = con->out_kvec_bytes; |
2028 | con->out_kvec_is_msg = false; | 2031 | con->out_kvec_is_msg = false; |
2029 | } | 2032 | } |
2030 | } else { | 2033 | ceph_msg_put(msg); |
2031 | dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg); | 2034 | msg->hdr.seq = 0; |
2032 | } | 2035 | } |
2033 | mutex_unlock(&con->mutex); | 2036 | mutex_unlock(&con->mutex); |
2034 | } | 2037 | } |
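The address formatting above relies on the kernel's %pI4/%pI6c vsnprintf() extensions and a bounded snprintf(); outside the kernel the same effect comes from inet_ntop(). A minimal userspace sketch of an equivalent helper (illustrative only, not the ceph code):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#define ADDR_STR_LEN 60			/* mirrors MAX_ADDR_STR_LEN above */

static const char *addr_to_str(const struct sockaddr_storage *ss,
			       char *buf, size_t len)
{
	char ip[INET6_ADDRSTRLEN];

	if (ss->ss_family == AF_INET) {
		const struct sockaddr_in *in4 = (const void *)ss;

		inet_ntop(AF_INET, &in4->sin_addr, ip, sizeof(ip));
		snprintf(buf, len, "%s:%u", ip, ntohs(in4->sin_port));
	} else if (ss->ss_family == AF_INET6) {
		const struct sockaddr_in6 *in6 = (const void *)ss;

		inet_ntop(AF_INET6, &in6->sin6_addr, ip, sizeof(ip));
		/* brackets keep address:port unambiguous, like "[%pI6c]:%u" */
		snprintf(buf, len, "[%s]:%u", ip, ntohs(in6->sin6_port));
	} else {
		snprintf(buf, len, "(unknown family %d)", ss->ss_family);
	}
	return buf;
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
	char buf[ADDR_STR_LEN];

	memset(&ss, 0, sizeof(ss));
	in4->sin_family = AF_INET;
	in4->sin_port = htons(6789);
	inet_pton(AF_INET, "10.0.0.1", &in4->sin_addr);
	printf("%s\n", addr_to_str(&ss, buf, sizeof(buf)));
	return 0;
}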
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 50ce64ebd330..277f8b339577 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -568,6 +568,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
568 | if (ev > CEPH_PG_POOL_VERSION) { | 568 | if (ev > CEPH_PG_POOL_VERSION) { |
569 | pr_warning("got unknown v %d > %d of ceph_pg_pool\n", | 569 | pr_warning("got unknown v %d > %d of ceph_pg_pool\n", |
570 | ev, CEPH_PG_POOL_VERSION); | 570 | ev, CEPH_PG_POOL_VERSION); |
571 | kfree(pi); | ||
571 | goto bad; | 572 | goto bad; |
572 | } | 573 | } |
573 | __decode_pool(p, pi); | 574 | __decode_pool(p, pi); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 484e52bb40bb..2cb1a70214d7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -923,7 +923,7 @@ init_cifs(void) | |||
923 | goto out_unregister_filesystem; | 923 | goto out_unregister_filesystem; |
924 | #endif | 924 | #endif |
925 | #ifdef CONFIG_CIFS_DFS_UPCALL | 925 | #ifdef CONFIG_CIFS_DFS_UPCALL |
926 | rc = register_key_type(&key_type_dns_resolver); | 926 | rc = cifs_init_dns_resolver(); |
927 | if (rc) | 927 | if (rc) |
928 | goto out_unregister_key_type; | 928 | goto out_unregister_key_type; |
929 | #endif | 929 | #endif |
@@ -935,7 +935,7 @@ init_cifs(void) | |||
935 | 935 | ||
936 | out_unregister_resolver_key: | 936 | out_unregister_resolver_key: |
937 | #ifdef CONFIG_CIFS_DFS_UPCALL | 937 | #ifdef CONFIG_CIFS_DFS_UPCALL |
938 | unregister_key_type(&key_type_dns_resolver); | 938 | cifs_exit_dns_resolver(); |
939 | out_unregister_key_type: | 939 | out_unregister_key_type: |
940 | #endif | 940 | #endif |
941 | #ifdef CONFIG_CIFS_UPCALL | 941 | #ifdef CONFIG_CIFS_UPCALL |
@@ -961,7 +961,7 @@ exit_cifs(void) | |||
961 | cifs_proc_clean(); | 961 | cifs_proc_clean(); |
962 | #ifdef CONFIG_CIFS_DFS_UPCALL | 962 | #ifdef CONFIG_CIFS_DFS_UPCALL |
963 | cifs_dfs_release_automount_timer(); | 963 | cifs_dfs_release_automount_timer(); |
964 | unregister_key_type(&key_type_dns_resolver); | 964 | cifs_exit_dns_resolver(); |
965 | #endif | 965 | #endif |
966 | #ifdef CONFIG_CIFS_UPCALL | 966 | #ifdef CONFIG_CIFS_UPCALL |
967 | unregister_key_type(&cifs_spnego_key_type); | 967 | unregister_key_type(&cifs_spnego_key_type); |
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 4db2c5e7283f..49315cbf742d 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -24,12 +24,16 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/keyctl.h> | ||
28 | #include <linux/key-type.h> | ||
27 | #include <keys/user-type.h> | 29 | #include <keys/user-type.h> |
28 | #include "dns_resolve.h" | 30 | #include "dns_resolve.h" |
29 | #include "cifsglob.h" | 31 | #include "cifsglob.h" |
30 | #include "cifsproto.h" | 32 | #include "cifsproto.h" |
31 | #include "cifs_debug.h" | 33 | #include "cifs_debug.h" |
32 | 34 | ||
35 | static const struct cred *dns_resolver_cache; | ||
36 | |||
33 | /* Checks if supplied name is IP address | 37 | /* Checks if supplied name is IP address |
34 | * returns: | 38 | * returns: |
35 | * 1 - name is IP | 39 | * 1 - name is IP |
@@ -94,6 +98,7 @@ struct key_type key_type_dns_resolver = { | |||
94 | int | 98 | int |
95 | dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | 99 | dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) |
96 | { | 100 | { |
101 | const struct cred *saved_cred; | ||
97 | int rc = -EAGAIN; | 102 | int rc = -EAGAIN; |
98 | struct key *rkey = ERR_PTR(-EAGAIN); | 103 | struct key *rkey = ERR_PTR(-EAGAIN); |
99 | char *name; | 104 | char *name; |
@@ -133,8 +138,15 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | |||
133 | goto skip_upcall; | 138 | goto skip_upcall; |
134 | } | 139 | } |
135 | 140 | ||
141 | saved_cred = override_creds(dns_resolver_cache); | ||
136 | rkey = request_key(&key_type_dns_resolver, name, ""); | 142 | rkey = request_key(&key_type_dns_resolver, name, ""); |
143 | revert_creds(saved_cred); | ||
137 | if (!IS_ERR(rkey)) { | 144 | if (!IS_ERR(rkey)) { |
145 | if (!(rkey->perm & KEY_USR_VIEW)) { | ||
146 | down_read(&rkey->sem); | ||
147 | rkey->perm |= KEY_USR_VIEW; | ||
148 | up_read(&rkey->sem); | ||
149 | } | ||
138 | len = rkey->type_data.x[0]; | 150 | len = rkey->type_data.x[0]; |
139 | data = rkey->payload.data; | 151 | data = rkey->payload.data; |
140 | } else { | 152 | } else { |
@@ -165,4 +177,61 @@ out: | |||
165 | return rc; | 177 | return rc; |
166 | } | 178 | } |
167 | 179 | ||
180 | int __init cifs_init_dns_resolver(void) | ||
181 | { | ||
182 | struct cred *cred; | ||
183 | struct key *keyring; | ||
184 | int ret; | ||
185 | |||
186 | printk(KERN_NOTICE "Registering the %s key type\n", | ||
187 | key_type_dns_resolver.name); | ||
188 | |||
189 | /* create an override credential set with a special thread keyring in | ||
190 | * which DNS requests are cached | ||
191 | * | ||
192 | * this is used to prevent malicious redirections from being installed | ||
193 | * with add_key(). | ||
194 | */ | ||
195 | cred = prepare_kernel_cred(NULL); | ||
196 | if (!cred) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, | ||
200 | (KEY_POS_ALL & ~KEY_POS_SETATTR) | | ||
201 | KEY_USR_VIEW | KEY_USR_READ, | ||
202 | KEY_ALLOC_NOT_IN_QUOTA); | ||
203 | if (IS_ERR(keyring)) { | ||
204 | ret = PTR_ERR(keyring); | ||
205 | goto failed_put_cred; | ||
206 | } | ||
207 | |||
208 | ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL); | ||
209 | if (ret < 0) | ||
210 | goto failed_put_key; | ||
211 | |||
212 | ret = register_key_type(&key_type_dns_resolver); | ||
213 | if (ret < 0) | ||
214 | goto failed_put_key; | ||
215 | |||
216 | /* instruct request_key() to use this special keyring as a cache for | ||
217 | * the results it looks up */ | ||
218 | cred->thread_keyring = keyring; | ||
219 | cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; | ||
220 | dns_resolver_cache = cred; | ||
221 | return 0; | ||
222 | |||
223 | failed_put_key: | ||
224 | key_put(keyring); | ||
225 | failed_put_cred: | ||
226 | put_cred(cred); | ||
227 | return ret; | ||
228 | } | ||
168 | 229 | ||
230 | void __exit cifs_exit_dns_resolver(void) | ||
231 | { | ||
232 | key_revoke(dns_resolver_cache->thread_keyring); | ||
233 | unregister_key_type(&key_type_dns_resolver); | ||
234 | put_cred(dns_resolver_cache); | ||
235 | printk(KERN_NOTICE "Unregistered %s key type\n", | ||
236 | key_type_dns_resolver.name); | ||
237 | } | ||
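The core of this change is that the DNS upcall now runs under a module-private credential set whose thread keyring caches the results, so a user cannot pre-seed the lookup with add_key(). Stripped of the cifs specifics, the override pattern looks roughly like this (kernel-style sketch; the wrapper name and the static cred variable are hypothetical):

static const struct cred *module_cache_cred;	/* set up once, as in cifs_init_dns_resolver() */

static struct key *cached_request_key(struct key_type *type, const char *desc)
{
	const struct cred *saved;
	struct key *key;

	saved = override_creds(module_cache_cred);	/* swap in the cache creds */
	key = request_key(type, desc, "");		/* upcall; result lands in the cache keyring */
	revert_creds(saved);				/* restore the caller's creds */
	return key;
}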
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 966e9288930b..26b9eaa9f5ee 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -24,8 +24,8 @@ | |||
24 | #define _DNS_RESOLVE_H | 24 | #define _DNS_RESOLVE_H |
25 | 25 | ||
26 | #ifdef __KERNEL__ | 26 | #ifdef __KERNEL__ |
27 | #include <linux/key-type.h> | 27 | extern int __init cifs_init_dns_resolver(void); |
28 | extern struct key_type key_type_dns_resolver; | 28 | extern void __exit cifs_exit_dns_resolver(void); |
29 | extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); | 29 | extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); |
30 | #endif /* KERNEL */ | 30 | #endif /* KERNEL */ |
31 | 31 | ||
diff --git a/fs/dcache.c b/fs/dcache.c
index c8c78ba07827..86d4db15473e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent); | |||
896 | * | 896 | * |
897 | * In this case we return -1 to tell the caller that we baled. | 897 | * In this case we return -1 to tell the caller that we baled. |
898 | */ | 898 | */ |
899 | static int shrink_dcache_memory(int nr, gfp_t gfp_mask) | 899 | static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
900 | { | 900 | { |
901 | if (nr) { | 901 | if (nr) { |
902 | if (!(gfp_mask & __GFP_FS)) | 902 | if (!(gfp_mask & __GFP_FS)) |
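This hunk, and the matching ones in gfs2, inode.c, mbcache, nfs, quota, ubifs and xfs below, convert every shrinker callback to take the struct shrinker pointer as its first argument, so a callback can recover per-instance state with container_of() instead of relying on global lists. A minimal sketch of the resulting shape, using the register_shrinker() API of this kernel generation (the cache structure and all names are made up for illustration; the callback still returns the remaining object count, or -1 when it cannot make progress):

/* hypothetical cache that owns its own shrinker instance */
struct my_cache {
	struct shrinker	shrinker;	/* embedded, registered per instance */
	atomic_t	nr_objects;
};

static int my_cache_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	struct my_cache *cache = container_of(shrink, struct my_cache, shrinker);

	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;	/* cannot recurse into the fs from here */
		/* ... free up to nr_to_scan objects from the cache ... */
	}
	return atomic_read(&cache->nr_objects);
}

static void my_cache_init_shrinker(struct my_cache *cache)
{
	cache->shrinker.shrink = my_cache_shrink;
	cache->shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&cache->shrinker);
	/* unregister_shrinker(&cache->shrinker) on teardown */
}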
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index dbab3fdc2582..0898f3ec8212 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1358,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) | |||
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | 1360 | ||
1361 | static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) | 1361 | static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
1362 | { | 1362 | { |
1363 | struct gfs2_glock *gl; | 1363 | struct gfs2_glock *gl; |
1364 | int may_demote; | 1364 | int may_demote; |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index b256d6f24288..8f02d3db8f42 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list); | |||
77 | static atomic_t qd_lru_count = ATOMIC_INIT(0); | 77 | static atomic_t qd_lru_count = ATOMIC_INIT(0); |
78 | static DEFINE_SPINLOCK(qd_lru_lock); | 78 | static DEFINE_SPINLOCK(qd_lru_lock); |
79 | 79 | ||
80 | int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) | 80 | int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
81 | { | 81 | { |
82 | struct gfs2_quota_data *qd; | 82 | struct gfs2_quota_data *qd; |
83 | struct gfs2_sbd *sdp; | 83 | struct gfs2_sbd *sdp; |
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 195f60c8bd14..e7d236ca48bd 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) | |||
51 | return ret; | 51 | return ret; |
52 | } | 52 | } |
53 | 53 | ||
54 | extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); | 54 | extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask); |
55 | extern const struct quotactl_ops gfs2_quotactl_ops; | 55 | extern const struct quotactl_ops gfs2_quotactl_ops; |
56 | 56 | ||
57 | #endif /* __QUOTA_DOT_H__ */ | 57 | #endif /* __QUOTA_DOT_H__ */ |
diff --git a/fs/inode.c b/fs/inode.c
index 2bee20ae3d65..722860b323a9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan) | |||
512 | * This function is passed the number of inodes to scan, and it returns the | 512 | * This function is passed the number of inodes to scan, and it returns the |
513 | * total number of remaining possibly-reclaimable inodes. | 513 | * total number of remaining possibly-reclaimable inodes. |
514 | */ | 514 | */ |
515 | static int shrink_icache_memory(int nr, gfp_t gfp_mask) | 515 | static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
516 | { | 516 | { |
517 | if (nr) { | 517 | if (nr) { |
518 | /* | 518 | /* |
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index a2d58c96f1b4..d258e261bdc7 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -626,7 +626,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i | |||
626 | 626 | ||
627 | static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 627 | static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) |
628 | { | 628 | { |
629 | /* success of check_xattr_ref_inode() means taht inode (ic) dose not have | 629 | /* success of check_xattr_ref_inode() means that inode (ic) dose not have |
630 | * duplicate name/value pairs. If duplicate name/value pair would be found, | 630 | * duplicate name/value pairs. If duplicate name/value pair would be found, |
631 | * one will be removed. | 631 | * one will be removed. |
632 | */ | 632 | */ |
diff --git a/fs/mbcache.c b/fs/mbcache.c
index ec88ff3d04a9..e28f21b95344 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache) | |||
115 | * What the mbcache registers as to get shrunk dynamically. | 115 | * What the mbcache registers as to get shrunk dynamically. |
116 | */ | 116 | */ |
117 | 117 | ||
118 | static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask); | 118 | static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); |
119 | 119 | ||
120 | static struct shrinker mb_cache_shrinker = { | 120 | static struct shrinker mb_cache_shrinker = { |
121 | .shrink = mb_cache_shrink_fn, | 121 | .shrink = mb_cache_shrink_fn, |
@@ -191,13 +191,14 @@ forget: | |||
191 | * This function is called by the kernel memory management when memory | 191 | * This function is called by the kernel memory management when memory |
192 | * gets low. | 192 | * gets low. |
193 | * | 193 | * |
194 | * @shrink: (ignored) | ||
194 | * @nr_to_scan: Number of objects to scan | 195 | * @nr_to_scan: Number of objects to scan |
195 | * @gfp_mask: (ignored) | 196 | * @gfp_mask: (ignored) |
196 | * | 197 | * |
197 | * Returns the number of objects which are present in the cache. | 198 | * Returns the number of objects which are present in the cache. |
198 | */ | 199 | */ |
199 | static int | 200 | static int |
200 | mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask) | 201 | mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
201 | { | 202 | { |
202 | LIST_HEAD(free_list); | 203 | LIST_HEAD(free_list); |
203 | struct list_head *l, *ltmp; | 204 | struct list_head *l, *ltmp; |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 782b431ef91c..e60416d3f818 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head) | |||
1710 | } | 1710 | } |
1711 | } | 1711 | } |
1712 | 1712 | ||
1713 | int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) | 1713 | int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
1714 | { | 1714 | { |
1715 | LIST_HEAD(head); | 1715 | LIST_HEAD(head); |
1716 | struct nfs_inode *nfsi; | 1716 | struct nfs_inode *nfsi; |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index d8bd619e386c..e70f44b9b3f4 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[]; | |||
205 | void nfs_close_context(struct nfs_open_context *ctx, int is_sync); | 205 | void nfs_close_context(struct nfs_open_context *ctx, int is_sync); |
206 | 206 | ||
207 | /* dir.c */ | 207 | /* dir.c */ |
208 | extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask); | 208 | extern int nfs_access_cache_shrinker(struct shrinker *shrink, |
209 | int nr_to_scan, gfp_t gfp_mask); | ||
209 | 210 | ||
210 | /* inode.c */ | 211 | /* inode.c */ |
211 | extern struct workqueue_struct *nfsiod_workqueue; | 212 | extern struct workqueue_struct *nfsiod_workqueue; |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 12c233da1b6b..437d2ca2de97 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -676,7 +676,7 @@ static void prune_dqcache(int count) | |||
676 | * This is called from kswapd when we think we need some | 676 | * This is called from kswapd when we think we need some |
677 | * more memory | 677 | * more memory |
678 | */ | 678 | */ |
679 | static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) | 679 | static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
680 | { | 680 | { |
681 | if (nr) { | 681 | if (nr) { |
682 | spin_lock(&dq_list_lock); | 682 | spin_lock(&dq_list_lock); |
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 02feb59cefca..0b201114a5ad 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -277,7 +277,7 @@ static int kick_a_thread(void) | |||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | int ubifs_shrinker(int nr, gfp_t gfp_mask) | 280 | int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
281 | { | 281 | { |
282 | int freed, contention = 0; | 282 | int freed, contention = 0; |
283 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); | 283 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 2eef553d50c8..04310878f449 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); | |||
1575 | int ubifs_tnc_end_commit(struct ubifs_info *c); | 1575 | int ubifs_tnc_end_commit(struct ubifs_info *c); |
1576 | 1576 | ||
1577 | /* shrinker.c */ | 1577 | /* shrinker.c */ |
1578 | int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask); | 1578 | int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); |
1579 | 1579 | ||
1580 | /* commit.c */ | 1580 | /* commit.c */ |
1581 | int ubifs_bg_thread(void *info); | 1581 | int ubifs_bg_thread(void *info); |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 649ade8ef598..2ee3f7a60163 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | static kmem_zone_t *xfs_buf_zone; | 46 | static kmem_zone_t *xfs_buf_zone; |
47 | STATIC int xfsbufd(void *); | 47 | STATIC int xfsbufd(void *); |
48 | STATIC int xfsbufd_wakeup(int, gfp_t); | 48 | STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t); |
49 | STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); | 49 | STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); |
50 | static struct shrinker xfs_buf_shake = { | 50 | static struct shrinker xfs_buf_shake = { |
51 | .shrink = xfsbufd_wakeup, | 51 | .shrink = xfsbufd_wakeup, |
@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages( | |||
340 | __func__, gfp_mask); | 340 | __func__, gfp_mask); |
341 | 341 | ||
342 | XFS_STATS_INC(xb_page_retries); | 342 | XFS_STATS_INC(xb_page_retries); |
343 | xfsbufd_wakeup(0, gfp_mask); | 343 | xfsbufd_wakeup(NULL, 0, gfp_mask); |
344 | congestion_wait(BLK_RW_ASYNC, HZ/50); | 344 | congestion_wait(BLK_RW_ASYNC, HZ/50); |
345 | goto retry; | 345 | goto retry; |
346 | } | 346 | } |
@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues( | |||
1762 | 1762 | ||
1763 | STATIC int | 1763 | STATIC int |
1764 | xfsbufd_wakeup( | 1764 | xfsbufd_wakeup( |
1765 | struct shrinker *shrink, | ||
1765 | int priority, | 1766 | int priority, |
1766 | gfp_t mask) | 1767 | gfp_t mask) |
1767 | { | 1768 | { |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f2d1718c9165..80938c736c27 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1883,7 +1883,6 @@ init_xfs_fs(void) | |||
1883 | goto out_cleanup_procfs; | 1883 | goto out_cleanup_procfs; |
1884 | 1884 | ||
1885 | vfs_initquota(); | 1885 | vfs_initquota(); |
1886 | xfs_inode_shrinker_init(); | ||
1887 | 1886 | ||
1888 | error = register_filesystem(&xfs_fs_type); | 1887 | error = register_filesystem(&xfs_fs_type); |
1889 | if (error) | 1888 | if (error) |
@@ -1911,7 +1910,6 @@ exit_xfs_fs(void) | |||
1911 | { | 1910 | { |
1912 | vfs_exitquota(); | 1911 | vfs_exitquota(); |
1913 | unregister_filesystem(&xfs_fs_type); | 1912 | unregister_filesystem(&xfs_fs_type); |
1914 | xfs_inode_shrinker_destroy(); | ||
1915 | xfs_sysctl_unregister(); | 1913 | xfs_sysctl_unregister(); |
1916 | xfs_cleanup_procfs(); | 1914 | xfs_cleanup_procfs(); |
1917 | xfs_buf_terminate(); | 1915 | xfs_buf_terminate(); |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index ef7f0218bccb..a51a07c3a70c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -144,6 +144,41 @@ restart: | |||
144 | return last_error; | 144 | return last_error; |
145 | } | 145 | } |
146 | 146 | ||
147 | /* | ||
148 | * Select the next per-ag structure to iterate during the walk. The reclaim | ||
149 | * walk is optimised only to walk AGs with reclaimable inodes in them. | ||
150 | */ | ||
151 | static struct xfs_perag * | ||
152 | xfs_inode_ag_iter_next_pag( | ||
153 | struct xfs_mount *mp, | ||
154 | xfs_agnumber_t *first, | ||
155 | int tag) | ||
156 | { | ||
157 | struct xfs_perag *pag = NULL; | ||
158 | |||
159 | if (tag == XFS_ICI_RECLAIM_TAG) { | ||
160 | int found; | ||
161 | int ref; | ||
162 | |||
163 | spin_lock(&mp->m_perag_lock); | ||
164 | found = radix_tree_gang_lookup_tag(&mp->m_perag_tree, | ||
165 | (void **)&pag, *first, 1, tag); | ||
166 | if (found <= 0) { | ||
167 | spin_unlock(&mp->m_perag_lock); | ||
168 | return NULL; | ||
169 | } | ||
170 | *first = pag->pag_agno + 1; | ||
171 | /* open coded pag reference increment */ | ||
172 | ref = atomic_inc_return(&pag->pag_ref); | ||
173 | spin_unlock(&mp->m_perag_lock); | ||
174 | trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_); | ||
175 | } else { | ||
176 | pag = xfs_perag_get(mp, *first); | ||
177 | (*first)++; | ||
178 | } | ||
179 | return pag; | ||
180 | } | ||
181 | |||
147 | int | 182 | int |
148 | xfs_inode_ag_iterator( | 183 | xfs_inode_ag_iterator( |
149 | struct xfs_mount *mp, | 184 | struct xfs_mount *mp, |
@@ -154,16 +189,15 @@ xfs_inode_ag_iterator( | |||
154 | int exclusive, | 189 | int exclusive, |
155 | int *nr_to_scan) | 190 | int *nr_to_scan) |
156 | { | 191 | { |
192 | struct xfs_perag *pag; | ||
157 | int error = 0; | 193 | int error = 0; |
158 | int last_error = 0; | 194 | int last_error = 0; |
159 | xfs_agnumber_t ag; | 195 | xfs_agnumber_t ag; |
160 | int nr; | 196 | int nr; |
161 | 197 | ||
162 | nr = nr_to_scan ? *nr_to_scan : INT_MAX; | 198 | nr = nr_to_scan ? *nr_to_scan : INT_MAX; |
163 | for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { | 199 | ag = 0; |
164 | struct xfs_perag *pag; | 200 | while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) { |
165 | |||
166 | pag = xfs_perag_get(mp, ag); | ||
167 | error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, | 201 | error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, |
168 | exclusive, &nr); | 202 | exclusive, &nr); |
169 | xfs_perag_put(pag); | 203 | xfs_perag_put(pag); |
@@ -640,6 +674,17 @@ __xfs_inode_set_reclaim_tag( | |||
640 | radix_tree_tag_set(&pag->pag_ici_root, | 674 | radix_tree_tag_set(&pag->pag_ici_root, |
641 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), | 675 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), |
642 | XFS_ICI_RECLAIM_TAG); | 676 | XFS_ICI_RECLAIM_TAG); |
677 | |||
678 | if (!pag->pag_ici_reclaimable) { | ||
679 | /* propagate the reclaim tag up into the perag radix tree */ | ||
680 | spin_lock(&ip->i_mount->m_perag_lock); | ||
681 | radix_tree_tag_set(&ip->i_mount->m_perag_tree, | ||
682 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | ||
683 | XFS_ICI_RECLAIM_TAG); | ||
684 | spin_unlock(&ip->i_mount->m_perag_lock); | ||
685 | trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, | ||
686 | -1, _RET_IP_); | ||
687 | } | ||
643 | pag->pag_ici_reclaimable++; | 688 | pag->pag_ici_reclaimable++; |
644 | } | 689 | } |
645 | 690 | ||
@@ -674,6 +719,16 @@ __xfs_inode_clear_reclaim_tag( | |||
674 | radix_tree_tag_clear(&pag->pag_ici_root, | 719 | radix_tree_tag_clear(&pag->pag_ici_root, |
675 | XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); | 720 | XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); |
676 | pag->pag_ici_reclaimable--; | 721 | pag->pag_ici_reclaimable--; |
722 | if (!pag->pag_ici_reclaimable) { | ||
723 | /* clear the reclaim tag from the perag radix tree */ | ||
724 | spin_lock(&ip->i_mount->m_perag_lock); | ||
725 | radix_tree_tag_clear(&ip->i_mount->m_perag_tree, | ||
726 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | ||
727 | XFS_ICI_RECLAIM_TAG); | ||
728 | spin_unlock(&ip->i_mount->m_perag_lock); | ||
729 | trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno, | ||
730 | -1, _RET_IP_); | ||
731 | } | ||
677 | } | 732 | } |
678 | 733 | ||
679 | /* | 734 | /* |
@@ -828,83 +883,52 @@ xfs_reclaim_inodes( | |||
828 | 883 | ||
829 | /* | 884 | /* |
830 | * Shrinker infrastructure. | 885 | * Shrinker infrastructure. |
831 | * | ||
832 | * This is all far more complex than it needs to be. It adds a global list of | ||
833 | * mounts because the shrinkers can only call a global context. We need to make | ||
834 | * the shrinkers pass a context to avoid the need for global state. | ||
835 | */ | 886 | */ |
836 | static LIST_HEAD(xfs_mount_list); | ||
837 | static struct rw_semaphore xfs_mount_list_lock; | ||
838 | |||
839 | static int | 887 | static int |
840 | xfs_reclaim_inode_shrink( | 888 | xfs_reclaim_inode_shrink( |
889 | struct shrinker *shrink, | ||
841 | int nr_to_scan, | 890 | int nr_to_scan, |
842 | gfp_t gfp_mask) | 891 | gfp_t gfp_mask) |
843 | { | 892 | { |
844 | struct xfs_mount *mp; | 893 | struct xfs_mount *mp; |
845 | struct xfs_perag *pag; | 894 | struct xfs_perag *pag; |
846 | xfs_agnumber_t ag; | 895 | xfs_agnumber_t ag; |
847 | int reclaimable = 0; | 896 | int reclaimable; |
848 | 897 | ||
898 | mp = container_of(shrink, struct xfs_mount, m_inode_shrink); | ||
849 | if (nr_to_scan) { | 899 | if (nr_to_scan) { |
850 | if (!(gfp_mask & __GFP_FS)) | 900 | if (!(gfp_mask & __GFP_FS)) |
851 | return -1; | 901 | return -1; |
852 | 902 | ||
853 | down_read(&xfs_mount_list_lock); | 903 | xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, |
854 | list_for_each_entry(mp, &xfs_mount_list, m_mplist) { | ||
855 | xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, | ||
856 | XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); | 904 | XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); |
857 | if (nr_to_scan <= 0) | 905 | /* if we don't exhaust the scan, don't bother coming back */ |
858 | break; | 906 | if (nr_to_scan > 0) |
859 | } | 907 | return -1; |
860 | up_read(&xfs_mount_list_lock); | 908 | } |
861 | } | ||
862 | 909 | ||
863 | down_read(&xfs_mount_list_lock); | 910 | reclaimable = 0; |
864 | list_for_each_entry(mp, &xfs_mount_list, m_mplist) { | 911 | ag = 0; |
865 | for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { | 912 | while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, |
866 | pag = xfs_perag_get(mp, ag); | 913 | XFS_ICI_RECLAIM_TAG))) { |
867 | reclaimable += pag->pag_ici_reclaimable; | 914 | reclaimable += pag->pag_ici_reclaimable; |
868 | xfs_perag_put(pag); | 915 | xfs_perag_put(pag); |
869 | } | ||
870 | } | 916 | } |
871 | up_read(&xfs_mount_list_lock); | ||
872 | return reclaimable; | 917 | return reclaimable; |
873 | } | 918 | } |
874 | 919 | ||
875 | static struct shrinker xfs_inode_shrinker = { | ||
876 | .shrink = xfs_reclaim_inode_shrink, | ||
877 | .seeks = DEFAULT_SEEKS, | ||
878 | }; | ||
879 | |||
880 | void __init | ||
881 | xfs_inode_shrinker_init(void) | ||
882 | { | ||
883 | init_rwsem(&xfs_mount_list_lock); | ||
884 | register_shrinker(&xfs_inode_shrinker); | ||
885 | } | ||
886 | |||
887 | void | ||
888 | xfs_inode_shrinker_destroy(void) | ||
889 | { | ||
890 | ASSERT(list_empty(&xfs_mount_list)); | ||
891 | unregister_shrinker(&xfs_inode_shrinker); | ||
892 | } | ||
893 | |||
894 | void | 920 | void |
895 | xfs_inode_shrinker_register( | 921 | xfs_inode_shrinker_register( |
896 | struct xfs_mount *mp) | 922 | struct xfs_mount *mp) |
897 | { | 923 | { |
898 | down_write(&xfs_mount_list_lock); | 924 | mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink; |
899 | list_add_tail(&mp->m_mplist, &xfs_mount_list); | 925 | mp->m_inode_shrink.seeks = DEFAULT_SEEKS; |
900 | up_write(&xfs_mount_list_lock); | 926 | register_shrinker(&mp->m_inode_shrink); |
901 | } | 927 | } |
902 | 928 | ||
903 | void | 929 | void |
904 | xfs_inode_shrinker_unregister( | 930 | xfs_inode_shrinker_unregister( |
905 | struct xfs_mount *mp) | 931 | struct xfs_mount *mp) |
906 | { | 932 | { |
907 | down_write(&xfs_mount_list_lock); | 933 | unregister_shrinker(&mp->m_inode_shrink); |
908 | list_del(&mp->m_mplist); | ||
909 | up_write(&xfs_mount_list_lock); | ||
910 | } | 934 | } |
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index cdcbaaca9880..e28139aaa4aa 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -55,8 +55,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp, | |||
55 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), | 55 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), |
56 | int flags, int tag, int write_lock, int *nr_to_scan); | 56 | int flags, int tag, int write_lock, int *nr_to_scan); |
57 | 57 | ||
58 | void xfs_inode_shrinker_init(void); | ||
59 | void xfs_inode_shrinker_destroy(void); | ||
60 | void xfs_inode_shrinker_register(struct xfs_mount *mp); | 58 | void xfs_inode_shrinker_register(struct xfs_mount *mp); |
61 | void xfs_inode_shrinker_unregister(struct xfs_mount *mp); | 59 | void xfs_inode_shrinker_unregister(struct xfs_mount *mp); |
62 | 60 | ||
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index 73d5aa117384..302820690904 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -124,7 +124,10 @@ DEFINE_EVENT(xfs_perag_class, name, \ | |||
124 | unsigned long caller_ip), \ | 124 | unsigned long caller_ip), \ |
125 | TP_ARGS(mp, agno, refcount, caller_ip)) | 125 | TP_ARGS(mp, agno, refcount, caller_ip)) |
126 | DEFINE_PERAG_REF_EVENT(xfs_perag_get); | 126 | DEFINE_PERAG_REF_EVENT(xfs_perag_get); |
127 | DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim); | ||
127 | DEFINE_PERAG_REF_EVENT(xfs_perag_put); | 128 | DEFINE_PERAG_REF_EVENT(xfs_perag_put); |
129 | DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); | ||
130 | DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); | ||
128 | 131 | ||
129 | TRACE_EVENT(xfs_attr_list_node_descend, | 132 | TRACE_EVENT(xfs_attr_list_node_descend, |
130 | TP_PROTO(struct xfs_attr_list_context *ctx, | 133 | TP_PROTO(struct xfs_attr_list_context *ctx, |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 8c117ff2e3ab..67c018392d62 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -69,7 +69,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); | |||
69 | 69 | ||
70 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); | 70 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); |
71 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); | 71 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); |
72 | STATIC int xfs_qm_shake(int, gfp_t); | 72 | STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); |
73 | 73 | ||
74 | static struct shrinker xfs_qm_shaker = { | 74 | static struct shrinker xfs_qm_shaker = { |
75 | .shrink = xfs_qm_shake, | 75 | .shrink = xfs_qm_shake, |
@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist( | |||
2117 | */ | 2117 | */ |
2118 | /* ARGSUSED */ | 2118 | /* ARGSUSED */ |
2119 | STATIC int | 2119 | STATIC int |
2120 | xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) | 2120 | xfs_qm_shake( |
2121 | struct shrinker *shrink, | ||
2122 | int nr_to_scan, | ||
2123 | gfp_t gfp_mask) | ||
2121 | { | 2124 | { |
2122 | int ndqused, nfree, n; | 2125 | int ndqused, nfree, n; |
2123 | 2126 | ||
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d2c7eed4eda..5761087ee8ea 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -259,7 +259,7 @@ typedef struct xfs_mount { | |||
259 | wait_queue_head_t m_wait_single_sync_task; | 259 | wait_queue_head_t m_wait_single_sync_task; |
260 | __int64_t m_update_flags; /* sb flags we need to update | 260 | __int64_t m_update_flags; /* sb flags we need to update |
261 | on the next remount,rw */ | 261 | on the next remount,rw */ |
262 | struct list_head m_mplist; /* inode shrinker mount list */ | 262 | struct shrinker m_inode_shrink; /* inode reclaim shrinker */ |
263 | } xfs_mount_t; | 263 | } xfs_mount_t; |
264 | 264 | ||
265 | /* | 265 | /* |