diff options
author | Peng Tao <bergwolf@gmail.com> | 2013-08-27 20:18:20 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2013-09-10 18:56:32 -0400 |
commit | fe92a0557a6f332119c51fdd2f3d574040989447 (patch) | |
tree | 92593a1f4bc325ba8efea831f194606a1b136dd1 | |
parent | cbc3769ecd74b183d3ba5e11264cf484d8572a00 (diff) |
staging/lustre/obdclass: convert lu_object shrinker to count/scan API
Convert the lu_object shrinker to the new count/scan API.
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r-- | drivers/staging/lustre/lustre/obdclass/lu_object.c | 98 |
1 file changed, 52 insertions, 46 deletions
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index c29ac1c2defd..3a3d5bc5a628 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c | |||
@@ -1779,7 +1779,6 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, | |||
1779 | } | 1779 | } |
1780 | EXPORT_SYMBOL(lu_env_refill_by_tags); | 1780 | EXPORT_SYMBOL(lu_env_refill_by_tags); |
1781 | 1781 | ||
1782 | static struct shrinker *lu_site_shrinker = NULL; | ||
1783 | 1782 | ||
1784 | typedef struct lu_site_stats{ | 1783 | typedef struct lu_site_stats{ |
1785 | unsigned lss_populated; | 1784 | unsigned lss_populated; |
@@ -1835,61 +1834,68 @@ static void lu_site_stats_get(cfs_hash_t *hs, | |||
1835 | * objects without taking the lu_sites_guard lock, but this is not | 1834 | * objects without taking the lu_sites_guard lock, but this is not |
1836 | * possible in the current implementation. | 1835 | * possible in the current implementation. |
1837 | */ | 1836 | */ |
1838 | static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) | 1837 | static unsigned long lu_cache_shrink_count(struct shrinker *sk, |
1838 | struct shrink_control *sc) | ||
1839 | { | 1839 | { |
1840 | lu_site_stats_t stats; | 1840 | lu_site_stats_t stats; |
1841 | struct lu_site *s; | 1841 | struct lu_site *s; |
1842 | struct lu_site *tmp; | 1842 | struct lu_site *tmp; |
1843 | int cached = 0; | 1843 | unsigned long cached = 0; |
1844 | int remain = shrink_param(sc, nr_to_scan); | ||
1845 | LIST_HEAD(splice); | ||
1846 | |||
1847 | if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) { | ||
1848 | if (remain != 0) | ||
1849 | return -1; | ||
1850 | else | ||
1851 | /* We must not take the lu_sites_guard lock when | ||
1852 | * __GFP_FS is *not* set because of the deadlock | ||
1853 | * possibility detailed above. Additionally, | ||
1854 | * since we cannot determine the number of | ||
1855 | * objects in the cache without taking this | ||
1856 | * lock, we're in a particularly tough spot. As | ||
1857 | * a result, we'll just lie and say our cache is | ||
1858 | * empty. This _should_ be ok, as we can't | ||
1859 | * reclaim objects when __GFP_FS is *not* set | ||
1860 | * anyways. | ||
1861 | */ | ||
1862 | return 0; | ||
1863 | } | ||
1864 | 1844 | ||
1865 | CDEBUG(D_INODE, "Shrink %d objects\n", remain); | 1845 | if (!(sc->gfp_mask & __GFP_FS)) |
1846 | return 0; | ||
1866 | 1847 | ||
1867 | mutex_lock(&lu_sites_guard); | 1848 | mutex_lock(&lu_sites_guard); |
1868 | list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { | 1849 | list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { |
1869 | if (shrink_param(sc, nr_to_scan) != 0) { | ||
1870 | remain = lu_site_purge(&lu_shrink_env, s, remain); | ||
1871 | /* | ||
1872 | * Move just shrunk site to the tail of site list to | ||
1873 | * assure shrinking fairness. | ||
1874 | */ | ||
1875 | list_move_tail(&s->ls_linkage, &splice); | ||
1876 | } | ||
1877 | |||
1878 | memset(&stats, 0, sizeof(stats)); | 1850 | memset(&stats, 0, sizeof(stats)); |
1879 | lu_site_stats_get(s->ls_obj_hash, &stats, 0); | 1851 | lu_site_stats_get(s->ls_obj_hash, &stats, 0); |
1880 | cached += stats.lss_total - stats.lss_busy; | 1852 | cached += stats.lss_total - stats.lss_busy; |
1881 | if (shrink_param(sc, nr_to_scan) && remain <= 0) | ||
1882 | break; | ||
1883 | } | 1853 | } |
1884 | list_splice(&splice, lu_sites.prev); | ||
1885 | mutex_unlock(&lu_sites_guard); | 1854 | mutex_unlock(&lu_sites_guard); |
1886 | 1855 | ||
1887 | cached = (cached / 100) * sysctl_vfs_cache_pressure; | 1856 | cached = (cached / 100) * sysctl_vfs_cache_pressure; |
1888 | if (shrink_param(sc, nr_to_scan) == 0) | 1857 | CDEBUG(D_INODE, "%ld objects cached\n", cached); |
1889 | CDEBUG(D_INODE, "%d objects cached\n", cached); | ||
1890 | return cached; | 1858 | return cached; |
1891 | } | 1859 | } |
1892 | 1860 | ||
1861 | static unsigned long lu_cache_shrink_scan(struct shrinker *sk, | ||
1862 | struct shrink_control *sc) | ||
1863 | { | ||
1864 | struct lu_site *s; | ||
1865 | struct lu_site *tmp; | ||
1866 | unsigned long remain = sc->nr_to_scan, freed = 0; | ||
1867 | LIST_HEAD(splice); | ||
1868 | |||
1869 | if (!(sc->gfp_mask & __GFP_FS)) | ||
1870 | /* We must not take the lu_sites_guard lock when | ||
1871 | * __GFP_FS is *not* set because of the deadlock | ||
1872 | * possibility detailed above. Additionally, | ||
1873 | * since we cannot determine the number of | ||
1874 | * objects in the cache without taking this | ||
1875 | * lock, we're in a particularly tough spot. As | ||
1876 | * a result, we'll just lie and say our cache is | ||
1877 | * empty. This _should_ be ok, as we can't | ||
1878 | * reclaim objects when __GFP_FS is *not* set | ||
1879 | * anyways. | ||
1880 | */ | ||
1881 | return SHRINK_STOP; | ||
1882 | |||
1883 | mutex_lock(&lu_sites_guard); | ||
1884 | list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { | ||
1885 | freed = lu_site_purge(&lu_shrink_env, s, remain); | ||
1886 | remain -= freed; | ||
1887 | /* | ||
1888 | * Move just shrunk site to the tail of site list to | ||
1889 | * assure shrinking fairness. | ||
1890 | */ | ||
1891 | list_move_tail(&s->ls_linkage, &splice); | ||
1892 | } | ||
1893 | list_splice(&splice, lu_sites.prev); | ||
1894 | mutex_unlock(&lu_sites_guard); | ||
1895 | |||
1896 | return sc->nr_to_scan - remain; | ||
1897 | } | ||
1898 | |||
1893 | /* | 1899 | /* |
1894 | * Debugging stuff. | 1900 | * Debugging stuff. |
1895 | */ | 1901 | */ |
@@ -1913,6 +1919,12 @@ int lu_printk_printer(const struct lu_env *env, | |||
1913 | return 0; | 1919 | return 0; |
1914 | } | 1920 | } |
1915 | 1921 | ||
1922 | static struct shrinker lu_site_shrinker = { | ||
1923 | .count_objects = lu_cache_shrink_count, | ||
1924 | .scan_objects = lu_cache_shrink_scan, | ||
1925 | .seeks = DEFAULT_SEEKS, | ||
1926 | }; | ||
1927 | |||
1916 | /** | 1928 | /** |
1917 | * Initialization of global lu_* data. | 1929 | * Initialization of global lu_* data. |
1918 | */ | 1930 | */ |
@@ -1947,9 +1959,7 @@ int lu_global_init(void) | |||
1947 | * inode, one for ea. Unfortunately setting this high value results in | 1959 | * inode, one for ea. Unfortunately setting this high value results in |
1948 | * lu_object/inode cache consuming all the memory. | 1960 | * lu_object/inode cache consuming all the memory. |
1949 | */ | 1961 | */ |
1950 | lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink); | 1962 | register_shrinker(&lu_site_shrinker); |
1951 | if (lu_site_shrinker == NULL) | ||
1952 | return -ENOMEM; | ||
1953 | 1963 | ||
1954 | return result; | 1964 | return result; |
1955 | } | 1965 | } |
@@ -1959,11 +1969,7 @@ int lu_global_init(void) | |||
1959 | */ | 1969 | */ |
1960 | void lu_global_fini(void) | 1970 | void lu_global_fini(void) |
1961 | { | 1971 | { |
1962 | if (lu_site_shrinker != NULL) { | 1972 | unregister_shrinker(&lu_site_shrinker); |
1963 | remove_shrinker(lu_site_shrinker); | ||
1964 | lu_site_shrinker = NULL; | ||
1965 | } | ||
1966 | |||
1967 | lu_context_key_degister(&lu_global_key); | 1973 | lu_context_key_degister(&lu_global_key); |
1968 | 1974 | ||
1969 | /* | 1975 | /* |