diff options
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- | mm/shmem.c | 143 |
1 file changed, 70 insertions(+), 73 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 61574b81d979..e64fa726a790 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -6,8 +6,8 @@
6 | * 2000-2001 Christoph Rohland | 6 | * 2000-2001 Christoph Rohland |
7 | * 2000-2001 SAP AG | 7 | * 2000-2001 SAP AG |
8 | * 2002 Red Hat Inc. | 8 | * 2002 Red Hat Inc. |
9 | * Copyright (C) 2002-2004 Hugh Dickins. | 9 | * Copyright (C) 2002-2005 Hugh Dickins. |
10 | * Copyright (C) 2002-2004 VERITAS Software Corporation. | 10 | * Copyright (C) 2002-2005 VERITAS Software Corporation. |
11 | * Copyright (C) 2004 Andi Kleen, SuSE Labs | 11 | * Copyright (C) 2004 Andi Kleen, SuSE Labs |
12 | * | 12 | * |
13 | * Extended attribute support for tmpfs: | 13 | * Extended attribute support for tmpfs: |
@@ -194,7 +194,7 @@ static DEFINE_SPINLOCK(shmem_swaplist_lock);
194 | static void shmem_free_blocks(struct inode *inode, long pages) | 194 | static void shmem_free_blocks(struct inode *inode, long pages) |
195 | { | 195 | { |
196 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); | 196 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
197 | if (sbinfo) { | 197 | if (sbinfo->max_blocks) { |
198 | spin_lock(&sbinfo->stat_lock); | 198 | spin_lock(&sbinfo->stat_lock); |
199 | sbinfo->free_blocks += pages; | 199 | sbinfo->free_blocks += pages; |
200 | inode->i_blocks -= pages*BLOCKS_PER_PAGE; | 200 | inode->i_blocks -= pages*BLOCKS_PER_PAGE; |
@@ -357,7 +357,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
357 | * page (and perhaps indirect index pages) yet to allocate: | 357 | * page (and perhaps indirect index pages) yet to allocate: |
358 | * a waste to allocate index if we cannot allocate data. | 358 | * a waste to allocate index if we cannot allocate data. |
359 | */ | 359 | */ |
360 | if (sbinfo) { | 360 | if (sbinfo->max_blocks) { |
361 | spin_lock(&sbinfo->stat_lock); | 361 | spin_lock(&sbinfo->stat_lock); |
362 | if (sbinfo->free_blocks <= 1) { | 362 | if (sbinfo->free_blocks <= 1) { |
363 | spin_unlock(&sbinfo->stat_lock); | 363 | spin_unlock(&sbinfo->stat_lock); |
@@ -677,8 +677,8 @@ static void shmem_delete_inode(struct inode *inode)
677 | spin_unlock(&shmem_swaplist_lock); | 677 | spin_unlock(&shmem_swaplist_lock); |
678 | } | 678 | } |
679 | } | 679 | } |
680 | if (sbinfo) { | 680 | BUG_ON(inode->i_blocks); |
681 | BUG_ON(inode->i_blocks); | 681 | if (sbinfo->max_inodes) { |
682 | spin_lock(&sbinfo->stat_lock); | 682 | spin_lock(&sbinfo->stat_lock); |
683 | sbinfo->free_inodes++; | 683 | sbinfo->free_inodes++; |
684 | spin_unlock(&sbinfo->stat_lock); | 684 | spin_unlock(&sbinfo->stat_lock); |
@@ -1080,7 +1080,7 @@ repeat:
1080 | } else { | 1080 | } else { |
1081 | shmem_swp_unmap(entry); | 1081 | shmem_swp_unmap(entry); |
1082 | sbinfo = SHMEM_SB(inode->i_sb); | 1082 | sbinfo = SHMEM_SB(inode->i_sb); |
1083 | if (sbinfo) { | 1083 | if (sbinfo->max_blocks) { |
1084 | spin_lock(&sbinfo->stat_lock); | 1084 | spin_lock(&sbinfo->stat_lock); |
1085 | if (sbinfo->free_blocks == 0 || | 1085 | if (sbinfo->free_blocks == 0 || |
1086 | shmem_acct_block(info->flags)) { | 1086 | shmem_acct_block(info->flags)) { |
@@ -1269,7 +1269,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1269 | struct shmem_inode_info *info; | 1269 | struct shmem_inode_info *info; |
1270 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 1270 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
1271 | 1271 | ||
1272 | if (sbinfo) { | 1272 | if (sbinfo->max_inodes) { |
1273 | spin_lock(&sbinfo->stat_lock); | 1273 | spin_lock(&sbinfo->stat_lock); |
1274 | if (!sbinfo->free_inodes) { | 1274 | if (!sbinfo->free_inodes) { |
1275 | spin_unlock(&sbinfo->stat_lock); | 1275 | spin_unlock(&sbinfo->stat_lock); |
@@ -1319,7 +1319,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1319 | mpol_shared_policy_init(&info->policy); | 1319 | mpol_shared_policy_init(&info->policy); |
1320 | break; | 1320 | break; |
1321 | } | 1321 | } |
1322 | } else if (sbinfo) { | 1322 | } else if (sbinfo->max_inodes) { |
1323 | spin_lock(&sbinfo->stat_lock); | 1323 | spin_lock(&sbinfo->stat_lock); |
1324 | sbinfo->free_inodes++; | 1324 | sbinfo->free_inodes++; |
1325 | spin_unlock(&sbinfo->stat_lock); | 1325 | spin_unlock(&sbinfo->stat_lock); |
@@ -1328,31 +1328,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1328 | } | 1328 | } |
1329 | 1329 | ||
1330 | #ifdef CONFIG_TMPFS | 1330 | #ifdef CONFIG_TMPFS |
1331 | |||
1332 | static int shmem_set_size(struct shmem_sb_info *sbinfo, | ||
1333 | unsigned long max_blocks, unsigned long max_inodes) | ||
1334 | { | ||
1335 | int error; | ||
1336 | unsigned long blocks, inodes; | ||
1337 | |||
1338 | spin_lock(&sbinfo->stat_lock); | ||
1339 | blocks = sbinfo->max_blocks - sbinfo->free_blocks; | ||
1340 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; | ||
1341 | error = -EINVAL; | ||
1342 | if (max_blocks < blocks) | ||
1343 | goto out; | ||
1344 | if (max_inodes < inodes) | ||
1345 | goto out; | ||
1346 | error = 0; | ||
1347 | sbinfo->max_blocks = max_blocks; | ||
1348 | sbinfo->free_blocks = max_blocks - blocks; | ||
1349 | sbinfo->max_inodes = max_inodes; | ||
1350 | sbinfo->free_inodes = max_inodes - inodes; | ||
1351 | out: | ||
1352 | spin_unlock(&sbinfo->stat_lock); | ||
1353 | return error; | ||
1354 | } | ||
1355 | |||
1356 | static struct inode_operations shmem_symlink_inode_operations; | 1331 | static struct inode_operations shmem_symlink_inode_operations; |
1357 | static struct inode_operations shmem_symlink_inline_operations; | 1332 | static struct inode_operations shmem_symlink_inline_operations; |
1358 | 1333 | ||
@@ -1607,15 +1582,17 @@ static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
1607 | buf->f_type = TMPFS_MAGIC; | 1582 | buf->f_type = TMPFS_MAGIC; |
1608 | buf->f_bsize = PAGE_CACHE_SIZE; | 1583 | buf->f_bsize = PAGE_CACHE_SIZE; |
1609 | buf->f_namelen = NAME_MAX; | 1584 | buf->f_namelen = NAME_MAX; |
1610 | if (sbinfo) { | 1585 | spin_lock(&sbinfo->stat_lock); |
1611 | spin_lock(&sbinfo->stat_lock); | 1586 | if (sbinfo->max_blocks) { |
1612 | buf->f_blocks = sbinfo->max_blocks; | 1587 | buf->f_blocks = sbinfo->max_blocks; |
1613 | buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; | 1588 | buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; |
1589 | } | ||
1590 | if (sbinfo->max_inodes) { | ||
1614 | buf->f_files = sbinfo->max_inodes; | 1591 | buf->f_files = sbinfo->max_inodes; |
1615 | buf->f_ffree = sbinfo->free_inodes; | 1592 | buf->f_ffree = sbinfo->free_inodes; |
1616 | spin_unlock(&sbinfo->stat_lock); | ||
1617 | } | 1593 | } |
1618 | /* else leave those fields 0 like simple_statfs */ | 1594 | /* else leave those fields 0 like simple_statfs */ |
1595 | spin_unlock(&sbinfo->stat_lock); | ||
1619 | return 0; | 1596 | return 0; |
1620 | } | 1597 | } |
1621 | 1598 | ||
@@ -1672,7 +1649,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
1672 | * but each new link needs a new dentry, pinning lowmem, and | 1649 | * but each new link needs a new dentry, pinning lowmem, and |
1673 | * tmpfs dentries cannot be pruned until they are unlinked. | 1650 | * tmpfs dentries cannot be pruned until they are unlinked. |
1674 | */ | 1651 | */ |
1675 | if (sbinfo) { | 1652 | if (sbinfo->max_inodes) { |
1676 | spin_lock(&sbinfo->stat_lock); | 1653 | spin_lock(&sbinfo->stat_lock); |
1677 | if (!sbinfo->free_inodes) { | 1654 | if (!sbinfo->free_inodes) { |
1678 | spin_unlock(&sbinfo->stat_lock); | 1655 | spin_unlock(&sbinfo->stat_lock); |
@@ -1697,7 +1674,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1697 | 1674 | ||
1698 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) { | 1675 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) { |
1699 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); | 1676 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
1700 | if (sbinfo) { | 1677 | if (sbinfo->max_inodes) { |
1701 | spin_lock(&sbinfo->stat_lock); | 1678 | spin_lock(&sbinfo->stat_lock); |
1702 | sbinfo->free_inodes++; | 1679 | sbinfo->free_inodes++; |
1703 | spin_unlock(&sbinfo->stat_lock); | 1680 | spin_unlock(&sbinfo->stat_lock); |
@@ -1921,22 +1898,42 @@ bad_val:
1921 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) | 1898 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
1922 | { | 1899 | { |
1923 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 1900 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
1924 | unsigned long max_blocks = 0; | 1901 | unsigned long max_blocks = sbinfo->max_blocks; |
1925 | unsigned long max_inodes = 0; | 1902 | unsigned long max_inodes = sbinfo->max_inodes; |
1903 | unsigned long blocks; | ||
1904 | unsigned long inodes; | ||
1905 | int error = -EINVAL; | ||
1906 | |||
1907 | if (shmem_parse_options(data, NULL, NULL, NULL, | ||
1908 | &max_blocks, &max_inodes)) | ||
1909 | return error; | ||
1926 | 1910 | ||
1927 | if (sbinfo) { | 1911 | spin_lock(&sbinfo->stat_lock); |
1928 | max_blocks = sbinfo->max_blocks; | 1912 | blocks = sbinfo->max_blocks - sbinfo->free_blocks; |
1929 | max_inodes = sbinfo->max_inodes; | 1913 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; |
1930 | } | 1914 | if (max_blocks < blocks) |
1931 | if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes)) | 1915 | goto out; |
1932 | return -EINVAL; | 1916 | if (max_inodes < inodes) |
1933 | /* Keep it simple: disallow limited <-> unlimited remount */ | 1917 | goto out; |
1934 | if ((max_blocks || max_inodes) == !sbinfo) | 1918 | /* |
1935 | return -EINVAL; | 1919 | * Those tests also disallow limited->unlimited while any are in |
1936 | /* But allow the pointless unlimited -> unlimited remount */ | 1920 | * use, so i_blocks will always be zero when max_blocks is zero; |
1937 | if (!sbinfo) | 1921 | * but we must separately disallow unlimited->limited, because |
1938 | return 0; | 1922 | * in that case we have no record of how much is already in use. |
1939 | return shmem_set_size(sbinfo, max_blocks, max_inodes); | 1923 | */ |
1924 | if (max_blocks && !sbinfo->max_blocks) | ||
1925 | goto out; | ||
1926 | if (max_inodes && !sbinfo->max_inodes) | ||
1927 | goto out; | ||
1928 | |||
1929 | error = 0; | ||
1930 | sbinfo->max_blocks = max_blocks; | ||
1931 | sbinfo->free_blocks = max_blocks - blocks; | ||
1932 | sbinfo->max_inodes = max_inodes; | ||
1933 | sbinfo->free_inodes = max_inodes - inodes; | ||
1934 | out: | ||
1935 | spin_unlock(&sbinfo->stat_lock); | ||
1936 | return error; | ||
1940 | } | 1937 | } |
1941 | #endif | 1938 | #endif |
1942 | 1939 | ||
@@ -1961,11 +1958,11 @@ static int shmem_fill_super(struct super_block *sb,
1961 | uid_t uid = current->fsuid; | 1958 | uid_t uid = current->fsuid; |
1962 | gid_t gid = current->fsgid; | 1959 | gid_t gid = current->fsgid; |
1963 | int err = -ENOMEM; | 1960 | int err = -ENOMEM; |
1964 | 1961 | struct shmem_sb_info *sbinfo; | |
1965 | #ifdef CONFIG_TMPFS | ||
1966 | unsigned long blocks = 0; | 1962 | unsigned long blocks = 0; |
1967 | unsigned long inodes = 0; | 1963 | unsigned long inodes = 0; |
1968 | 1964 | ||
1965 | #ifdef CONFIG_TMPFS | ||
1969 | /* | 1966 | /* |
1970 | * Per default we only allow half of the physical ram per | 1967 | * Per default we only allow half of the physical ram per |
1971 | * tmpfs instance, limiting inodes to one per page of lowmem; | 1968 | * tmpfs instance, limiting inodes to one per page of lowmem; |
@@ -1976,34 +1973,34 @@ static int shmem_fill_super(struct super_block *sb,
1976 | inodes = totalram_pages - totalhigh_pages; | 1973 | inodes = totalram_pages - totalhigh_pages; |
1977 | if (inodes > blocks) | 1974 | if (inodes > blocks) |
1978 | inodes = blocks; | 1975 | inodes = blocks; |
1979 | 1976 | if (shmem_parse_options(data, &mode, &uid, &gid, | |
1980 | if (shmem_parse_options(data, &mode, | 1977 | &blocks, &inodes)) |
1981 | &uid, &gid, &blocks, &inodes)) | ||
1982 | return -EINVAL; | 1978 | return -EINVAL; |
1983 | } | 1979 | } |
1984 | |||
1985 | if (blocks || inodes) { | ||
1986 | struct shmem_sb_info *sbinfo; | ||
1987 | sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL); | ||
1988 | if (!sbinfo) | ||
1989 | return -ENOMEM; | ||
1990 | sb->s_fs_info = sbinfo; | ||
1991 | spin_lock_init(&sbinfo->stat_lock); | ||
1992 | sbinfo->max_blocks = blocks; | ||
1993 | sbinfo->free_blocks = blocks; | ||
1994 | sbinfo->max_inodes = inodes; | ||
1995 | sbinfo->free_inodes = inodes; | ||
1996 | } | ||
1997 | sb->s_xattr = shmem_xattr_handlers; | ||
1998 | #else | 1980 | #else |
1999 | sb->s_flags |= MS_NOUSER; | 1981 | sb->s_flags |= MS_NOUSER; |
2000 | #endif | 1982 | #endif |
2001 | 1983 | ||
1984 | /* Round up to L1_CACHE_BYTES to resist false sharing */ | ||
1985 | sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info), | ||
1986 | L1_CACHE_BYTES), GFP_KERNEL); | ||
1987 | if (!sbinfo) | ||
1988 | return -ENOMEM; | ||
1989 | |||
1990 | spin_lock_init(&sbinfo->stat_lock); | ||
1991 | sbinfo->max_blocks = blocks; | ||
1992 | sbinfo->free_blocks = blocks; | ||
1993 | sbinfo->max_inodes = inodes; | ||
1994 | sbinfo->free_inodes = inodes; | ||
1995 | |||
1996 | sb->s_fs_info = sbinfo; | ||
2002 | sb->s_maxbytes = SHMEM_MAX_BYTES; | 1997 | sb->s_maxbytes = SHMEM_MAX_BYTES; |
2003 | sb->s_blocksize = PAGE_CACHE_SIZE; | 1998 | sb->s_blocksize = PAGE_CACHE_SIZE; |
2004 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 1999 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
2005 | sb->s_magic = TMPFS_MAGIC; | 2000 | sb->s_magic = TMPFS_MAGIC; |
2006 | sb->s_op = &shmem_ops; | 2001 | sb->s_op = &shmem_ops; |
2002 | sb->s_xattr = shmem_xattr_handlers; | ||
2003 | |||
2007 | inode = shmem_get_inode(sb, S_IFDIR | mode, 0); | 2004 | inode = shmem_get_inode(sb, S_IFDIR | mode, 0); |
2008 | if (!inode) | 2005 | if (!inode) |
2009 | goto failed; | 2006 | goto failed; |