diff options
author | James Morris <jmorris@namei.org> | 2009-02-05 19:01:45 -0500 |
---|---|---|
committer | James Morris <jmorris@namei.org> | 2009-02-05 19:01:45 -0500 |
commit | cb5629b10d64a8006622ce3a52bc887d91057d69 (patch) | |
tree | 7c06d8f30783115e3384721046258ce615b129c5 /fs/inode.c | |
parent | 8920d5ad6ba74ae8ab020e90cc4d976980e68701 (diff) | |
parent | f01d1d546abb2f4028b5299092f529eefb01253a (diff) |
Merge branch 'master' into next
Conflicts:
fs/namei.c
Manually merged per:
diff --cc fs/namei.c
index 734f2b5,bbc15c2..0000000
--- a/fs/namei.c
+++ b/fs/namei.c
@@@ -860,9 -848,8 +849,10 @@@ static int __link_path_walk(const char
nd->flags |= LOOKUP_CONTINUE;
err = exec_permission_lite(inode);
if (err == -EAGAIN)
- err = vfs_permission(nd, MAY_EXEC);
+ err = inode_permission(nd->path.dentry->d_inode,
+ MAY_EXEC);
+ if (!err)
+ err = ima_path_check(&nd->path, MAY_EXEC);
if (err)
break;
@@@ -1525,14 -1506,9 +1509,14 @@@ int may_open(struct path *path, int acc
flag &= ~O_TRUNC;
}
- error = vfs_permission(nd, acc_mode);
+ error = inode_permission(inode, acc_mode);
if (error)
return error;
+
- error = ima_path_check(&nd->path,
++ error = ima_path_check(path,
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
+ if (error)
+ return error;
/*
* An append-only file must be opened in append mode for writing.
*/
Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'fs/inode.c')
-rw-r--r-- | fs/inode.c | 74 |
1 files changed, 68 insertions, 6 deletions
diff --git a/fs/inode.c b/fs/inode.c index ed22b14f2202..40e37c026565 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/inotify.h> | 24 | #include <linux/inotify.h> |
25 | #include <linux/mount.h> | 25 | #include <linux/mount.h> |
26 | #include <linux/async.h> | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * This is needed for the following functions: | 29 | * This is needed for the following functions: |
@@ -111,8 +112,8 @@ static void wake_up_inode(struct inode *inode) | |||
111 | 112 | ||
112 | /** | 113 | /** |
113 | * inode_init_always - perform inode structure intialisation | 114 | * inode_init_always - perform inode structure intialisation |
114 | * @sb - superblock inode belongs to. | 115 | * @sb: superblock inode belongs to |
115 | * @inode - inode to initialise | 116 | * @inode: inode to initialise |
116 | * | 117 | * |
117 | * These are initializations that need to be done on every inode | 118 | * These are initializations that need to be done on every inode |
118 | * allocation as the fields are not initialised by slab allocation. | 119 | * allocation as the fields are not initialised by slab allocation. |
@@ -132,6 +133,8 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) | |||
132 | inode->i_op = &empty_iops; | 133 | inode->i_op = &empty_iops; |
133 | inode->i_fop = &empty_fops; | 134 | inode->i_fop = &empty_fops; |
134 | inode->i_nlink = 1; | 135 | inode->i_nlink = 1; |
136 | inode->i_uid = 0; | ||
137 | inode->i_gid = 0; | ||
135 | atomic_set(&inode->i_writecount, 0); | 138 | atomic_set(&inode->i_writecount, 0); |
136 | inode->i_size = 0; | 139 | inode->i_size = 0; |
137 | inode->i_blocks = 0; | 140 | inode->i_blocks = 0; |
@@ -165,7 +168,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) | |||
165 | mapping->a_ops = &empty_aops; | 168 | mapping->a_ops = &empty_aops; |
166 | mapping->host = inode; | 169 | mapping->host = inode; |
167 | mapping->flags = 0; | 170 | mapping->flags = 0; |
168 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE); | 171 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); |
169 | mapping->assoc_mapping = NULL; | 172 | mapping->assoc_mapping = NULL; |
170 | mapping->backing_dev_info = &default_backing_dev_info; | 173 | mapping->backing_dev_info = &default_backing_dev_info; |
171 | mapping->writeback_index = 0; | 174 | mapping->writeback_index = 0; |
@@ -584,8 +587,8 @@ __inode_add_to_lists(struct super_block *sb, struct hlist_head *head, | |||
584 | 587 | ||
585 | /** | 588 | /** |
586 | * inode_add_to_lists - add a new inode to relevant lists | 589 | * inode_add_to_lists - add a new inode to relevant lists |
587 | * @sb - superblock inode belongs to. | 590 | * @sb: superblock inode belongs to |
588 | * @inode - inode to mark in use | 591 | * @inode: inode to mark in use |
589 | * | 592 | * |
590 | * When an inode is allocated it needs to be accounted for, added to the in use | 593 | * When an inode is allocated it needs to be accounted for, added to the in use |
591 | * list, the owning superblock and the inode hash. This needs to be done under | 594 | * list, the owning superblock and the inode hash. This needs to be done under |
@@ -609,7 +612,7 @@ EXPORT_SYMBOL_GPL(inode_add_to_lists); | |||
609 | * @sb: superblock | 612 | * @sb: superblock |
610 | * | 613 | * |
611 | * Allocates a new inode for given superblock. The default gfp_mask | 614 | * Allocates a new inode for given superblock. The default gfp_mask |
612 | * for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE. | 615 | * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE. |
613 | * If HIGHMEM pages are unsuitable or it is known that pages allocated | 616 | * If HIGHMEM pages are unsuitable or it is known that pages allocated |
614 | * for the page cache are not reclaimable or migratable, | 617 | * for the page cache are not reclaimable or migratable, |
615 | * mapping_set_gfp_mask() must be called with suitable flags on the | 618 | * mapping_set_gfp_mask() must be called with suitable flags on the |
@@ -1042,6 +1045,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino) | |||
1042 | 1045 | ||
1043 | EXPORT_SYMBOL(iget_locked); | 1046 | EXPORT_SYMBOL(iget_locked); |
1044 | 1047 | ||
/*
 * insert_inode_locked - hash a new, locked inode, failing if a live
 *			 inode with the same number already exists
 * @inode:	new inode; its i_sb and i_ino select the hash chain
 *
 * Marks @inode I_LOCK|I_NEW (callers unlock it later, e.g. via
 * unlock_new_inode) and tries to add it to the inode hash.
 *
 * Returns 0 once @inode is on the hash chain, or -EBUSY if another
 * inode with the same (sb, ino) is hashed and stays hashed after we
 * have waited for it.
 */
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *old;

	inode->i_state |= I_LOCK|I_NEW;
	while (1) {
		spin_lock(&inode_lock);
		old = find_inode_fast(sb, head, ino);
		if (likely(!old)) {
			/* No collision: hash ourselves and we are done. */
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode_lock);
			return 0;
		}
		/*
		 * Pin the colliding inode before dropping inode_lock so it
		 * cannot be freed under us while we wait on it.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		if (unlikely(!hlist_unhashed(&old->i_hash))) {
			/* Still hashed after the wait: a live duplicate. */
			iput(old);
			return -EBUSY;
		}
		/* The old inode went away while we waited; retry the insert. */
		iput(old);
	}
}

EXPORT_SYMBOL(insert_inode_locked);
1076 | |||
/*
 * insert_inode_locked4 - hash a new, locked inode using a caller-supplied
 *			  identity test, failing on a live duplicate
 * @inode:	new inode to hash; must belong to @inode->i_sb
 * @hashval:	hash value selecting the chain (filesystem-specific key)
 * @test:	callback deciding whether an existing inode matches @data
 * @data:	opaque cookie passed to @test
 *
 * Variant of insert_inode_locked for filesystems whose inode identity is
 * not just the inode number: duplicates are detected with find_inode()
 * and the @test callback rather than by i_ino.
 *
 * Returns 0 once @inode is on the hash chain, or -EBUSY if a matching
 * inode is hashed and stays hashed after we have waited for it.
 */
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *old;

	inode->i_state |= I_LOCK|I_NEW;

	while (1) {
		spin_lock(&inode_lock);
		old = find_inode(sb, head, test, data);
		if (likely(!old)) {
			/* No matching inode: hash ourselves and we are done. */
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode_lock);
			return 0;
		}
		/*
		 * Pin the matching inode before dropping inode_lock so it
		 * cannot be freed under us while we wait on it.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		if (unlikely(!hlist_unhashed(&old->i_hash))) {
			/* Still hashed after the wait: a live duplicate. */
			iput(old);
			return -EBUSY;
		}
		/* The old inode went away while we waited; retry the insert. */
		iput(old);
	}
}

EXPORT_SYMBOL(insert_inode_locked4);
1106 | |||
1045 | /** | 1107 | /** |
1046 | * __insert_inode_hash - hash an inode | 1108 | * __insert_inode_hash - hash an inode |
1047 | * @inode: unhashed inode | 1109 | * @inode: unhashed inode |