aboutsummaryrefslogtreecommitdiffstats
path: root/ipc/mqueue.c
diff options
context:
space:
mode:
authorJiri Slaby <jslaby@suse.cz>2011-07-26 19:08:46 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-07-26 19:49:44 -0400
commit04715206c0c2fd4ec5ca77fa51e3a5b41ce71492 (patch)
treefbead768d176d82e5e1ff6021c030b8cd6d26c9f /ipc/mqueue.c
parenta64a26e822ddb739de464540dfd2cbb6abce47d5 (diff)
ipc/mqueue.c: refactor failure handling
If new_inode fails to allocate an inode we need only return NULL. But now we test the opposite and have all the work in a nested block. So do the opposite to save one indentation level (and remove unnecessary line breaks). This is only a preparation/cleanup for the next patch where we fix up return values from mqueue_get_inode. Signed-off-by: Jiri Slaby <jslaby@suse.cz> Cc: Manfred Spraul <manfred@colorfullife.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/mqueue.c')
-rw-r--r--ipc/mqueue.c113
1 file changed, 57 insertions, 56 deletions
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 14fb6d67e6a3..d43c30f72f1d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -115,69 +115,70 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
115 struct inode *inode; 115 struct inode *inode;
116 116
117 inode = new_inode(sb); 117 inode = new_inode(sb);
118 if (inode) { 118 if (!inode)
119 inode->i_ino = get_next_ino(); 119 goto err;
120 inode->i_mode = mode;
121 inode->i_uid = current_fsuid();
122 inode->i_gid = current_fsgid();
123 inode->i_mtime = inode->i_ctime = inode->i_atime =
124 CURRENT_TIME;
125 120
126 if (S_ISREG(mode)) { 121 inode->i_ino = get_next_ino();
127 struct mqueue_inode_info *info; 122 inode->i_mode = mode;
128 struct task_struct *p = current; 123 inode->i_uid = current_fsuid();
129 unsigned long mq_bytes, mq_msg_tblsz; 124 inode->i_gid = current_fsgid();
130 125 inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;
131 inode->i_fop = &mqueue_file_operations; 126
132 inode->i_size = FILENT_SIZE; 127 if (S_ISREG(mode)) {
133 /* mqueue specific info */ 128 struct mqueue_inode_info *info;
134 info = MQUEUE_I(inode); 129 struct task_struct *p = current;
135 spin_lock_init(&info->lock); 130 unsigned long mq_bytes, mq_msg_tblsz;
136 init_waitqueue_head(&info->wait_q); 131
137 INIT_LIST_HEAD(&info->e_wait_q[0].list); 132 inode->i_fop = &mqueue_file_operations;
138 INIT_LIST_HEAD(&info->e_wait_q[1].list); 133 inode->i_size = FILENT_SIZE;
139 info->notify_owner = NULL; 134 /* mqueue specific info */
140 info->qsize = 0; 135 info = MQUEUE_I(inode);
141 info->user = NULL; /* set when all is ok */ 136 spin_lock_init(&info->lock);
142 memset(&info->attr, 0, sizeof(info->attr)); 137 init_waitqueue_head(&info->wait_q);
143 info->attr.mq_maxmsg = ipc_ns->mq_msg_max; 138 INIT_LIST_HEAD(&info->e_wait_q[0].list);
144 info->attr.mq_msgsize = ipc_ns->mq_msgsize_max; 139 INIT_LIST_HEAD(&info->e_wait_q[1].list);
145 if (attr) { 140 info->notify_owner = NULL;
146 info->attr.mq_maxmsg = attr->mq_maxmsg; 141 info->qsize = 0;
147 info->attr.mq_msgsize = attr->mq_msgsize; 142 info->user = NULL; /* set when all is ok */
148 } 143 memset(&info->attr, 0, sizeof(info->attr));
149 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); 144 info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
150 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); 145 info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
151 if (!info->messages) 146 if (attr) {
152 goto out_inode; 147 info->attr.mq_maxmsg = attr->mq_maxmsg;
153 148 info->attr.mq_msgsize = attr->mq_msgsize;
154 mq_bytes = (mq_msg_tblsz + 149 }
155 (info->attr.mq_maxmsg * info->attr.mq_msgsize)); 150 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
156 151 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
157 spin_lock(&mq_lock); 152 if (!info->messages)
158 if (u->mq_bytes + mq_bytes < u->mq_bytes || 153 goto out_inode;
159 u->mq_bytes + mq_bytes >
160 task_rlimit(p, RLIMIT_MSGQUEUE)) {
161 spin_unlock(&mq_lock);
162 /* mqueue_evict_inode() releases info->messages */
163 goto out_inode;
164 }
165 u->mq_bytes += mq_bytes;
166 spin_unlock(&mq_lock);
167 154
168 /* all is ok */ 155 mq_bytes = (mq_msg_tblsz +
169 info->user = get_uid(u); 156 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
170 } else if (S_ISDIR(mode)) { 157
171 inc_nlink(inode); 158 spin_lock(&mq_lock);
172 /* Some things misbehave if size == 0 on a directory */ 159 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
173 inode->i_size = 2 * DIRENT_SIZE; 160 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
174 inode->i_op = &mqueue_dir_inode_operations; 161 spin_unlock(&mq_lock);
175 inode->i_fop = &simple_dir_operations; 162 /* mqueue_evict_inode() releases info->messages */
163 goto out_inode;
176 } 164 }
165 u->mq_bytes += mq_bytes;
166 spin_unlock(&mq_lock);
167
168 /* all is ok */
169 info->user = get_uid(u);
170 } else if (S_ISDIR(mode)) {
171 inc_nlink(inode);
172 /* Some things misbehave if size == 0 on a directory */
173 inode->i_size = 2 * DIRENT_SIZE;
174 inode->i_op = &mqueue_dir_inode_operations;
175 inode->i_fop = &simple_dir_operations;
177 } 176 }
177
178 return inode; 178 return inode;
179out_inode: 179out_inode:
180 iput(inode); 180 iput(inode);
181err:
181 return NULL; 182 return NULL;
182} 183}
183 184