author	Vasiliy Kulikov <segoon@openwall.com>	2011-07-28 19:56:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-30 14:44:20 -0400
commit	4c677e2eefdba9c5bfc4474e2e91b26ae8458a1d (patch)
tree	c3c81703d022e0c3c43ddffc3ae165eb25aa0b1d /ipc/shm.c
parent	5774ed014f02120db9a6945a1ecebeb97c2acccb (diff)
shm: optimize locking and ipc_namespace getting
shm_lock() does a lookup of the shm segment in shm_ids(ns).ipcs_idr, which
is redundant as we already know the shmid_kernel address. An actual lock is
also not required for reads until we really want to destroy the segment.

exit_shm() and shm_destroy_orphaned() may avoid the loop by checking whether
there is at least one segment in the current ipc_namespace.

The check of nsproxy and ipc_ns against NULL is redundant as exit_shm() is
called from do_exit() before the call to exit_notify(), so dereferencing
current->nsproxy->ipc_ns is guaranteed to be safe.

Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
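For context on the first point: idr_for_each() already hands each callback a
pointer to the object stored in the idr, and the IPC code stores the segment's
kern_ipc_perm there, so the iteration callbacks never needed to re-walk
ipcs_idr by id. A minimal sketch of that callback contract follows
(example_callback() is an illustrative name, not part of this patch; the
stored-pointer detail follows ipc/util.c of this era):

	/*
	 * idr_for_each(&shm_ids(ns).ipcs_idr, fn, data) invokes
	 *   int fn(int id, void *p, void *data)
	 * with 'p' set to the pointer stored for 'id' -- here the
	 * segment's &shp->shm_perm.
	 */
	static int example_callback(int id, void *p, void *data)
	{
		struct kern_ipc_perm *ipcp = p;
		struct shmid_kernel *shp =
			container_of(ipcp, struct shmid_kernel, shm_perm);

		/* operate on shp directly; no idr_find()/shm_lock(ns, id) */
		return 0;
	}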
Diffstat (limited to 'ipc/shm.c')
-rw-r--r--	ipc/shm.c	61
1 file changed, 28 insertions(+), 33 deletions(-)
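For reference, the lookup that the message calls redundant: before this patch
the two iteration callbacks fetched the segment with shm_lock(ns, id), which
goes back through the idr by id and takes the per-segment spinlock. A rough
paraphrase of that pre-patch path (condensed from ipc/shm.c and ipc/util.c of
this era; error handling trimmed, not the verbatim source):

	static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
	{
		/*
		 * ipc_lock(): rcu_read_lock(), idr_find() in
		 * shm_ids(ns).ipcs_idr by the index derived from id,
		 * then spin_lock(&ipcp->lock).
		 */
		struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

		if (IS_ERR(ipcp))
			return (struct shmid_kernel *)ipcp;
		return container_of(ipcp, struct shmid_kernel, shm_perm);
	}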
diff --git a/ipc/shm.c b/ipc/shm.c
index fdaf8be65b75..9fb044f3b345 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -131,6 +131,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
+static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
+{
+	rcu_read_lock();
+	spin_lock(&ipcp->shm_perm.lock);
+}
+
 static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
 						int id)
 {
@@ -231,18 +237,15 @@ static void shm_close(struct vm_area_struct *vma)
 	up_write(&shm_ids(ns).rw_mutex);
 }
 
+/* Called with ns->shm_ids(ns).rw_mutex locked */
 static int shm_try_destroy_current(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
-	struct shmid_kernel *shp = shm_lock(ns, id);
+	struct kern_ipc_perm *ipcp = p;
+	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 
-	if (IS_ERR(shp))
-		return 0;
-
-	if (shp->shm_creator != current) {
-		shm_unlock(shp);
+	if (shp->shm_creator != current)
 		return 0;
-	}
 
 	/*
 	 * Mark it as orphaned to destroy the segment when
@@ -255,64 +258,56 @@ static int shm_try_destroy_current(int id, void *p, void *data)
 	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
 	 * is not set, it shouldn't be deleted here.
 	 */
-	if (!ns->shm_rmid_forced) {
-		shm_unlock(shp);
+	if (!ns->shm_rmid_forced)
 		return 0;
-	}
 
-	if (shm_may_destroy(ns, shp))
+	if (shm_may_destroy(ns, shp)) {
+		shm_lock_by_ptr(shp);
 		shm_destroy(ns, shp);
-	else
-		shm_unlock(shp);
+	}
 	return 0;
 }
 
+/* Called with ns->shm_ids(ns).rw_mutex locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
-	struct shmid_kernel *shp = shm_lock(ns, id);
-
-	if (IS_ERR(shp))
-		return 0;
+	struct kern_ipc_perm *ipcp = p;
+	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 
 	/*
 	 * We want to destroy segments without users and with already
 	 * exit'ed originating process.
+	 *
+	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
 	 */
-	if (shp->shm_creator != NULL) {
-		shm_unlock(shp);
+	if (shp->shm_creator != NULL)
 		return 0;
-	}
 
-	if (shm_may_destroy(ns, shp))
+	if (shm_may_destroy(ns, shp)) {
+		shm_lock_by_ptr(shp);
 		shm_destroy(ns, shp);
-	else
-		shm_unlock(shp);
+	}
 	return 0;
 }
 
 void shm_destroy_orphaned(struct ipc_namespace *ns)
 {
 	down_write(&shm_ids(ns).rw_mutex);
-	idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
+	if (&shm_ids(ns).in_use)
+		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 	up_write(&shm_ids(ns).rw_mutex);
 }
 
 
 void exit_shm(struct task_struct *task)
 {
-	struct nsproxy *nsp = task->nsproxy;
-	struct ipc_namespace *ns;
-
-	if (!nsp)
-		return;
-	ns = nsp->ipc_ns;
-	if (!ns)
-		return;
+	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 
 	/* Destroy all already created segments, but not mapped yet */
 	down_write(&shm_ids(ns).rw_mutex);
-	idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
+	if (&shm_ids(ns).in_use)
+		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
 	up_write(&shm_ids(ns).rw_mutex);
 }
 
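A usage note on the new helper: the locking in the callbacks above is
deliberately asymmetric. The per-segment lock is taken with shm_lock_by_ptr()
only on the destroy path, and in this era's ipc/shm.c shm_destroy() releases
it itself via shm_unlock(), so no explicit unlock follows. A hypothetical
caller following the same pattern would look roughly like this
(destroy_if_allowed() is an illustrative name, not part of the patch):

	/* Caller holds shm_ids(ns).rw_mutex for writing. */
	static void destroy_if_allowed(struct ipc_namespace *ns,
				       struct shmid_kernel *shp)
	{
		if (!shm_may_destroy(ns, shp))
			return;			/* still attached or not removable */

		shm_lock_by_ptr(shp);		/* rcu_read_lock() + spin_lock() */
		shm_destroy(ns, shp);		/* unlocks via shm_unlock() internally */
	}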