-rw-r--r--	ipc/shm.c	21
-rw-r--r--	ipc/util.c	52
-rw-r--r--	ipc/util.h	6
3 files changed, 4 insertions(+), 75 deletions(-)
diff --git a/ipc/shm.c b/ipc/shm.c
index a726aebce7d7..e77ec698cf40 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -112,23 +112,8 @@ void __init shm_init (void)
 }
 
 /*
- * shm_lock_(check_)down routines are called in the paths where the rw_mutex
- * is held to protect access to the idr tree.
- */
-static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
-					int id)
-{
-	struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);
-
-	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
-
-	return container_of(ipcp, struct shmid_kernel, shm_perm);
-}
-
-/*
  * shm_lock_(check_) routines are called in the paths where the rw_mutex
- * is not held.
+ * is not necessarily held.
  */
 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 {
@@ -211,7 +196,7 @@ static void shm_close(struct vm_area_struct *vma)
 
 	down_write(&shm_ids(ns).rw_mutex);
 	/* remove from the list of attaches of the shm segment */
-	shp = shm_lock_down(ns, sfd->id);
+	shp = shm_lock(ns, sfd->id);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
@@ -932,7 +917,7 @@ invalid:
 
 out_nattch:
 	down_write(&shm_ids(ns).rw_mutex);
-	shp = shm_lock_down(ns, shmid);
+	shp = shm_lock(ns, shmid);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
 	if(shp->shm_nattch == 0 &&
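Note: with shm_lock_down() gone, the writer paths above (shm_close() and the shmat() cleanup) call plain shm_lock() while still holding shm_ids(ns).rw_mutex for write. The surviving wrapper is not shown in this hunk; as a sketch (reconstructed, not part of this patch), it mirrors the removed _down variant but routes through ipc_lock():

/* Sketch of the remaining shm_lock() wrapper: same shape as the removed
 * shm_lock_down(), but it goes through ipc_lock(), which revalidates the
 * object under its spinlock. */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}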
diff --git a/ipc/util.c b/ipc/util.c
index 0f468c34e83c..49b3ea615dc5 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -716,56 +716,6 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 	return out;
 }
 
-/**
- * ipc_lock_down - Lock an ipc structure with rw_sem held
- * @ids: IPC identifier set
- * @id: ipc id to look for
- *
- * Look for an id in the ipc ids idr and lock the associated ipc object.
- *
- * The ipc object is locked on exit.
- *
- * This is the routine that should be called when the rw_mutex is already
- * held, i.e. idr tree protected.
- */
-
-struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-	int lid = ipcid_to_idx(id);
-
-	rcu_read_lock();
-	out = idr_find(&ids->ipcs_idr, lid);
-	if (out == NULL) {
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock(&out->lock);
-
-	/*
-	 * No need to verify that the structure is still valid since the
-	 * rw_mutex is held.
-	 */
-	return out;
-}
-
-struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-
-	out = ipc_lock_down(ids, id);
-	if (IS_ERR(out))
-		return out;
-
-	if (ipc_checkid(out, id)) {
-		ipc_unlock(out);
-		return ERR_PTR(-EIDRM);
-	}
-
-	return out;
-}
-
 struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
 {
 	struct kern_ipc_perm *out;
@@ -837,7 +787,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
 	int err;
 
 	down_write(&ids->rw_mutex);
-	ipcp = ipc_lock_check_down(ids, id);
+	ipcp = ipc_lock_check(ids, id);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
 		goto out_up;
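The reason the _down variants can be dropped is visible in the removed comment above: the only thing ipc_lock_down() skipped was revalidating the object after taking its spinlock, on the grounds that the rw_mutex pinned the idr tree. Calling ipc_lock() from those paths merely repeats a cheap check. A sketch of that check, reconstructed from the kernel of this era (the exact code in ipc/util.c may differ):

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&out->lock);

	/*
	 * Unlike the removed ipc_lock_down(), re-check that ipc_rmid()
	 * did not free the ID while we were spinning on the lock; this
	 * is what makes the helper safe without the rw_mutex, and it is
	 * merely redundant when the rw_mutex happens to be held.
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	return out;
}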
diff --git a/ipc/util.h b/ipc/util.h
index cdb966aebe07..3646b45a03c9 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -102,11 +102,6 @@ void* ipc_rcu_alloc(int size);
 void ipc_rcu_getref(void *ptr);
 void ipc_rcu_putref(void *ptr);
 
-/*
- * ipc_lock_down: called with rw_mutex held
- * ipc_lock: called without that lock held
- */
-struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
@@ -155,7 +150,6 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
 	rcu_read_unlock();
 }
 
-struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id);
 struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 			struct ipc_ops *ops, struct ipc_params *params);
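With the _down prototypes removed from ipc/util.h, writer-side callers keep the same locking pattern as ipcctl_pre_down() above: take the rw_mutex for write, then use the ordinary lookup helpers. A hypothetical caller (ids, id, ipcp and err are placeholder names, not from this patch) would look like:

	down_write(&ids->rw_mutex);
	ipcp = ipc_lock_check(ids, id);	/* was ipc_lock_check_down(ids, id) */
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_up;
	}
	/* ... update the object under ipcp->lock ... */
	ipc_unlock(ipcp);
out_up:
	up_write(&ids->rw_mutex);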