diff options
Diffstat (limited to 'ipc/util.c')
| -rw-r--r-- | ipc/util.c | 91 |
1 files changed, 48 insertions, 43 deletions
diff --git a/ipc/util.c b/ipc/util.c index fdffff41f65b..e5c9e2b2e4c4 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
| @@ -194,46 +194,54 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) | |||
| 194 | return NULL; | 194 | return NULL; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
| 198 | /* | 197 | /* |
| 199 | * Specify desired id for next allocated IPC object. | 198 | * Insert new IPC object into idr tree, and set sequence number and id |
| 199 | * in the correct order. | ||
| 200 | * Especially: | ||
| 201 | * - the sequence number must be set before inserting the object into the idr, | ||
| 202 | * because the sequence number is accessed without a lock. | ||
| 203 | * - the id can/must be set after inserting the object into the idr. | ||
| 204 | * All accesses must be done after getting kern_ipc_perm.lock. | ||
| 205 | * | ||
| 206 | * The caller must own kern_ipc_perm.lock of the new object. | |||
| 207 | * On error, the function returns a (negative) error code. | ||
| 200 | */ | 208 | */ |
| 201 | #define ipc_idr_alloc(ids, new) \ | 209 | static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new) |
| 202 | idr_alloc(&(ids)->ipcs_idr, (new), \ | ||
| 203 | (ids)->next_id < 0 ? 0 : ipcid_to_idx((ids)->next_id),\ | ||
| 204 | 0, GFP_NOWAIT) | ||
| 205 | |||
| 206 | static inline int ipc_buildid(int id, struct ipc_ids *ids, | ||
| 207 | struct kern_ipc_perm *new) | ||
| 208 | { | 210 | { |
| 209 | if (ids->next_id < 0) { /* default, behave as !CHECKPOINT_RESTORE */ | 211 | int idx, next_id = -1; |
| 212 | |||
| 213 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
| 214 | next_id = ids->next_id; | ||
| 215 | ids->next_id = -1; | ||
| 216 | #endif | ||
| 217 | |||
| 218 | /* | ||
| 219 | * As soon as a new object is inserted into the idr, | ||
| 220 | * ipc_obtain_object_idr() or ipc_obtain_object_check() can find it, | ||
| 221 | * and the lockless preparations for ipc operations can start. | ||
| 222 | * This means especially: permission checks, audit calls, allocation | ||
| 223 | * of undo structures, ... | ||
| 224 | * | ||
| 225 | * Thus the object must be fully initialized, and if something fails, | ||
| 226 | * then the full tear-down sequence must be followed. | ||
| 227 | * (i.e.: set new->deleted, reduce refcount, call_rcu()) | ||
| 228 | */ | ||
| 229 | |||
| 230 | if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */ | ||
| 210 | new->seq = ids->seq++; | 231 | new->seq = ids->seq++; |
| 211 | if (ids->seq > IPCID_SEQ_MAX) | 232 | if (ids->seq > IPCID_SEQ_MAX) |
| 212 | ids->seq = 0; | 233 | ids->seq = 0; |
| 234 | idx = idr_alloc(&ids->ipcs_idr, new, 0, 0, GFP_NOWAIT); | ||
| 213 | } else { | 235 | } else { |
| 214 | new->seq = ipcid_to_seqx(ids->next_id); | 236 | new->seq = ipcid_to_seqx(next_id); |
| 215 | ids->next_id = -1; | 237 | idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id), |
| 238 | 0, GFP_NOWAIT); | ||
| 216 | } | 239 | } |
| 217 | 240 | if (idx >= 0) | |
| 218 | return SEQ_MULTIPLIER * new->seq + id; | 241 | new->id = SEQ_MULTIPLIER * new->seq + idx; |
| 242 | return idx; | ||
| 219 | } | 243 | } |
| 220 | 244 | ||
| 221 | #else | ||
| 222 | #define ipc_idr_alloc(ids, new) \ | ||
| 223 | idr_alloc(&(ids)->ipcs_idr, (new), 0, 0, GFP_NOWAIT) | ||
| 224 | |||
| 225 | static inline int ipc_buildid(int id, struct ipc_ids *ids, | ||
| 226 | struct kern_ipc_perm *new) | ||
| 227 | { | ||
| 228 | new->seq = ids->seq++; | ||
| 229 | if (ids->seq > IPCID_SEQ_MAX) | ||
| 230 | ids->seq = 0; | ||
| 231 | |||
| 232 | return SEQ_MULTIPLIER * new->seq + id; | ||
| 233 | } | ||
| 234 | |||
| 235 | #endif /* CONFIG_CHECKPOINT_RESTORE */ | ||
| 236 | |||
| 237 | /** | 245 | /** |
| 238 | * ipc_addid - add an ipc identifier | 246 | * ipc_addid - add an ipc identifier |
| 239 | * @ids: ipc identifier set | 247 | * @ids: ipc identifier set |
| @@ -251,7 +259,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit) | |||
| 251 | { | 259 | { |
| 252 | kuid_t euid; | 260 | kuid_t euid; |
| 253 | kgid_t egid; | 261 | kgid_t egid; |
| 254 | int id, err; | 262 | int idx, err; |
| 255 | 263 | ||
| 256 | if (limit > IPCMNI) | 264 | if (limit > IPCMNI) |
| 257 | limit = IPCMNI; | 265 | limit = IPCMNI; |
| @@ -271,30 +279,27 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit) | |||
| 271 | new->cuid = new->uid = euid; | 279 | new->cuid = new->uid = euid; |
| 272 | new->gid = new->cgid = egid; | 280 | new->gid = new->cgid = egid; |
| 273 | 281 | ||
| 274 | id = ipc_idr_alloc(ids, new); | 282 | idx = ipc_idr_alloc(ids, new); |
| 275 | idr_preload_end(); | 283 | idr_preload_end(); |
| 276 | 284 | ||
| 277 | if (id >= 0 && new->key != IPC_PRIVATE) { | 285 | if (idx >= 0 && new->key != IPC_PRIVATE) { |
| 278 | err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode, | 286 | err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode, |
| 279 | ipc_kht_params); | 287 | ipc_kht_params); |
| 280 | if (err < 0) { | 288 | if (err < 0) { |
| 281 | idr_remove(&ids->ipcs_idr, id); | 289 | idr_remove(&ids->ipcs_idr, idx); |
| 282 | id = err; | 290 | idx = err; |
| 283 | } | 291 | } |
| 284 | } | 292 | } |
| 285 | if (id < 0) { | 293 | if (idx < 0) { |
| 286 | spin_unlock(&new->lock); | 294 | spin_unlock(&new->lock); |
| 287 | rcu_read_unlock(); | 295 | rcu_read_unlock(); |
| 288 | return id; | 296 | return idx; |
| 289 | } | 297 | } |
| 290 | 298 | ||
| 291 | ids->in_use++; | 299 | ids->in_use++; |
| 292 | if (id > ids->max_id) | 300 | if (idx > ids->max_id) |
| 293 | ids->max_id = id; | 301 | ids->max_id = idx; |
| 294 | 302 | return idx; | |
| 295 | new->id = ipc_buildid(id, ids, new); | ||
| 296 | |||
| 297 | return id; | ||
| 298 | } | 303 | } |
| 299 | 304 | ||
| 300 | /** | 305 | /** |
