diff options
| author | Manfred Spraul <manfred@colorfullife.com> | 2018-08-22 01:01:25 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-22 13:52:51 -0400 |
| commit | e2652ae6bd7492cdc0436817cbcd09282eb0bb03 (patch) | |
| tree | 3736e02fc0a194548e10bf40680b22d737f6b320 | |
| parent | 615c999cd8a07b7c3c93bbdee89ef705d2ce52e1 (diff) | |
ipc: reorganize initialization of kern_ipc_perm.seq
ipc_addid() initializes kern_ipc_perm.seq after having called idr_alloc()
(within ipc_idr_alloc()).
Thus a parallel semop() or msgrcv() that uses ipc_obtain_object_check()
may see an uninitialized value.
The patch moves the initialization of kern_ipc_perm.seq before the calls
of idr_alloc().
Notes:
1) This patch has a user space visible side effect:
If /proc/sys/kernel/*_next_id is used (i.e.: checkpoint/restore) and
if semget()/msgget()/shmget() fails in the final step of adding the id
to the rhash tree, then .._next_id is cleared. Before the patch, it
remained unmodified.
There is no change of the behavior after a successful ..get() call: It
always clears .._next_id, there is no impact to non checkpoint/restore
code as that code does not use .._next_id.
2) The patch correctly documents that after a call to ipc_idr_alloc(),
the full tear-down sequence must be used. The callers of ipc_addid()
do not fulfill that, i.e. more bugfixes are required.
The patch is a squash of a patch from Dmitry and my own changes.
Link: http://lkml.kernel.org/r/20180712185241.4017-3-manfred@colorfullife.com
Reported-by: syzbot+2827ef6b3385deb07eaf@syzkaller.appspotmail.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | Documentation/sysctl/kernel.txt | 3 | ||||
| -rw-r--r-- | ipc/util.c | 91 |
2 files changed, 50 insertions, 44 deletions
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 9d38e75b19a0..37a679501ddc 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
| @@ -464,7 +464,8 @@ Notes: | |||
| 464 | 1) kernel doesn't guarantee, that new object will have desired id. So, | 464 | 1) kernel doesn't guarantee, that new object will have desired id. So, |
| 465 | it's up to userspace, how to handle an object with "wrong" id. | 465 | it's up to userspace, how to handle an object with "wrong" id. |
| 466 | 2) Toggle with non-default value will be set back to -1 by kernel after | 466 | 2) Toggle with non-default value will be set back to -1 by kernel after |
| 467 | successful IPC object allocation. | 467 | successful IPC object allocation. If an IPC object allocation syscall |
| 468 | fails, it is undefined if the value remains unmodified or is reset to -1. | ||
| 468 | 469 | ||
| 469 | ============================================================== | 470 | ============================================================== |
| 470 | 471 | ||
diff --git a/ipc/util.c b/ipc/util.c index fdffff41f65b..e5c9e2b2e4c4 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
| @@ -194,46 +194,54 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) | |||
| 194 | return NULL; | 194 | return NULL; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
| 198 | /* | 197 | /* |
| 199 | * Specify desired id for next allocated IPC object. | 198 | * Insert new IPC object into idr tree, and set sequence number and id |
| 199 | * in the correct order. | ||
| 200 | * Especially: | ||
| 201 | * - the sequence number must be set before inserting the object into the idr, | ||
| 202 | * because the sequence number is accessed without a lock. | ||
| 203 | * - the id can/must be set after inserting the object into the idr. | ||
| 204 | * All accesses must be done after getting kern_ipc_perm.lock. | ||
| 205 | * | ||
| 206 | * The caller must own kern_ipc_perm.lock of the new object. | ||
| 207 | * On error, the function returns a (negative) error code. | ||
| 200 | */ | 208 | */ |
| 201 | #define ipc_idr_alloc(ids, new) \ | 209 | static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new) |
| 202 | idr_alloc(&(ids)->ipcs_idr, (new), \ | ||
| 203 | (ids)->next_id < 0 ? 0 : ipcid_to_idx((ids)->next_id),\ | ||
| 204 | 0, GFP_NOWAIT) | ||
| 205 | |||
| 206 | static inline int ipc_buildid(int id, struct ipc_ids *ids, | ||
| 207 | struct kern_ipc_perm *new) | ||
| 208 | { | 210 | { |
| 209 | if (ids->next_id < 0) { /* default, behave as !CHECKPOINT_RESTORE */ | 211 | int idx, next_id = -1; |
| 212 | |||
| 213 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
| 214 | next_id = ids->next_id; | ||
| 215 | ids->next_id = -1; | ||
| 216 | #endif | ||
| 217 | |||
| 218 | /* | ||
| 219 | * As soon as a new object is inserted into the idr, | ||
| 220 | * ipc_obtain_object_idr() or ipc_obtain_object_check() can find it, | ||
| 221 | * and the lockless preparations for ipc operations can start. | ||
| 222 | * This means especially: permission checks, audit calls, allocation | ||
| 223 | * of undo structures, ... | ||
| 224 | * | ||
| 225 | * Thus the object must be fully initialized, and if something fails, | ||
| 226 | * then the full tear-down sequence must be followed. | ||
| 227 | * (i.e.: set new->deleted, reduce refcount, call_rcu()) | ||
| 228 | */ | ||
| 229 | |||
| 230 | if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */ | ||
| 210 | new->seq = ids->seq++; | 231 | new->seq = ids->seq++; |
| 211 | if (ids->seq > IPCID_SEQ_MAX) | 232 | if (ids->seq > IPCID_SEQ_MAX) |
| 212 | ids->seq = 0; | 233 | ids->seq = 0; |
| 234 | idx = idr_alloc(&ids->ipcs_idr, new, 0, 0, GFP_NOWAIT); | ||
| 213 | } else { | 235 | } else { |
| 214 | new->seq = ipcid_to_seqx(ids->next_id); | 236 | new->seq = ipcid_to_seqx(next_id); |
| 215 | ids->next_id = -1; | 237 | idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id), |
| 238 | 0, GFP_NOWAIT); | ||
| 216 | } | 239 | } |
| 217 | 240 | if (idx >= 0) | |
| 218 | return SEQ_MULTIPLIER * new->seq + id; | 241 | new->id = SEQ_MULTIPLIER * new->seq + idx; |
| 242 | return idx; | ||
| 219 | } | 243 | } |
| 220 | 244 | ||
| 221 | #else | ||
| 222 | #define ipc_idr_alloc(ids, new) \ | ||
| 223 | idr_alloc(&(ids)->ipcs_idr, (new), 0, 0, GFP_NOWAIT) | ||
| 224 | |||
| 225 | static inline int ipc_buildid(int id, struct ipc_ids *ids, | ||
| 226 | struct kern_ipc_perm *new) | ||
| 227 | { | ||
| 228 | new->seq = ids->seq++; | ||
| 229 | if (ids->seq > IPCID_SEQ_MAX) | ||
| 230 | ids->seq = 0; | ||
| 231 | |||
| 232 | return SEQ_MULTIPLIER * new->seq + id; | ||
| 233 | } | ||
| 234 | |||
| 235 | #endif /* CONFIG_CHECKPOINT_RESTORE */ | ||
| 236 | |||
| 237 | /** | 245 | /** |
| 238 | * ipc_addid - add an ipc identifier | 246 | * ipc_addid - add an ipc identifier |
| 239 | * @ids: ipc identifier set | 247 | * @ids: ipc identifier set |
| @@ -251,7 +259,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit) | |||
| 251 | { | 259 | { |
| 252 | kuid_t euid; | 260 | kuid_t euid; |
| 253 | kgid_t egid; | 261 | kgid_t egid; |
| 254 | int id, err; | 262 | int idx, err; |
| 255 | 263 | ||
| 256 | if (limit > IPCMNI) | 264 | if (limit > IPCMNI) |
| 257 | limit = IPCMNI; | 265 | limit = IPCMNI; |
| @@ -271,30 +279,27 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit) | |||
| 271 | new->cuid = new->uid = euid; | 279 | new->cuid = new->uid = euid; |
| 272 | new->gid = new->cgid = egid; | 280 | new->gid = new->cgid = egid; |
| 273 | 281 | ||
| 274 | id = ipc_idr_alloc(ids, new); | 282 | idx = ipc_idr_alloc(ids, new); |
| 275 | idr_preload_end(); | 283 | idr_preload_end(); |
| 276 | 284 | ||
| 277 | if (id >= 0 && new->key != IPC_PRIVATE) { | 285 | if (idx >= 0 && new->key != IPC_PRIVATE) { |
| 278 | err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode, | 286 | err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode, |
| 279 | ipc_kht_params); | 287 | ipc_kht_params); |
| 280 | if (err < 0) { | 288 | if (err < 0) { |
| 281 | idr_remove(&ids->ipcs_idr, id); | 289 | idr_remove(&ids->ipcs_idr, idx); |
| 282 | id = err; | 290 | idx = err; |
| 283 | } | 291 | } |
| 284 | } | 292 | } |
| 285 | if (id < 0) { | 293 | if (idx < 0) { |
| 286 | spin_unlock(&new->lock); | 294 | spin_unlock(&new->lock); |
| 287 | rcu_read_unlock(); | 295 | rcu_read_unlock(); |
| 288 | return id; | 296 | return idx; |
| 289 | } | 297 | } |
| 290 | 298 | ||
| 291 | ids->in_use++; | 299 | ids->in_use++; |
| 292 | if (id > ids->max_id) | 300 | if (idx > ids->max_id) |
| 293 | ids->max_id = id; | 301 | ids->max_id = idx; |
| 294 | 302 | return idx; | |
| 295 | new->id = ipc_buildid(id, ids, new); | ||
| 296 | |||
| 297 | return id; | ||
| 298 | } | 303 | } |
| 299 | 304 | ||
| 300 | /** | 305 | /** |
