summaryrefslogtreecommitdiffstats
path: root/ipc
diff options
context:
space:
mode:
authorManfred Spraul <manfred@colorfullife.com>2019-05-14 18:46:36 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-05-14 22:52:52 -0400
commit99db46ea292780cd978d56932d9445b1e8bdafe8 (patch)
treea61a2121c48635478afb64875f8d8170c36bdfc0 /ipc
parent3278a2c20cb302d27e6f6ee45a3f57361176e426 (diff)
ipc: do cyclic id allocation for the ipc object.
For ipcmni_extend mode, the sequence number space is only 7 bits. So the chance of id reuse is relatively high compared with the non-extended mode. To alleviate this id reuse problem, this patch enables cyclic allocation for the index to the radix tree (idx). The disadvantage is that this can cause a slight slow-down of the fast path, as the radix tree could be higher than necessary. To limit the radix tree height, I have chosen the following limits: 1) The cycling is done over in_use*1.5. 2) At least, the cycling is done over "normal" ipcmni mode: RADIX_TREE_MAP_SIZE elements "ipcmni_extended": 4096 elements Result: - for normal mode: No change for <= 42 active ipc elements. With more than 42 active ipc elements, a 2nd level would be added to the radix tree. Without cyclic allocation, a 2nd level would be added only with more than 63 active elements. - for extended mode: Cycling always creates at least a 2-level radix tree. With more than 2730 active objects, a 3rd level would be added, instead of > 4095 active objects until the 3rd level is added without cyclic allocation. For a 2-level radix tree compared to a 1-level radix tree, I have observed < 1% performance impact. Notes: 1) Normal "x=semget();y=semget();" is unaffected: Then the idx is e.g. a and a+1, regardless if idr_alloc() or idr_alloc_cyclic() is used. 2) The -1% happens in a microbenchmark after this situation: x=semget(); for(i=0;i<4000;i++) {t=semget();semctl(t,0,IPC_RMID);} y=semget(); Now perform semget calls on x and y that do not sleep. 3) The worst-case reuse cycle time is unfortunately unaffected: If you have 2^24-1 ipc objects allocated, and get/remove the last possible element in a loop, then the id is reused after 128 get/remove pairs. Performance check: A microbenchmark that performs no-op semop() randomly on two IDs, with only these two IDs allocated. The IDs were set using /proc/sys/kernel/sem_next_id. The test was run 5 times, averages are shown. 
1 & 2: Base (6.22 seconds for 10.000.000 semops) 1 & 40: -0.2% 1 & 3348: - 0.8% 1 & 27348: - 1.6% 1 & 15777204: - 3.2% Or: ~12.6 cpu cycles per additional radix tree level. The cpu is an Intel I3-5010U. ~1300 cpu cycles/syscall is slower than what I remember (spectre impact?). V2 of the patch: - use "min" and "max" - use RADIX_TREE_MAP_SIZE * RADIX_TREE_MAP_SIZE instead of (2<<12). [akpm@linux-foundation.org: fix max() warning] Link: http://lkml.kernel.org/r/20190329204930.21620-3-longman@redhat.com Signed-off-by: Manfred Spraul <manfred@colorfullife.com> Acked-by: Waiman Long <longman@redhat.com> Cc: "Luis R. Rodriguez" <mcgrof@kernel.org> Cc: Kees Cook <keescook@chromium.org> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Matthew Wilcox <willy@infradead.org> Cc: "Eric W . Biederman" <ebiederm@xmission.com> Cc: Takashi Iwai <tiwai@suse.de> Cc: Davidlohr Bueso <dbueso@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc')
-rw-r--r--ipc/ipc_sysctl.c2
-rw-r--r--ipc/util.c7
-rw-r--r--ipc/util.h3
3 files changed, 11 insertions, 1 deletions
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 73b7782eccf4..bfaae457810c 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -122,6 +122,7 @@ static int one = 1;
122static int int_max = INT_MAX; 122static int int_max = INT_MAX;
123int ipc_mni = IPCMNI; 123int ipc_mni = IPCMNI;
124int ipc_mni_shift = IPCMNI_SHIFT; 124int ipc_mni_shift = IPCMNI_SHIFT;
125int ipc_min_cycle = RADIX_TREE_MAP_SIZE;
125 126
126static struct ctl_table ipc_kern_table[] = { 127static struct ctl_table ipc_kern_table[] = {
127 { 128 {
@@ -252,6 +253,7 @@ static int __init ipc_mni_extend(char *str)
252{ 253{
253 ipc_mni = IPCMNI_EXTEND; 254 ipc_mni = IPCMNI_EXTEND;
254 ipc_mni_shift = IPCMNI_EXTEND_SHIFT; 255 ipc_mni_shift = IPCMNI_EXTEND_SHIFT;
256 ipc_min_cycle = IPCMNI_EXTEND_MIN_CYCLE;
255 pr_info("IPCMNI extended to %d.\n", ipc_mni); 257 pr_info("IPCMNI extended to %d.\n", ipc_mni);
256 return 0; 258 return 0;
257} 259}
diff --git a/ipc/util.c b/ipc/util.c
index 71f3f3982fc8..d126d156efc6 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -220,9 +220,14 @@ static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
220 */ 220 */
221 221
222 if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */ 222 if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */
223 int max_idx;
224
225 max_idx = max(ids->in_use*3/2, ipc_min_cycle);
226 max_idx = min(max_idx, ipc_mni);
223 227
224 /* allocate the idx, with a NULL struct kern_ipc_perm */ 228 /* allocate the idx, with a NULL struct kern_ipc_perm */
225 idx = idr_alloc(&ids->ipcs_idr, NULL, 0, 0, GFP_NOWAIT); 229 idx = idr_alloc_cyclic(&ids->ipcs_idr, NULL, 0, max_idx,
230 GFP_NOWAIT);
226 231
227 if (idx >= 0) { 232 if (idx >= 0) {
228 /* 233 /*
diff --git a/ipc/util.h b/ipc/util.h
index 8c834ed39012..0fcf8e719b76 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -27,12 +27,14 @@
27 */ 27 */
28#define IPCMNI_SHIFT 15 28#define IPCMNI_SHIFT 15
29#define IPCMNI_EXTEND_SHIFT 24 29#define IPCMNI_EXTEND_SHIFT 24
30#define IPCMNI_EXTEND_MIN_CYCLE (RADIX_TREE_MAP_SIZE * RADIX_TREE_MAP_SIZE)
30#define IPCMNI (1 << IPCMNI_SHIFT) 31#define IPCMNI (1 << IPCMNI_SHIFT)
31#define IPCMNI_EXTEND (1 << IPCMNI_EXTEND_SHIFT) 32#define IPCMNI_EXTEND (1 << IPCMNI_EXTEND_SHIFT)
32 33
33#ifdef CONFIG_SYSVIPC_SYSCTL 34#ifdef CONFIG_SYSVIPC_SYSCTL
34extern int ipc_mni; 35extern int ipc_mni;
35extern int ipc_mni_shift; 36extern int ipc_mni_shift;
37extern int ipc_min_cycle;
36 38
37#define ipcmni_seq_shift() ipc_mni_shift 39#define ipcmni_seq_shift() ipc_mni_shift
38#define IPCMNI_IDX_MASK ((1 << ipc_mni_shift) - 1) 40#define IPCMNI_IDX_MASK ((1 << ipc_mni_shift) - 1)
@@ -40,6 +42,7 @@ extern int ipc_mni_shift;
40#else /* CONFIG_SYSVIPC_SYSCTL */ 42#else /* CONFIG_SYSVIPC_SYSCTL */
41 43
42#define ipc_mni IPCMNI 44#define ipc_mni IPCMNI
45#define ipc_min_cycle ((int)RADIX_TREE_MAP_SIZE)
43#define ipcmni_seq_shift() IPCMNI_SHIFT 46#define ipcmni_seq_shift() IPCMNI_SHIFT
44#define IPCMNI_IDX_MASK ((1 << IPCMNI_SHIFT) - 1) 47#define IPCMNI_IDX_MASK ((1 << IPCMNI_SHIFT) - 1)
45#endif /* CONFIG_SYSVIPC_SYSCTL */ 48#endif /* CONFIG_SYSVIPC_SYSCTL */