Diffstat (limited to 'kernel')
 kernel/acct.c        |  2 +-
 kernel/sched_debug.c |  2 +-
 kernel/sched_fair.c  | 12 ++++++------
 kernel/user.c        |  7 ++++++-
 4 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index fce53d8df8a7..cf19547cc9e4 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -413,7 +413,7 @@ static u32 encode_float(u64 value)
  * The acct_process() call is the workhorse of the process
  * accounting system. The struct acct is built here and then written
  * into the accounting file. This function should only be called from
- * do_exit().
+ * do_exit() or when switching to a different output file.
  */
 
 /*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ca198a797bfa..5d0d623a5465 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -199,7 +199,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee00da284b12..2f16e15c022c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * ilog(ncpus), units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
@@ -36,14 +36,14 @@ unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
@@ -61,7 +61,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
@@ -71,7 +71,7 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
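
A note on the sched_fair.c defaults above: the comment block keeps sched_nr_latency at sysctl_sched_latency / sysctl_sched_min_granularity, so with the 20 ms latency target and the new 4 ms minimum granularity that ratio is 5, which is why the constant drops from 20 to 5. The "(1 + ilog(ncpus))" factor mentioned in the comments is not visible in these initializers; it is presumably applied once at boot. The sketch below shows how such scaling could look, reading "ilog" as the kernel's ilog2(); the function name is an assumption for illustration and is not part of this patch, and it assumes it sits where the sysctl variables above are visible.

#include <linux/log2.h>
#include <linux/cpumask.h>

/*
 * Sketch only (not from this patch): scale the base defaults by
 * 1 + ilog2(number of online CPUs), as the comments describe.
 */
static void sketch_scale_sched_defaults(void)
{
	unsigned int factor = 1 + ilog2(num_online_cpus());

	sysctl_sched_latency *= factor;				/* 20 ms base */
	sysctl_sched_min_granularity *= factor;			/*  4 ms base */
	sysctl_sched_batch_wakeup_granularity *= factor;	/* 10 ms base */
}

Under this reading, an 8-CPU machine gets factor = 1 + ilog2(8) = 4, i.e. an effective latency target of 80 ms and a minimum granularity of 16 ms.
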
diff --git a/kernel/user.c b/kernel/user.c
index 0f3aa0234107..8320a87f3e5a 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -337,8 +337,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		struct user_struct *new;
 
 		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-		if (!new)
+		if (!new) {
+			uids_mutex_unlock();
 			return NULL;
+		}
+
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 		atomic_set(&new->processes, 0);
@@ -355,6 +358,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 
 		if (alloc_uid_keyring(new, current) < 0) {
 			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
 			return NULL;
 		}
 
@@ -362,6 +366,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
 			return NULL;
 		}
 
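
The three kernel/user.c hunks fix the same issue: alloc_uid() acquires the uids mutex earlier in the function (the lock that uids_mutex_unlock() releases), but the early error returns shown above left it held, so after a single allocation or keyring failure any later caller of alloc_uid() would block on that mutex indefinitely. The patch keeps the existing structure and simply adds the missing unlock to each error path. A common alternative in kernel code is a single goto-based cleanup path; the sketch below is illustrative only, with structure and labels that are assumptions rather than the actual body of alloc_uid().

/*
 * Sketch only: the same error handling written with a goto-cleanup path,
 * so uids_mutex_unlock() appears once per outcome instead of being
 * repeated at every early return.  Not the real alloc_uid() body.
 */
static struct user_struct *alloc_uid_error_paths_sketch(uid_t uid)
{
	struct user_struct *new;

	/* uids_mutex is assumed to already be held at this point. */
	new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
	if (!new)
		goto out_unlock;
	new->uid = uid;

	if (alloc_uid_keyring(new, current) < 0)
		goto out_free;

	/* ... hash insertion etc. would follow here in the real function ... */
	uids_mutex_unlock();
	return new;

out_free:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

Whether open-coded unlocks or a single cleanup label reads better is a style choice; the patch above takes the minimal route of adding uids_mutex_unlock() before each existing return NULL.
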