Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c              2
-rw-r--r--  kernel/exit.c             11
-rw-r--r--  kernel/kallsyms.c          7
-rw-r--r--  kernel/module.c            8
-rw-r--r--  kernel/sched.c             5
-rw-r--r--  kernel/sched_debug.c      10
-rw-r--r--  kernel/sched_fair.c       12
-rw-r--r--  kernel/sched_stats.h       3
-rw-r--r--  kernel/sysctl_check.c     45
-rw-r--r--  kernel/time/ntp.c          9
-rw-r--r--  kernel/time/tick-sched.c   2
-rw-r--r--  kernel/user.c              7
-rw-r--r--  kernel/utsname_sysctl.c    4
13 files changed, 50 insertions, 75 deletions
diff --git a/kernel/acct.c b/kernel/acct.c
index fce53d8df8a7..cf19547cc9e4 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -413,7 +413,7 @@ static u32 encode_float(u64 value)
  * The acct_process() call is the workhorse of the process
  * accounting system. The struct acct is built here and then written
  * into the accounting file. This function should only be called from
- * do_exit().
+ * do_exit() or when switching to a different output file.
  */
 
 /*
diff --git a/kernel/exit.c b/kernel/exit.c
index cd0f1d4137a7..549c0558ba68 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1357,7 +1357,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 			      int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
-	struct pid_namespace *ns;
+	pid_t pid;
 
 	if (!p->exit_code)
 		return 0;
@@ -1376,12 +1376,11 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 	 * keep holding onto the tasklist_lock while we call getrusage and
 	 * possibly take page faults for user memory.
 	 */
-	ns = current->nsproxy->pid_ns;
+	pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
 
 	if (unlikely(noreap)) {
-		pid_t pid = task_pid_nr_ns(p, ns);
 		uid_t uid = p->uid;
 		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
 
@@ -1389,7 +1388,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 		if (unlikely(!exit_code) || unlikely(p->exit_state))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
-					   why, (exit_code << 8) | 0x7f,
+					   why, exit_code,
 					   infop, ru);
 	}
 
@@ -1451,11 +1450,11 @@ bail_ref:
 	if (!retval && infop)
 		retval = put_user(exit_code, &infop->si_status);
 	if (!retval && infop)
-		retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
+		retval = put_user(pid, &infop->si_pid);
 	if (!retval && infop)
 		retval = put_user(p->uid, &infop->si_uid);
 	if (!retval)
-		retval = task_pid_nr_ns(p, ns);
+		retval = pid;
 	put_task_struct(p);
 
 	BUG_ON(!retval);
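
The wait_task_stopped() hunks above cache the child's pid, translated into the waiter's pid namespace, while tasklist_lock is still held, and then reuse that cached value after the lock is dropped instead of re-deriving it from the task. A minimal userspace sketch of the same capture-before-unlock pattern, using illustrative names that are not taken from the kernel:

#include <pthread.h>
#include <stdio.h>

struct task {
	int pid;		/* only stable while the lock is held */
	int exit_code;
};

static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct task child = { .pid = 4711, .exit_code = 19 };

/* Copy everything we will report before dropping the lock, so the slow
 * path (copying to the user, taking page faults, ...) never touches the
 * shared structure unlocked -- the same idea as caching 'pid' above. */
static int report_stopped(struct task *t)
{
	int pid, code;

	pthread_rwlock_rdlock(&tasklist_lock);
	pid  = t->pid;
	code = t->exit_code;
	pthread_rwlock_unlock(&tasklist_lock);

	printf("pid %d stopped, code %d\n", pid, code);
	return pid;
}

int main(void)
{
	return report_stopped(&child) > 0 ? 0 : 1;
}
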
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 474219a41929..2fc25810509e 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -32,9 +32,14 @@
 
 /* These will be re-linked against their real values during the second link stage */
 extern const unsigned long kallsyms_addresses[] __attribute__((weak));
-extern const unsigned long kallsyms_num_syms __attribute__((weak));
 extern const u8 kallsyms_names[] __attribute__((weak));
 
+/* tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV)
+ */
+extern const unsigned long kallsyms_num_syms
+__attribute__((weak, section(".rodata")));
+
 extern const u8 kallsyms_token_table[] __attribute__((weak));
 extern const u16 kallsyms_token_index[] __attribute__((weak));
 
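
The relocated declaration above exists because an architecture with a small-data section (the comment cites FRV) may otherwise place a weak scalar like kallsyms_num_syms there and reference it through small-data relocations that the separately generated symbol table cannot satisfy; forcing it into .rodata keeps the references ordinary. A hedged, userspace-sized illustration of pinning an object to a named section with the same attribute:

#include <stdio.h>

/* Keep the counter out of any .sdata/small-data section the toolchain
 * might pick by default; all references then use normal relocations,
 * which is what the kallsyms_num_syms declaration above relies on. */
static const unsigned long table_entries
	__attribute__((section(".rodata"))) = 4;

int main(void)
{
	printf("%lu entries\n", table_entries);
	return 0;
}
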
diff --git a/kernel/module.c b/kernel/module.c
index 3202c9950073..91fe6958b6e1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -81,7 +81,8 @@ int unregister_module_notifier(struct notifier_block * nb)
 }
 EXPORT_SYMBOL(unregister_module_notifier);
 
-/* We require a truly strong try_module_get() */
+/* We require a truly strong try_module_get(): 0 means failure due to
+   ongoing or failed initialization etc. */
 static inline int strong_try_module_get(struct module *mod)
 {
 	if (mod && mod->state == MODULE_STATE_COMING)
@@ -952,7 +953,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
 		ret = __find_symbol(name, &owner, &crc,
 				    !(mod->taints & TAINT_PROPRIETARY_MODULE));
 		if (ret) {
-			/* use_module can fail due to OOM, or module unloading */
+			/* use_module can fail due to OOM,
+			   or module initialization or unloading */
 			if (!check_version(sechdrs, versindex, name, mod, crc) ||
 			    !use_module(mod, owner))
 				ret = 0;
@@ -1369,7 +1371,7 @@ dup:
 	return ret;
 }
 
-/* Change all symbols so that sh_value encodes the pointer directly. */
+/* Change all symbols so that st_value encodes the pointer directly. */
 static int simplify_symbols(Elf_Shdr *sechdrs,
 			    unsigned int symindex,
 			    const char *strtab,
diff --git a/kernel/sched.c b/kernel/sched.c
index 38933cafea8a..98dcdf272db3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5466,7 +5466,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
 
-static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;
@@ -6708,9 +6708,6 @@ void __init sched_init_smp(void)
 
 int in_sched_functions(unsigned long addr)
 {
-	/* Linker adds these: start and end of __sched functions */
-	extern char __sched_text_start[], __sched_text_end[];
-
 	return in_lock_functions(addr) ||
 		(addr >= (unsigned long)__sched_text_start
 		&& addr < (unsigned long)__sched_text_end);
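
The removed lines were function-local extern declarations of __sched_text_start/__sched_text_end; after this change in_sched_functions() presumably picks them up from a shared header instead. These are linker-script symbols: only their addresses matter, and C code compares against them to ask whether an address falls inside the .sched.text section. A small sketch of the same range-check technique using etext/end, which the default userspace linker script on Linux provides (see end(3)); the kernel symbols work the same way but bracket only the scheduler text:

#include <stdio.h>

/* Provided by the default linker script on Linux (end(3)):
 * etext = first address past the program text, end = end of bss. */
extern char etext, end;

static int in_text(unsigned long addr)
{
	return addr < (unsigned long)&etext;
}

int main(void)
{
	printf("in_text(main) = %d\n", in_text((unsigned long)main));
	printf("in_text(&end) = %d\n", in_text((unsigned long)&end));
	return 0;
}
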
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ca198a797bfa..d30467b47ddd 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -199,7 +199,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
@@ -327,10 +327,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		avg_atom = -1LL;
 
 	avg_per_cpu = p->se.sum_exec_runtime;
-	if (p->se.nr_migrations)
-		avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
-	else
+	if (p->se.nr_migrations) {
+		avg_per_cpu = div64_64(avg_per_cpu,
+				       p->se.nr_migrations);
+	} else {
 		avg_per_cpu = -1LL;
+	}
 
 	__PN(avg_atom);
 	__PN(avg_per_cpu);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee00da284b12..2f16e15c022c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * ilog(ncpus), units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
@@ -36,14 +36,14 @@ unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
@@ -61,7 +61,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
@@ -71,7 +71,7 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
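
The comment fixes above bring the documentation in line with how the defaults are scaled: the base values in this file are multiplied at boot by a factor of (1 + ilog(ncpus)). A worked example under that assumption (the scaling code itself is not part of this patch): on an 8-CPU machine the factor is 1 + ilog2(8) = 4, so the effective latency is 20 ms * 4 = 80 ms and the minimum granularity 4 ms * 4 = 16 ms, and their ratio matches the new sched_nr_latency = 5.

#include <stdio.h>

/* Integer log2, standing in for the kernel's ilog2() here. */
static unsigned int ilog2u(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int ncpus = 8;				/* example box */
	unsigned int factor = 1 + ilog2u(ncpus);	/* = 4 */
	unsigned long long latency  = 20000000ULL * factor;	/* 80 ms */
	unsigned long long min_gran =  4000000ULL * factor;	/* 16 ms */

	printf("factor %u: latency %llu ns, min_granularity %llu ns, nr_latency %llu\n",
	       factor, latency, min_gran, latency / min_gran);	/* 5 */
	return 0;
}
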
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 630178e53bb6..5b32433e7ee5 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->lb_nobusyq[itype],
 				    sd->lb_nobusyg[itype]);
 		}
-		seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+		seq_printf(seq,
+			   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
 		    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 		    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 		    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 4abc6d2306f4..6972f26c65f7 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -140,9 +140,6 @@ static struct trans_ctl_table trans_vm_table[] = {
 	{ VM_PANIC_ON_OOM, "panic_on_oom" },
 	{ VM_VDSO_ENABLED, "vdso_enabled" },
 	{ VM_MIN_SLAB, "min_slab_ratio" },
-	{ VM_CMM_PAGES, "cmm_pages" },
-	{ VM_CMM_TIMED_PAGES, "cmm_timed_pages" },
-	{ VM_CMM_TIMEOUT, "cmm_timeout" },
 
 	{}
 };
@@ -237,36 +234,6 @@ static struct trans_ctl_table trans_net_ipv4_conf_table[] = {
 	{}
 };
 
-
-static struct trans_ctl_table trans_net_ipv4_vs_table[] = {
-	{ NET_IPV4_VS_AMEMTHRESH, "amemthresh" },
-	{ NET_IPV4_VS_DEBUG_LEVEL, "debug_level" },
-	{ NET_IPV4_VS_AMDROPRATE, "am_droprate" },
-	{ NET_IPV4_VS_DROP_ENTRY, "drop_entry" },
-	{ NET_IPV4_VS_DROP_PACKET, "drop_packet" },
-	{ NET_IPV4_VS_SECURE_TCP, "secure_tcp" },
-	{ NET_IPV4_VS_TO_ES, "timeout_established" },
-	{ NET_IPV4_VS_TO_SS, "timeout_synsent" },
-	{ NET_IPV4_VS_TO_SR, "timeout_synrecv" },
-	{ NET_IPV4_VS_TO_FW, "timeout_finwait" },
-	{ NET_IPV4_VS_TO_TW, "timeout_timewait" },
-	{ NET_IPV4_VS_TO_CL, "timeout_close" },
-	{ NET_IPV4_VS_TO_CW, "timeout_closewait" },
-	{ NET_IPV4_VS_TO_LA, "timeout_lastack" },
-	{ NET_IPV4_VS_TO_LI, "timeout_listen" },
-	{ NET_IPV4_VS_TO_SA, "timeout_synack" },
-	{ NET_IPV4_VS_TO_UDP, "timeout_udp" },
-	{ NET_IPV4_VS_TO_ICMP, "timeout_icmp" },
-	{ NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" },
-	{ NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" },
-	{ NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" },
-	{ NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" },
-	{ NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" },
-	{ NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" },
-	{ NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" },
-	{}
-};
-
 static struct trans_ctl_table trans_net_neigh_vars_table[] = {
 	{ NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
 	{ NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
@@ -341,7 +308,6 @@ static struct trans_ctl_table trans_net_ipv4_table[] = {
 	{ NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
 	/* NET_IPV4_FIB_HASH unused */
 	{ NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
-	{ NET_IPV4_VS, "vs", trans_net_ipv4_vs_table },
 
 	{ NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
 	{ NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
@@ -1219,16 +1185,6 @@ static struct trans_ctl_table trans_arlan_table[] = {
 	{}
 };
 
-static struct trans_ctl_table trans_appldata_table[] = {
-	{ CTL_APPLDATA_TIMER, "timer" },
-	{ CTL_APPLDATA_INTERVAL, "interval" },
-	{ CTL_APPLDATA_OS, "os" },
-	{ CTL_APPLDATA_NET_SUM, "net_sum" },
-	{ CTL_APPLDATA_MEM, "mem" },
-	{}
-
-};
-
 static struct trans_ctl_table trans_s390dbf_table[] = {
 	{ 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
 	{ 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
@@ -1273,7 +1229,6 @@ static struct trans_ctl_table trans_root_table[] = {
 	{ CTL_ABI, "abi" },
 	/* CTL_CPU not used */
 	{ CTL_ARLAN, "arlan", trans_arlan_table },
-	{ CTL_APPLDATA, "appldata", trans_appldata_table },
 	{ CTL_S390DBF, "s390dbf", trans_s390dbf_table },
 	{ CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
 	{ CTL_PM, "pm", trans_pm_table },
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 14a2ecf2b318..e64efaf957e8 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -249,10 +249,12 @@ int do_adjtimex(struct timex *txc)
 
 	/* Now we validate the data before disabling interrupts */
 
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes != ADJ_OFFSET_SINGLESHOT)
+		if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
+		    txc->modes != ADJ_OFFSET_SS_READ)
 			return -EINVAL;
+	}
 
 	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
 		/* adjustment Offset limited to +- .512 seconds */
@@ -372,7 +374,8 @@ int do_adjtimex(struct timex *txc)
 leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
 		result = TIME_ERROR;
 
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
+	    (txc->modes == ADJ_OFFSET_SS_READ))
 		txc->offset = save_adjust;
 	else
 		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
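
The new ADJ_OFFSET_SS_READ handling lets a caller ask for the remaining singleshot offset without installing a new adjustment. A minimal userspace sketch of how such a read-only query would presumably be issued; the fallback #define is only for older headers that predate the constant:

#include <stdio.h>
#include <sys/timex.h>

#ifndef ADJ_OFFSET_SS_READ
#define ADJ_OFFSET_SS_READ 0xa001	/* value from linux/timex.h */
#endif

int main(void)
{
	struct timex tx = { .modes = ADJ_OFFSET_SS_READ };

	/* With this mode the kernel fills tx.offset with the remaining
	 * singleshot adjustment instead of applying a new one. */
	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("remaining singleshot offset: %ld us\n", tx.offset);
	return 0;
}
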
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 27a2338deb4a..cb89fa8db110 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -133,6 +133,8 @@ void tick_nohz_update_jiffies(void)
 	if (!ts->tick_stopped)
 		return;
 
+	touch_softlockup_watchdog();
+
 	cpu_clear(cpu, nohz_cpu_mask);
 	now = ktime_get();
 
diff --git a/kernel/user.c b/kernel/user.c
index 0f3aa0234107..8320a87f3e5a 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -337,8 +337,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	struct user_struct *new;
 
 	new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-	if (!new)
+	if (!new) {
+		uids_mutex_unlock();
 		return NULL;
+	}
+
 	new->uid = uid;
 	atomic_set(&new->__count, 1);
 	atomic_set(&new->processes, 0);
@@ -355,6 +358,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 
 	if (alloc_uid_keyring(new, current) < 0) {
 		kmem_cache_free(uid_cachep, new);
+		uids_mutex_unlock();
 		return NULL;
 	}
 
@@ -362,6 +366,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		key_put(new->uid_keyring);
 		key_put(new->session_keyring);
 		kmem_cache_free(uid_cachep, new);
+		uids_mutex_unlock();
 		return NULL;
 	}
 
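
The three hunks above plug error paths in alloc_uid() that returned NULL while uids_mutex was still held. When a function grows several early exits like this, a common alternative is to funnel every failure through a single unlock label with goto, so a future early return cannot forget the unlock again. A hedged sketch of that idiom with illustrative names, not the kernel's code:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t uids_mutex = PTHREAD_MUTEX_INITIALIZER;

struct user_struct { unsigned int uid; };

/* Same shape as alloc_uid(): take the mutex, allocate and initialise,
 * and route every outcome through one unlock site. */
static struct user_struct *alloc_user(unsigned int uid)
{
	struct user_struct *new;

	pthread_mutex_lock(&uids_mutex);

	new = malloc(sizeof(*new));
	if (!new)
		goto out_unlock;
	new->uid = uid;

out_unlock:
	pthread_mutex_unlock(&uids_mutex);
	return new;
}

int main(void)
{
	struct user_struct *u = alloc_user(1000);

	free(u);
	return 0;
}
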
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c76c06466bfd..fe3a56c2256d 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -18,6 +18,10 @@
 static void *get_uts(ctl_table *table, int write)
 {
 	char *which = table->data;
+	struct uts_namespace *uts_ns;
+
+	uts_ns = current->nsproxy->uts_ns;
+	which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
 
 	if (!write)
 		down_read(&uts_sem);
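
The added lines rebase table->data, which was registered against the global init_uts_ns template, onto the current task's uts namespace: the field's byte offset inside the struct is preserved, only the base object changes. A small sketch of the same offset-rebasing trick with a hypothetical struct:

#include <stdio.h>

struct ns { char hostname[16]; char domainname[16]; };

static struct ns init_ns  = { "init-host", "init-domain" };
static struct ns other_ns = { "container-host", "container-domain" };

int main(void)
{
	/* Pointer set up once, against the "template" instance. */
	char *which = init_ns.domainname;

	/* Rebase: keep the intra-struct offset, swap the base object,
	 * exactly as get_uts() does with init_uts_ns and uts_ns. */
	which = (which - (char *)&init_ns) + (char *)&other_ns;

	printf("%s\n", which);	/* prints "container-domain" */
	return 0;
}
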