Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c            5
-rw-r--r--  kernel/events/callchain.c   2
-rw-r--r--  kernel/events/core.c        2
-rw-r--r--  kernel/kprobes.c            2
-rw-r--r--  kernel/power/process.c     19
-rw-r--r--  kernel/power/snapshot.c     3
-rw-r--r--  kernel/power/user.c         9
-rw-r--r--  kernel/rcutorture.c         8
-rw-r--r--  kernel/res_counter.c       25
-rw-r--r--  kernel/sched/cpupri.c       3
10 files changed, 65 insertions, 13 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index caaea6e944f8..af1de0f34eae 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1863,11 +1863,12 @@ void __audit_syscall_entry(int arch, int major,
 
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @pt_regs: syscall registers
+ * @success: success value of the syscall
+ * @return_code: return value of the syscall
  *
  * Tear down after system call. If the audit context has been marked as
  * auditable (either because of the AUDIT_RECORD_CONTEXT state from
- * filtering, or because some other part of the kernel write an audit
+ * filtering, or because some other part of the kernel wrote an audit
  * message), then write out the syscall information. In call cases,
  * free the names stored from getname().
  */
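Note: this hunk touches only kernel-doc. @pt_regs documented a parameter the function no longer takes; the replacement @success/@return_code entries presumably mirror a signature along the lines of audit_syscall_exit(int success, long return_code), though the prototype itself is outside this hunk. The write/wrote change fixes the tense of the comment.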
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 057e24b665cf..6581a040f399 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -115,8 +115,6 @@ int get_callchain_buffers(void)
 	}
 
 	err = alloc_callchain_buffers();
-	if (err)
-		release_callchain_buffers();
 exit:
 	mutex_unlock(&callchain_mutex);
 
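Note: the removed release_callchain_buffers() call was the only error handling after alloc_callchain_buffers(); dropping it implies the failure case is handled elsewhere — presumably the caller's error path already drops its reference via put_callchain_buffers(), which is not visible in this hunk — so releasing here as well would have decremented the buffer user count twice.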
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a8f4ac001a00..32b48c889711 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -815,7 +815,7 @@ static void update_event_times(struct perf_event *event)
 	 * here.
 	 */
 	if (is_cgroup_event(event))
-		run_end = perf_event_time(event);
+		run_end = perf_cgroup_event_time(event);
 	else if (ctx->is_active)
 		run_end = ctx->time;
 	else
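Note: this branch runs only once is_cgroup_event(event) is already known to be true, and perf_event_time() just re-tests that same predicate before dispatching to perf_cgroup_event_time(); calling the cgroup helper directly skips the redundant check, so the computed run_end should be unchanged.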
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 95dd7212e610..29f5b65bee29 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 		/* Early boot. kretprobe_table_locks not yet initialized. */
 		return;
 
+	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	INIT_HLIST_HEAD(&empty_rp);
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
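Note: the two kprobes hunks are a single fix. empty_rp is a stack-allocated list head that recycle_rp_inst() fills while kretprobe_table_lock is held, yet INIT_HLIST_HEAD() only ran after the unlock — after entries may already have been queued onto an uninitialized head. Moving the initialization before the lock restores init-before-use ordering. A minimal userspace sketch of the corrected pattern (a plain singly linked list stands in for the kernel's hlist; all names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };
struct list { struct node *first; };

/* stands in for INIT_HLIST_HEAD() */
static void init_list(struct list *h) { h->first = NULL; }

/* stands in for recycle_rp_inst() queueing an instance for disposal */
static void push(struct list *h, int id)
{
	struct node *n = malloc(sizeof(*n));
	n->id = id;
	n->next = h->first;
	h->first = n;
}

int main(void)
{
	struct list empty_rp;

	init_list(&empty_rp);	/* must precede any push(), as in the fix */
	push(&empty_rp, 1);
	push(&empty_rp, 2);

	/* drain and free, like the hlist_for_each_entry_safe() loop */
	while (empty_rp.first) {
		struct node *n = empty_rp.first;
		empty_rp.first = n->next;
		printf("freeing instance %d\n", n->id);
		free(n);
	}
	return 0;
}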
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 77274c9ba2f1..eeca00311f39 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -188,3 +188,22 @@ void thaw_processes(void)
 	printk("done.\n");
 }
 
+void thaw_kernel_threads(void)
+{
+	struct task_struct *g, *p;
+
+	pm_nosig_freezing = false;
+	printk("Restarting kernel threads ... ");
+
+	thaw_workqueues();
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+			__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
+	schedule();
+	printk("done.\n");
+}
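Note: thaw_kernel_threads() is a kernel-thread-only counterpart to thaw_processes(): it clears pm_nosig_freezing, restarts the frozen workqueues, then walks the whole task list and thaws only tasks flagged PF_KTHREAD or PF_WQ_WORKER, leaving user-space processes frozen. The kernel/power/user.c hunk below adds its caller.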
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1cf88900ec4f..6a768e537001 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -812,7 +812,8 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 	unsigned int res;
 
 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
+	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
+			    LINKED_PAGE_DATA_SIZE);
 	return 2 * res;
 }
 
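Note: snapshot_additional_pages() estimates how many extra pages the memory bitmap needs. The bm_block structures are carved out of "linked pages", each of which presumably offers only LINKED_PAGE_DATA_SIZE bytes of payload (PAGE_SIZE minus the space used to chain the pages together), so dividing by PAGE_SIZE slightly underestimated the page count; the new divisor makes the estimate match the real allocator.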
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6b1ab7a88522..e5a21a857302 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -274,6 +274,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		swsusp_free();
 		memset(&data->handle, 0, sizeof(struct snapshot_handle));
 		data->ready = 0;
+		/*
+		 * It is necessary to thaw kernel threads here, because
+		 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
+		 * SNAPSHOT_FREE. In that case, if kernel threads were not
+		 * thawed, the preallocation of memory carried out by
+		 * hibernation_snapshot() might run into problems (i.e. it
+		 * might fail or even deadlock).
+		 */
+		thaw_kernel_threads();
 		break;
 
 	case SNAPSHOT_PREF_IMAGE_SIZE:
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 88f17b8a3b1d..a58ac285fc69 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -56,8 +56,8 @@ static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
 static int nfakewriters = 4;	/* # fake writer threads */
 static int stat_interval;	/* Interval between stats, in seconds. */
 				/* Defaults to "only at end of test". */
-static int verbose;		/* Print more debug info. */
-static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
+static bool verbose;		/* Print more debug info. */
+static bool test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5;		/* Start/stop testing interval (in sec) */
 static int irqreader = 1;	/* RCU readers from irq (timers). */
@@ -1399,7 +1399,7 @@ rcu_torture_shutdown(void *arg)
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
  */
-static int
+static int __cpuinit
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1447,7 +1447,7 @@ rcu_torture_onoff(void *arg)
 	return 0;
 }
 
-static int
+static int __cpuinit
 rcu_torture_onoff_init(void)
 {
 	if (onoff_interval <= 0)
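Note: verbose and test_no_idle_hz become bool, matching their module_param() declarations elsewhere in the file, and the CPU-hotplug torture thread plus its init function are tagged __cpuinit — presumably because they call CPU-hotplug entry points that live in __cpuinit sections, and the annotation avoids section-mismatch warnings when CONFIG_HOTPLUG_CPU is disabled.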
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 6d269cce7aa1..d508363858b3 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -66,6 +66,31 @@ done:
 	return ret;
 }
 
+int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
+			      struct res_counter **limit_fail_at)
+{
+	int ret, r;
+	unsigned long flags;
+	struct res_counter *c;
+
+	r = ret = 0;
+	*limit_fail_at = NULL;
+	local_irq_save(flags);
+	for (c = counter; c != NULL; c = c->parent) {
+		spin_lock(&c->lock);
+		r = res_counter_charge_locked(c, val);
+		if (r)
+			c->usage += val;
+		spin_unlock(&c->lock);
+		if (r < 0 && ret == 0) {
+			*limit_fail_at = c;
+			ret = r;
+		}
+	}
+	local_irq_restore(flags);
+
+	return ret;
+}
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
 	if (WARN_ON(counter->usage < val))
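Note: res_counter_charge_nofail() charges every counter from the given one up to the root even when a limit check fails: when res_counter_charge_locked() refuses the charge, the usage is bumped by hand anyway, and the lowest counter that was over its limit is reported through *limit_fail_at along with the error code. A compressed userspace sketch of that semantic (toy types, no locking or IRQ handling; charge_locked() is assumed, like its kernel namesake, to fail without charging when the limit would be exceeded):

#include <stdio.h>

struct counter {
	unsigned long usage, limit;
	struct counter *parent;
};

static int charge_locked(struct counter *c, unsigned long val)
{
	if (c->usage + val > c->limit)
		return -1;		/* -ENOMEM in the kernel */
	c->usage += val;
	return 0;
}

static int charge_nofail(struct counter *c, unsigned long val,
			 struct counter **limit_fail_at)
{
	int r, ret = 0;

	*limit_fail_at = NULL;
	for (; c != NULL; c = c->parent) {
		r = charge_locked(c, val);
		if (r)
			c->usage += val;	/* charge anyway on failure */
		if (r < 0 && ret == 0) {
			*limit_fail_at = c;	/* lowest counter that failed */
			ret = r;
		}
	}
	return ret;
}

int main(void)
{
	struct counter root  = { 0, 100, NULL };
	struct counter child = { 0,  10, &root };
	struct counter *failed;
	int ret = charge_nofail(&child, 50, &failed);

	/* child is over its limit but still charged: usage ends up at 50 */
	printf("ret=%d child=%lu root=%lu failed=%s\n", ret, child.usage,
	       root.usage, failed == &child ? "child" : "none");
	return 0;
}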
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index b0d798eaf130..d72586fdf660 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -129,7 +129,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  * cpupri_set - update the cpu priority setting
  * @cp: The cpupri context
  * @cpu: The target cpu
- * @pri: The priority (INVALID-RT99) to assign to this CPU
+ * @newpri: The priority (INVALID-RT99) to assign to this CPU
  *
  * Note: Assumes cpu_rq(cpu)->lock is locked
  *
@@ -200,7 +200,6 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
- * @bootmem: true if allocations need to use bootmem
  *
  * Returns: -ENOMEM if memory fails.
  */