Diffstat (limited to 'kernel')
 kernel/audit.c      |  4
 kernel/futex.c      |  6
 kernel/kprobes.c    | 36
 kernel/params.c     |  2
 kernel/rcupdate.c   | 59
 kernel/rcutorture.c |  3
 kernel/sys.c        |  3
 kernel/sysctl.c     | 29
 kernel/time.c       | 22
 9 files changed, 135 insertions(+), 29 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 0c56320d38dc..32fa03ad1984 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -291,8 +291,10 @@ int kauditd_thread(void *dummy)
 	set_current_state(TASK_INTERRUPTIBLE);
 	add_wait_queue(&kauditd_wait, &wait);
 
-	if (!skb_queue_len(&audit_skb_queue))
+	if (!skb_queue_len(&audit_skb_queue)) {
+		try_to_freeze();
 		schedule();
+	}
 
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&kauditd_wait, &wait);
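
The audit change makes kauditd cooperate with the process freezer: a kernel thread that sleeps in TASK_INTERRUPTIBLE without ever calling try_to_freeze() can keep a suspend attempt waiting forever. A minimal sketch of the same pattern, assuming a hypothetical work_pending() predicate in place of the audit queue check:

	/* Freezer-aware kthread wait loop -- illustrative only */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!work_pending()) {		/* hypothetical predicate */
			try_to_freeze();	/* park in the refrigerator if asked */
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		/* ... drain work ... */
	}
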
diff --git a/kernel/futex.c b/kernel/futex.c
index 5872e3507f35..5e71a6bf6f6b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -270,7 +270,13 @@ static void wake_futex(struct futex_q *q)
 	/*
 	 * The waiting task can free the futex_q as soon as this is written,
 	 * without taking any locks.  This must come last.
+	 *
+	 * A memory barrier is required here to prevent the following store
+	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
+	 * at the end of wake_up_all() does not prevent this store from
+	 * moving.
 	 */
+	wmb();
 	q->lock_ptr = NULL;
 }
 
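
The new comment spells out the ordering hazard: the waiter polls q->lock_ptr without holding any lock and treats NULL as permission to free the futex_q, so every store the waker makes to q (including those inside wake_up_all()) must reach memory before the NULL store. A minimal sketch of the protocol, assuming a simplified queue structure rather than the real futex_q:

	/* Illustrative only -- not the futex code itself */
	struct waitq_item {
		spinlock_t *lock_ptr;		/* NULL means "waker is done with me" */
		wait_queue_head_t waiters;
	};

	static void waker_side(struct waitq_item *q)
	{
		wake_up_all(&q->waiters);
		wmb();			/* wakeup stores must not pass this line */
		q->lock_ptr = NULL;	/* waiter may kfree(q) from here on */
	}
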
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5beda378cc75..3bb71e63a37e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -246,6 +246,19 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return ret;
 }
 
+/* Walks the list and increments nmissed count for multiprobe case */
+void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
+{
+	struct kprobe *kp;
+	if (p->pre_handler != aggr_pre_handler) {
+		p->nmissed++;
+	} else {
+		list_for_each_entry_rcu(kp, &p->list, list)
+			kp->nmissed++;
+	}
+	return;
+}
+
 /* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
@@ -399,10 +412,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	INIT_LIST_HEAD(&ap->list);
 	list_add_rcu(&p->list, &ap->list);
 
-	INIT_HLIST_NODE(&ap->hlist);
-	hlist_del_rcu(&p->hlist);
-	hlist_add_head_rcu(&ap->hlist,
-		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+	hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
 /*
@@ -462,9 +472,16 @@ int __kprobes register_kprobe(struct kprobe *p)
 	int ret = 0;
 	unsigned long flags = 0;
 	struct kprobe *old_p;
+	struct module *mod;
+
+	if ((!kernel_text_address((unsigned long) p->addr)) ||
+	    in_kprobes_functions((unsigned long) p->addr))
+		return -EINVAL;
+
+	if ((mod = module_text_address((unsigned long) p->addr)) &&
+	    (unlikely(!try_module_get(mod))))
+		return -EINVAL;
 
-	if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
-		return ret;
 	if ((ret = arch_prepare_kprobe(p)) != 0)
 		goto rm_kprobe;
 
@@ -488,6 +505,8 @@ out:
 rm_kprobe:
 	if (ret == -EEXIST)
 		arch_remove_kprobe(p);
+	if (ret && mod)
+		module_put(mod);
 	return ret;
 }
 
@@ -495,6 +514,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
 	struct kprobe *old_p;
+	struct module *mod;
 
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
@@ -506,6 +526,10 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 	cleanup_kprobe(p, flags);
 
 	synchronize_sched();
+
+	if ((mod = module_text_address((unsigned long)p->addr)))
+		module_put(mod);
+
 	if (old_p->pre_handler == aggr_pre_handler &&
 	    list_empty(&old_p->list))
 		kfree(old_p);
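
Two things change for kprobes here: an aggregate probe now replaces the original hash-table entry atomically with hlist_replace_rcu() instead of a delete-then-add pair, and registering a probe in module text takes a reference on the module so the probed instructions cannot be unloaded while the probe is armed. A registration sketch against this era's API (the address is set directly; the symbol_name convenience field came later), with my_func as a hypothetical target:

	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;	/* let the probed instruction run normally */
	}

	static struct kprobe kp = {
		.addr		= (kprobe_opcode_t *) my_func,	/* hypothetical */
		.pre_handler	= my_pre_handler,
	};

	/* register_kprobe(&kp) pins my_func's module via try_module_get();
	 * unregister_kprobe(&kp) drops that reference again. */
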
diff --git a/kernel/params.c b/kernel/params.c
index 47ba69547945..c76ad25e6a21 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -619,7 +619,7 @@ static void __init param_sysfs_builtin(void)
 
 
 /* module-related sysfs stuff */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_SYSFS
 
 #define to_module_attr(n) container_of(n, struct module_attribute, attr);
 #define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c4d159a21e04..48d3bce465b8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -116,6 +116,10 @@ void fastcall call_rcu(struct rcu_head *head,
 	local_irq_restore(flags);
 }
 
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -162,6 +166,42 @@ long rcu_batches_completed(void)
 	return rcu_ctrlblk.completed;
 }
 
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *notused)
+{
+	int cpu = smp_processor_id();
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	struct rcu_head *head;
+
+	head = &rdp->barrier;
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all the in-flight RCUs are complete.
+ */
+void rcu_barrier(void)
+{
+	BUG_ON(in_interrupt());
+	/* Take cpucontrol semaphore to protect against CPU hotplug */
+	down(&rcu_barrier_sema);
+	init_completion(&rcu_barrier_completion);
+	atomic_set(&rcu_barrier_cpu_count, 0);
+	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	wait_for_completion(&rcu_barrier_completion);
+	up(&rcu_barrier_sema);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Invoke the completed RCU callbacks. They are expected to be in
  * a per-cpu list.
@@ -217,15 +257,23 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
 
 	if (rcp->next_pending &&
 			rcp->completed == rcp->cur) {
-		/* Can't change, since spin lock held. */
-		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
-
 		rcp->next_pending = 0;
-		/* next_pending == 0 must be visible in __rcu_process_callbacks()
-		 * before it can see new value of cur.
+		/*
+		 * next_pending == 0 must be visible in
+		 * __rcu_process_callbacks() before it can see new value of cur.
 		 */
 		smp_wmb();
 		rcp->cur++;
+
+		/*
+		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+		 * barrier.  Otherwise it can cause tickless idle CPUs to be
+		 * included in rsp->cpumask, which will extend grace periods
+		 * unnecessarily.
+		 */
+		smp_mb();
+		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+
 	}
 }
 
@@ -457,6 +505,7 @@ static struct notifier_block __devinitdata rcu_nb = {
  */
 void __init rcu_init(void)
 {
+	sema_init(&rcu_barrier_sema, 1);
 	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	/* Register notifier for non-boot CPUs */
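
rcu_barrier() is stronger than synchronize_rcu(): the latter only waits for a grace period to elapse, while the former queues a callback on every CPU and waits until all previously queued callbacks have actually run. The canonical use is module unload, sketched here with a hypothetical my_cache; the rcutorture change below is the first in-tree user, replacing a loop of synchronize_rcu() calls with a single rcu_barrier():

	static void my_module_exit(void)
	{
		/* ... stop queueing new call_rcu() callbacks first ... */
		rcu_barrier();			/* every queued callback has run */
		kmem_cache_destroy(my_cache);	/* now safe to tear down */
	}
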
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 88c28d476550..49fbbeff201c 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -409,9 +409,8 @@ rcu_torture_cleanup(void)
 	stats_task = NULL;
 
 	/* Wait for all RCU callbacks to fire. */
+	rcu_barrier();
 
-	for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
-		synchronize_rcu();
 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 	printk(KERN_ALERT TORTURE_FLAG
 	       "--- End of test: %s\n",
diff --git a/kernel/sys.c b/kernel/sys.c
index bce933ebb29f..eecf84526afe 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -168,7 +169,7 @@ EXPORT_SYMBOL(notifier_chain_unregister);
  * of the last notifier function called.
  */
 
-int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
 {
 	int ret=NOTIFY_DONE;
 	struct notifier_block *nb = *n;
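
notifier_call_chain() sits on the die-notifier path that kprobes itself uses to field its breakpoint traps, so placing a probe on it would recurse fatally. The __kprobes marker moves the function into a protected text section, which register_kprobe()'s in_kprobes_functions() check refuses to probe. In the headers of this era the marker is, roughly:

	/* include/linux/kprobes.h -- paraphrased from memory */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))
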
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9990e10192e8..b53115b882e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2192,29 +2192,32 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
 		   void __user *oldval, size_t __user *oldlenp,
 		   void __user *newval, size_t newlen, void **context)
 {
-	size_t l, len;
-
 	if (!table->data || !table->maxlen)
 		return -ENOTDIR;
 
 	if (oldval && oldlenp) {
-		if (get_user(len, oldlenp))
+		size_t bufsize;
+		if (get_user(bufsize, oldlenp))
 			return -EFAULT;
-		if (len) {
-			l = strlen(table->data);
-			if (len > l) len = l;
-			if (len >= table->maxlen)
+		if (bufsize) {
+			size_t len = strlen(table->data), copied;
+
+			/* This shouldn't trigger for a well-formed sysctl */
+			if (len > table->maxlen)
 				len = table->maxlen;
-			if(copy_to_user(oldval, table->data, len))
-				return -EFAULT;
-			if(put_user(0, ((char __user *) oldval) + len))
+
+			/* Copy up to a max of bufsize-1 bytes of the string */
+			copied = (len >= bufsize) ? bufsize - 1 : len;
+
+			if (copy_to_user(oldval, table->data, copied) ||
+			    put_user(0, (char __user *)(oldval + copied)))
 				return -EFAULT;
-			if(put_user(len, oldlenp))
+			if (put_user(len, oldlenp))
 				return -EFAULT;
 		}
 	}
 	if (newval && newlen) {
-		len = newlen;
+		size_t len = newlen;
 		if (len > table->maxlen)
 			len = table->maxlen;
 		if(copy_from_user(table->data, newval, len))
@@ -2223,7 +2226,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
 			len--;
 		((char *) table->data)[len] = 0;
 	}
-	return 0;
+	return 1;
 }
 
 /*
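
The rewritten sysctl_string() separates the user's buffer size (bufsize, read from *oldlenp) from the string length. The old code could write one byte past the user-supplied length: len bytes of string plus a terminating NUL at offset len. The new code copies at most bufsize - 1 bytes, always NUL-terminates inside the buffer, and reports the full untruncated length back through oldlenp so callers can detect truncation, much as strlcpy() does. The truncation arithmetic, restated:

	size_t len = strlen(data);	/* full length, reported to the caller */
	size_t copied = (len >= bufsize) ? bufsize - 1 : len;
	/* write data[0..copied), then '\0' at offset copied */

The return value changes from 0 to 1 because a sysctl strategy routine returning 0 tells the generic do_sysctl() code to fall through and copy the data out itself, redoing the work without the truncation logic; a positive return marks the request as fully handled.
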
diff --git a/kernel/time.c b/kernel/time.c
index 245d595a13cb..b94bfa8c03e0 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -561,6 +561,28 @@ void getnstimeofday(struct timespec *tv)
 EXPORT_SYMBOL_GPL(getnstimeofday);
 #endif
 
+void getnstimestamp(struct timespec *ts)
+{
+	unsigned int seq;
+	struct timespec wall2mono;
+
+	/* synchronize with settimeofday() changes */
+	do {
+		seq = read_seqbegin(&xtime_lock);
+		getnstimeofday(ts);
+		wall2mono = wall_to_monotonic;
+	} while (unlikely(read_seqretry(&xtime_lock, seq)));
+
+	/* adjust to monotonically-increasing values */
+	ts->tv_sec += wall2mono.tv_sec;
+	ts->tv_nsec += wall2mono.tv_nsec;
+	while (unlikely(ts->tv_nsec >= NSEC_PER_SEC)) {
+		ts->tv_nsec -= NSEC_PER_SEC;
+		ts->tv_sec++;
+	}
+}
+EXPORT_SYMBOL_GPL(getnstimestamp);
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
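
getnstimestamp() returns wall time shifted by wall_to_monotonic, giving a timestamp that does not jump backwards across settimeofday(). Assuming both inputs keep tv_nsec normalized to [0, NSEC_PER_SEC), the sum's tv_nsec stays below 2 * NSEC_PER_SEC and at most one carry is ever needed; the while loop is simply defensive. A worked example:

	/* wall = 1000s + 900000000ns,  wall_to_monotonic = -3s + 300000000ns */
	/* sum  =  997s + 1200000000ns  -> one carry                         */
	/* out  =  998s + 200000000ns                                        */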