Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            15
-rw-r--r--  kernel/hrtimer.c           53
-rw-r--r--  kernel/kprobes.c            4
-rw-r--r--  kernel/module.c             7
-rw-r--r--  kernel/perf_event.c         2
-rw-r--r--  kernel/rcutree_trace.c     10
-rw-r--r--  kernel/relay.c              2
-rw-r--r--  kernel/res_counter.c       18
-rw-r--r--  kernel/sched.c              2
-rw-r--r--  kernel/sched_clock.c        4
-rw-r--r--  kernel/time/timer_list.c    2
-rw-r--r--  kernel/time/timer_stats.c   2
12 files changed, 64 insertions, 57 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7ccba4bc5e3b..ca83b73fba19 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -703,7 +703,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
 static const struct inode_operations cgroup_dir_inode_operations;
-static struct file_operations proc_cgroupstats_operations;
+static const struct file_operations proc_cgroupstats_operations;
 
 static struct backing_dev_info cgroup_backing_dev_info = {
 	.name = "cgroup",
@@ -1863,7 +1863,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file)
 	return single_release(inode, file);
 }
 
-static struct file_operations cgroup_seqfile_operations = {
+static const struct file_operations cgroup_seqfile_operations = {
 	.read = seq_read,
 	.write = cgroup_file_write,
 	.llseek = seq_lseek,
@@ -1922,7 +1922,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
 	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
 }
 
-static struct file_operations cgroup_file_operations = {
+static const struct file_operations cgroup_file_operations = {
 	.read = cgroup_file_read,
 	.write = cgroup_file_write,
 	.llseek = generic_file_llseek,
@@ -3369,7 +3369,7 @@ static int cgroup_open(struct inode *inode, struct file *file)
 	return single_open(file, proc_cgroup_show, pid);
 }
 
-struct file_operations proc_cgroup_operations = {
+const struct file_operations proc_cgroup_operations = {
 	.open = cgroup_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -3398,7 +3398,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file)
 	return single_open(file, proc_cgroupstats_show, NULL);
 }
 
-static struct file_operations proc_cgroupstats_operations = {
+static const struct file_operations proc_cgroupstats_operations = {
 	.open = cgroupstats_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -3708,8 +3708,10 @@ static void check_for_release(struct cgroup *cgrp)
 void __css_put(struct cgroup_subsys_state *css)
 {
 	struct cgroup *cgrp = css->cgroup;
+	int val;
 	rcu_read_lock();
-	if (atomic_dec_return(&css->refcnt) == 1) {
+	val = atomic_dec_return(&css->refcnt);
+	if (val == 1) {
 		if (notify_on_release(cgrp)) {
 			set_bit(CGRP_RELEASABLE, &cgrp->flags);
 			check_for_release(cgrp);
@@ -3717,6 +3719,7 @@ void __css_put(struct cgroup_subsys_state *css)
 		cgroup_wakeup_rmdir_waiter(cgrp);
 	}
 	rcu_read_unlock();
+	WARN_ON_ONCE(val < 1);
 }
 
 /*
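
Note: most of the changes in this commit simply constify static operations tables. Marking a file_operations instance const lets it be placed in read-only data, so its function pointers cannot be overwritten at run time. A minimal sketch of the pattern, outside this commit (the example file and show-function names are hypothetical):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

/* hypothetical read-only debugfs/proc style file built on seq_file */
static int example_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

/* const: the ops table lands in .rodata instead of writable data */
static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
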
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e5d98ce50f89..6d7020490f94 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -509,13 +509,14 @@ static inline int hrtimer_hres_active(void)
  * next event
  * Called with interrupts disabled and base->lock held
  */
-static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	int i;
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires;
+	ktime_t expires, expires_next;
 
-	cpu_base->expires_next.tv64 = KTIME_MAX;
+	expires_next.tv64 = KTIME_MAX;
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 		struct hrtimer *timer;
@@ -531,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 		 */
 		if (expires.tv64 < 0)
 			expires.tv64 = 0;
-		if (expires.tv64 < cpu_base->expires_next.tv64)
-			cpu_base->expires_next = expires;
+		if (expires.tv64 < expires_next.tv64)
+			expires_next = expires;
 	}
 
+	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+		return;
+
+	cpu_base->expires_next.tv64 = expires_next.tv64;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
@@ -617,7 +623,7 @@ static void retrigger_next_event(void *arg)
 	base->clock_base[CLOCK_REALTIME].offset =
 		timespec_to_ktime(realtime_offset);
 
-	hrtimer_force_reprogram(base);
+	hrtimer_force_reprogram(base, 0);
 	spin_unlock(&base->lock);
 }
 
@@ -730,7 +736,7 @@ static int hrtimer_switch_to_hres(void)
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
-static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
+static inline void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 					    struct hrtimer_clock_base *base,
 					    int wakeup)
@@ -873,19 +880,29 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
-	if (timer->state & HRTIMER_STATE_ENQUEUED) {
-		/*
-		 * Remove the timer from the rbtree and replace the
-		 * first entry pointer if necessary.
-		 */
-		if (base->first == &timer->node) {
-			base->first = rb_next(&timer->node);
-			/* Reprogram the clock event device. if enabled */
-			if (reprogram && hrtimer_hres_active())
-				hrtimer_force_reprogram(base->cpu_base);
+	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+		goto out;
+
+	/*
+	 * Remove the timer from the rbtree and replace the first
+	 * entry pointer if necessary.
+	 */
+	if (base->first == &timer->node) {
+		base->first = rb_next(&timer->node);
+#ifdef CONFIG_HIGH_RES_TIMERS
+		/* Reprogram the clock event device. if enabled */
+		if (reprogram && hrtimer_hres_active()) {
+			ktime_t expires;
+
+			expires = ktime_sub(hrtimer_get_expires(timer),
					    base->offset);
+			if (base->cpu_base->expires_next.tv64 == expires.tv64)
+				hrtimer_force_reprogram(base->cpu_base, 1);
 		}
-		rb_erase(&timer->node, &base->active);
+#endif
 	}
+	rb_erase(&timer->node, &base->active);
+out:
 	timer->state = newstate;
 }
 
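
Note: the hrtimer change computes the next expiry into a local expires_next and, when skip_equal is set, returns early if that value matches what is already programmed, so removing a timer only touches the clock event device when the earliest deadline actually moved. A standalone model of that idea (hypothetical userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int64_t programmed_next = INT64_MAX;	/* stands in for cpu_base->expires_next */

/* recompute the earliest deadline; reprogram only if it changed */
static void force_reprogram(const int64_t *deadlines, int n, int skip_equal)
{
	int64_t next = INT64_MAX;
	int i;

	for (i = 0; i < n; i++)
		if (deadlines[i] < next)
			next = deadlines[i];

	if (skip_equal && next == programmed_next)
		return;		/* earliest deadline unchanged: skip the hardware access */

	programmed_next = next;
	printf("programming event device for %lld\n", (long long)next);
}

int main(void)
{
	int64_t deadlines[] = { 300, 100, 200 };

	force_reprogram(deadlines, 3, 0);	/* programs 100 */
	force_reprogram(deadlines, 3, 1);	/* earliest still 100: nothing to do */
	return 0;
}
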
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index cfadc1291d0b..5240d75f4c60 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
 	return seq_open(filp, &kprobes_seq_ops);
 }
 
-static struct file_operations debugfs_kprobes_operations = {
+static const struct file_operations debugfs_kprobes_operations = {
 	.open = kprobes_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	return count;
 }
 
-static struct file_operations fops_kp = {
+static const struct file_operations fops_kp = {
 	.read = read_enabled_file_bool,
 	.write = write_enabled_file_bool,
 };
diff --git a/kernel/module.c b/kernel/module.c
index fe748a86d452..8b7d8805819d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1992,12 +1992,14 @@ static inline unsigned long layout_symtab(struct module *mod,
 					  Elf_Shdr *sechdrs,
 					  unsigned int symindex,
 					  unsigned int strindex,
-					  const Elf_Hdr *hdr,
+					  const Elf_Ehdr *hdr,
 					  const char *secstrings,
 					  unsigned long *pstroffs,
 					  unsigned long *strmap)
 {
+	return 0;
 }
+
 static inline void add_kallsyms(struct module *mod,
 				Elf_Shdr *sechdrs,
 				unsigned int shnum,
@@ -2081,9 +2083,8 @@ static noinline struct module *load_module(void __user *umod,
 	struct module *mod;
 	long err = 0;
 	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
-#ifdef CONFIG_KALLSYMS
 	unsigned long symoffs, stroffs, *strmap;
-#endif
+
 	mm_segment_t old_fs;
 
 	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 76ac4db405e9..0f86feb6db0c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2253,7 +2253,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	}
 }
 
-static struct vm_operations_struct perf_mmap_vmops = {
+static const struct vm_operations_struct perf_mmap_vmops = {
 	.open = perf_mmap_open,
 	.close = perf_mmap_close,
 	.fault = perf_mmap_fault,
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index c89f5e9fd173..179e6ad80dc0 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file)
 	return single_open(file, show_rcudata, NULL);
 }
 
-static struct file_operations rcudata_fops = {
+static const struct file_operations rcudata_fops = {
 	.owner = THIS_MODULE,
 	.open = rcudata_open,
 	.read = seq_read,
@@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file)
 	return single_open(file, show_rcudata_csv, NULL);
 }
 
-static struct file_operations rcudata_csv_fops = {
+static const struct file_operations rcudata_csv_fops = {
 	.owner = THIS_MODULE,
 	.open = rcudata_csv_open,
 	.read = seq_read,
@@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file)
 	return single_open(file, show_rcuhier, NULL);
 }
 
-static struct file_operations rcuhier_fops = {
+static const struct file_operations rcuhier_fops = {
 	.owner = THIS_MODULE,
 	.open = rcuhier_open,
 	.read = seq_read,
@@ -222,7 +222,7 @@ static int rcugp_open(struct inode *inode, struct file *file)
 	return single_open(file, show_rcugp, NULL);
 }
 
-static struct file_operations rcugp_fops = {
+static const struct file_operations rcugp_fops = {
 	.owner = THIS_MODULE,
 	.open = rcugp_open,
 	.read = seq_read,
@@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file)
 	return single_open(file, show_rcu_pending, NULL);
 }
 
-static struct file_operations rcu_pending_fops = {
+static const struct file_operations rcu_pending_fops = {
 	.owner = THIS_MODULE,
 	.open = rcu_pending_open,
 	.read = seq_read,
diff --git a/kernel/relay.c b/kernel/relay.c
index bc188549788f..760c26209a3c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -60,7 +60,7 @@ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 /*
  * vm_ops for relay file mappings.
  */
-static struct vm_operations_struct relay_file_mmap_ops = {
+static const struct vm_operations_struct relay_file_mmap_ops = {
 	.fault = relay_buf_fault,
 	.close = relay_file_mmap_close,
 };
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 88faec23e833..bcdabf37c40b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -37,27 +37,17 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
 }
 
 int res_counter_charge(struct res_counter *counter, unsigned long val,
-			struct res_counter **limit_fail_at,
-			struct res_counter **soft_limit_fail_at)
+			struct res_counter **limit_fail_at)
 {
 	int ret;
 	unsigned long flags;
 	struct res_counter *c, *u;
 
 	*limit_fail_at = NULL;
-	if (soft_limit_fail_at)
-		*soft_limit_fail_at = NULL;
 	local_irq_save(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
 		ret = res_counter_charge_locked(c, val);
-		/*
-		 * With soft limits, we return the highest ancestor
-		 * that exceeds its soft limit
-		 */
-		if (soft_limit_fail_at &&
-		    !res_counter_soft_limit_check_locked(c))
-			*soft_limit_fail_at = c;
 		spin_unlock(&c->lock);
 		if (ret < 0) {
 			*limit_fail_at = c;
@@ -85,8 +75,7 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 	counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val,
-			  bool *was_soft_limit_excess)
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 {
 	unsigned long flags;
 	struct res_counter *c;
@@ -94,9 +83,6 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val,
 	local_irq_save(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
-		if (was_soft_limit_excess)
-			*was_soft_limit_excess =
-				!res_counter_soft_limit_check_locked(c);
 		res_counter_uncharge_locked(c, val);
 		spin_unlock(&c->lock);
 	}
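
Note: with the soft-limit bookkeeping dropped, res_counter_charge() takes only the counter, the amount, and a pointer that reports which ancestor hit its hard limit, and res_counter_uncharge() loses its extra argument entirely. A hedged sketch of what a caller looks like after this change (the wrapper names are made up, not from this commit):

#include <linux/res_counter.h>

/* hypothetical wrapper: charge 'val' against a hierarchy, report the failing level */
static int example_try_charge(struct res_counter *cnt, unsigned long val)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(cnt, val, &fail_at);
	if (ret < 0)
		return ret;	/* fail_at is the ancestor whose hard limit was exceeded */
	return 0;
}

static void example_uncharge(struct res_counter *cnt, unsigned long val)
{
	res_counter_uncharge(cnt, val);	/* no soft-limit out-parameter any more */
}
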
diff --git a/kernel/sched.c b/kernel/sched.c
index ee61f454a98b..1535f3884b88 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -780,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
 	return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
 	.open = sched_feat_open,
 	.write = sched_feat_write,
 	.read = seq_read,
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ac2e1dc708bd..479ce5682d7c 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -127,7 +127,7 @@ again:
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
 
-	if (cmpxchg(&scd->clock, old_clock, clock) != old_clock)
+	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
 		goto again;
 
 	return clock;
@@ -163,7 +163,7 @@ again:
 		val = remote_clock;
 	}
 
-	if (cmpxchg(ptr, old_val, val) != old_val)
+	if (cmpxchg64(ptr, old_val, val) != old_val)
 		goto again;
 
 	return val;
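
Note: the sched_clock values are 64-bit, and a plain cmpxchg() only covers the full width on 64-bit architectures, so the retry loop switches to cmpxchg64() to stay atomic on 32-bit as well. A minimal kernel-context sketch of the same retry pattern under that assumption (the helper name is hypothetical; cmpxchg64() is supplied by the architecture headers):

#include <linux/types.h>

/*
 * Hypothetical helper: move a shared 64-bit clock forward without a lock.
 * If another CPU updates it between the read and the cmpxchg64(), the
 * compare fails and we retry with the fresh value.
 */
static u64 example_advance_clock(u64 *clock, u64 candidate)
{
	u64 old;

again:
	old = *clock;
	if (candidate <= old)
		return old;		/* already past the candidate value */
	if (cmpxchg64(clock, old, candidate) != old)
		goto again;		/* raced with another updater */
	return candidate;
}
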
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fddd69d16e03..1b5b7aa2fdfd 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp)
 	return single_open(filp, timer_list_show, NULL);
 }
 
-static struct file_operations timer_list_fops = {
+static const struct file_operations timer_list_fops = {
 	.open = timer_list_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 4cde8b9c716f..ee5681f8d7ec 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp)
 	return single_open(filp, tstats_show, NULL);
 }
 
-static struct file_operations tstats_fops = {
+static const struct file_operations tstats_fops = {
 	.open = tstats_open,
 	.read = seq_read,
 	.write = tstats_write,