author     Thomas Gleixner <tglx@linutronix.de>    2014-03-12 11:01:07 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2014-03-12 11:01:07 -0400
commit     ffb12cf002edbc5927079f51bebde428d601f723 (patch)
tree       1f04d80df9db8883037d59c81f5836770eecfdc6 /kernel
parent     1a75b8e64571a85d5e648cfdf4c40e0d9923abc5 (diff)
parent     c1bacbae8192dd2a9ebadd22d793b68054f6c6e5 (diff)
Merge branch 'irq/for-gpio' into irq/core
Merge the request/release callbacks which are in a separate branch for
consumption by the gpio folks.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_tree.c          |  2
-rw-r--r--  kernel/audit_watch.c         |  2
-rw-r--r--  kernel/cgroup.c              | 60
-rw-r--r--  kernel/cpuset.c              | 10
-rw-r--r--  kernel/events/core.c         | 12
-rw-r--r--  kernel/irq/irqdomain.c       |  1
-rw-r--r--  kernel/irq/manage.c          | 31
-rw-r--r--  kernel/power/console.c       |  1
-rw-r--r--  kernel/printk/printk.c       |  2
-rw-r--r--  kernel/sched/core.c          | 28
-rw-r--r--  kernel/sched/cpudeadline.c   |  6
-rw-r--r--  kernel/sched/deadline.c      | 20
-rw-r--r--  kernel/sched/fair.c          | 10
-rw-r--r--  kernel/sched/rt.c            |  8
-rw-r--r--  kernel/sched/sched.h         |  1
-rw-r--r--  kernel/time/sched_clock.c    | 46
-rw-r--r--  kernel/trace/trace_events.c  | 10
-rw-r--r--  kernel/tracepoint.c          |  7
-rw-r--r--  kernel/user_namespace.c      |  2
-rw-r--r--  kernel/workqueue.c           |  7
20 files changed, 168 insertions, 98 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 67ccf0e7cca9..135944a7b28a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -916,7 +916,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
916 | struct fsnotify_mark *inode_mark, | 916 | struct fsnotify_mark *inode_mark, |
917 | struct fsnotify_mark *vfsmount_mark, | 917 | struct fsnotify_mark *vfsmount_mark, |
918 | u32 mask, void *data, int data_type, | 918 | u32 mask, void *data, int data_type, |
919 | const unsigned char *file_name) | 919 | const unsigned char *file_name, u32 cookie) |
920 | { | 920 | { |
921 | return 0; | 921 | return 0; |
922 | } | 922 | } |
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 2596fac5dcb4..70b4554d2fbe 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -471,7 +471,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
471 | struct fsnotify_mark *inode_mark, | 471 | struct fsnotify_mark *inode_mark, |
472 | struct fsnotify_mark *vfsmount_mark, | 472 | struct fsnotify_mark *vfsmount_mark, |
473 | u32 mask, void *data, int data_type, | 473 | u32 mask, void *data, int data_type, |
474 | const unsigned char *dname) | 474 | const unsigned char *dname, u32 cookie) |
475 | { | 475 | { |
476 | struct inode *inode; | 476 | struct inode *inode; |
477 | struct audit_parent *parent; | 477 | struct audit_parent *parent; |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba37f72..105f273b6f86 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
886 | * per-subsystem and moved to css->id so that lookups are | 886 | * per-subsystem and moved to css->id so that lookups are |
887 | * successful until the target css is released. | 887 | * successful until the target css is released. |
888 | */ | 888 | */ |
889 | mutex_lock(&cgroup_mutex); | ||
889 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | 890 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); |
891 | mutex_unlock(&cgroup_mutex); | ||
890 | cgrp->id = -1; | 892 | cgrp->id = -1; |
891 | 893 | ||
892 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); | 894 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); |
@@ -1566,10 +1568,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1566 | mutex_lock(&cgroup_mutex); | 1568 | mutex_lock(&cgroup_mutex); |
1567 | mutex_lock(&cgroup_root_mutex); | 1569 | mutex_lock(&cgroup_root_mutex); |
1568 | 1570 | ||
1569 | root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp, | 1571 | ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); |
1570 | 0, 1, GFP_KERNEL); | 1572 | if (ret < 0) |
1571 | if (root_cgrp->id < 0) | ||
1572 | goto unlock_drop; | 1573 | goto unlock_drop; |
1574 | root_cgrp->id = ret; | ||
1573 | 1575 | ||
1574 | /* Check for name clashes with existing mounts */ | 1576 | /* Check for name clashes with existing mounts */ |
1575 | ret = -EBUSY; | 1577 | ret = -EBUSY; |
@@ -2763,10 +2765,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
2763 | */ | 2765 | */ |
2764 | update_before = cgroup_serial_nr_next; | 2766 | update_before = cgroup_serial_nr_next; |
2765 | 2767 | ||
2766 | mutex_unlock(&cgroup_mutex); | ||
2767 | |||
2768 | /* add/rm files for all cgroups created before */ | 2768 | /* add/rm files for all cgroups created before */ |
2769 | rcu_read_lock(); | ||
2770 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { | 2769 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { |
2771 | struct cgroup *cgrp = css->cgroup; | 2770 | struct cgroup *cgrp = css->cgroup; |
2772 | 2771 | ||
@@ -2775,23 +2774,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
2775 | 2774 | ||
2776 | inode = cgrp->dentry->d_inode; | 2775 | inode = cgrp->dentry->d_inode; |
2777 | dget(cgrp->dentry); | 2776 | dget(cgrp->dentry); |
2778 | rcu_read_unlock(); | ||
2779 | |||
2780 | dput(prev); | 2777 | dput(prev); |
2781 | prev = cgrp->dentry; | 2778 | prev = cgrp->dentry; |
2782 | 2779 | ||
2780 | mutex_unlock(&cgroup_mutex); | ||
2783 | mutex_lock(&inode->i_mutex); | 2781 | mutex_lock(&inode->i_mutex); |
2784 | mutex_lock(&cgroup_mutex); | 2782 | mutex_lock(&cgroup_mutex); |
2785 | if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) | 2783 | if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) |
2786 | ret = cgroup_addrm_files(cgrp, cfts, is_add); | 2784 | ret = cgroup_addrm_files(cgrp, cfts, is_add); |
2787 | mutex_unlock(&cgroup_mutex); | ||
2788 | mutex_unlock(&inode->i_mutex); | 2785 | mutex_unlock(&inode->i_mutex); |
2789 | |||
2790 | rcu_read_lock(); | ||
2791 | if (ret) | 2786 | if (ret) |
2792 | break; | 2787 | break; |
2793 | } | 2788 | } |
2794 | rcu_read_unlock(); | 2789 | mutex_unlock(&cgroup_mutex); |
2795 | dput(prev); | 2790 | dput(prev); |
2796 | deactivate_super(sb); | 2791 | deactivate_super(sb); |
2797 | return ret; | 2792 | return ret; |
@@ -2910,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
2910 | * We should check if the process is exiting, otherwise | 2905 | * We should check if the process is exiting, otherwise |
2911 | * it will race with cgroup_exit() in that the list | 2906 | * it will race with cgroup_exit() in that the list |
2912 | * entry won't be deleted though the process has exited. | 2907 | * entry won't be deleted though the process has exited. |
2908 | * Do it while holding siglock so that we don't end up | ||
2909 | * racing against cgroup_exit(). | ||
2913 | */ | 2910 | */ |
2911 | spin_lock_irq(&p->sighand->siglock); | ||
2914 | if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) | 2912 | if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) |
2915 | list_add(&p->cg_list, &task_css_set(p)->tasks); | 2913 | list_add(&p->cg_list, &task_css_set(p)->tasks); |
2914 | spin_unlock_irq(&p->sighand->siglock); | ||
2915 | |||
2916 | task_unlock(p); | 2916 | task_unlock(p); |
2917 | } while_each_thread(g, p); | 2917 | } while_each_thread(g, p); |
2918 | read_unlock(&tasklist_lock); | 2918 | read_unlock(&tasklist_lock); |
@@ -4158,7 +4158,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4158 | struct cgroup *cgrp; | 4158 | struct cgroup *cgrp; |
4159 | struct cgroup_name *name; | 4159 | struct cgroup_name *name; |
4160 | struct cgroupfs_root *root = parent->root; | 4160 | struct cgroupfs_root *root = parent->root; |
4161 | int ssid, err = 0; | 4161 | int ssid, err; |
4162 | struct cgroup_subsys *ss; | 4162 | struct cgroup_subsys *ss; |
4163 | struct super_block *sb = root->sb; | 4163 | struct super_block *sb = root->sb; |
4164 | 4164 | ||
@@ -4168,19 +4168,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4168 | return -ENOMEM; | 4168 | return -ENOMEM; |
4169 | 4169 | ||
4170 | name = cgroup_alloc_name(dentry); | 4170 | name = cgroup_alloc_name(dentry); |
4171 | if (!name) | 4171 | if (!name) { |
4172 | err = -ENOMEM; | ||
4172 | goto err_free_cgrp; | 4173 | goto err_free_cgrp; |
4174 | } | ||
4173 | rcu_assign_pointer(cgrp->name, name); | 4175 | rcu_assign_pointer(cgrp->name, name); |
4174 | 4176 | ||
4175 | /* | 4177 | /* |
4176 | * Temporarily set the pointer to NULL, so idr_find() won't return | ||
4177 | * a half-baked cgroup. | ||
4178 | */ | ||
4179 | cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); | ||
4180 | if (cgrp->id < 0) | ||
4181 | goto err_free_name; | ||
4182 | |||
4183 | /* | ||
4184 | * Only live parents can have children. Note that the liveliness | 4178 | * Only live parents can have children. Note that the liveliness |
4185 | * check isn't strictly necessary because cgroup_mkdir() and | 4179 | * check isn't strictly necessary because cgroup_mkdir() and |
4186 | * cgroup_rmdir() are fully synchronized by i_mutex; however, do it | 4180 | * cgroup_rmdir() are fully synchronized by i_mutex; however, do it |
@@ -4189,7 +4183,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4189 | */ | 4183 | */ |
4190 | if (!cgroup_lock_live_group(parent)) { | 4184 | if (!cgroup_lock_live_group(parent)) { |
4191 | err = -ENODEV; | 4185 | err = -ENODEV; |
4192 | goto err_free_id; | 4186 | goto err_free_name; |
4187 | } | ||
4188 | |||
4189 | /* | ||
4190 | * Temporarily set the pointer to NULL, so idr_find() won't return | ||
4191 | * a half-baked cgroup. | ||
4192 | */ | ||
4193 | cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); | ||
4194 | if (cgrp->id < 0) { | ||
4195 | err = -ENOMEM; | ||
4196 | goto err_unlock; | ||
4193 | } | 4197 | } |
4194 | 4198 | ||
4195 | /* Grab a reference on the superblock so the hierarchy doesn't | 4199 | /* Grab a reference on the superblock so the hierarchy doesn't |
@@ -4221,7 +4225,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4221 | */ | 4225 | */ |
4222 | err = cgroup_create_file(dentry, S_IFDIR | mode, sb); | 4226 | err = cgroup_create_file(dentry, S_IFDIR | mode, sb); |
4223 | if (err < 0) | 4227 | if (err < 0) |
4224 | goto err_unlock; | 4228 | goto err_free_id; |
4225 | lockdep_assert_held(&dentry->d_inode->i_mutex); | 4229 | lockdep_assert_held(&dentry->d_inode->i_mutex); |
4226 | 4230 | ||
4227 | cgrp->serial_nr = cgroup_serial_nr_next++; | 4231 | cgrp->serial_nr = cgroup_serial_nr_next++; |
@@ -4257,12 +4261,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4257 | 4261 | ||
4258 | return 0; | 4262 | return 0; |
4259 | 4263 | ||
4260 | err_unlock: | ||
4261 | mutex_unlock(&cgroup_mutex); | ||
4262 | /* Release the reference count that we took on the superblock */ | ||
4263 | deactivate_super(sb); | ||
4264 | err_free_id: | 4264 | err_free_id: |
4265 | idr_remove(&root->cgroup_idr, cgrp->id); | 4265 | idr_remove(&root->cgroup_idr, cgrp->id); |
4266 | /* Release the reference count that we took on the superblock */ | ||
4267 | deactivate_super(sb); | ||
4268 | err_unlock: | ||
4269 | mutex_unlock(&cgroup_mutex); | ||
4266 | err_free_name: | 4270 | err_free_name: |
4267 | kfree(rcu_dereference_raw(cgrp->name)); | 4271 | kfree(rcu_dereference_raw(cgrp->name)); |
4268 | err_free_cgrp: | 4272 | err_free_cgrp: |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
974 | * Temporarilly set tasks mems_allowed to target nodes of migration, | 974 | * Temporarilly set tasks mems_allowed to target nodes of migration, |
975 | * so that the migration code can allocate pages on these nodes. | 975 | * so that the migration code can allocate pages on these nodes. |
976 | * | 976 | * |
977 | * Call holding cpuset_mutex, so current's cpuset won't change | ||
978 | * during this call, as manage_mutex holds off any cpuset_attach() | ||
979 | * calls. Therefore we don't need to take task_lock around the | ||
980 | * call to guarantee_online_mems(), as we know no one is changing | ||
981 | * our task's cpuset. | ||
982 | * | ||
983 | * While the mm_struct we are migrating is typically from some | 977 | * While the mm_struct we are migrating is typically from some |
984 | * other task, the task_struct mems_allowed that we are hacking | 978 | * other task, the task_struct mems_allowed that we are hacking |
985 | * is for our current task, which must allocate new pages for that | 979 | * is for our current task, which must allocate new pages for that |
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
996 | 990 | ||
997 | do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); | 991 | do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); |
998 | 992 | ||
993 | rcu_read_lock(); | ||
999 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); | 994 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); |
1000 | guarantee_online_mems(mems_cs, &tsk->mems_allowed); | 995 | guarantee_online_mems(mems_cs, &tsk->mems_allowed); |
996 | rcu_read_unlock(); | ||
1001 | } | 997 | } |
1002 | 998 | ||
1003 | /* | 999 | /* |
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2486 | 2482 | ||
2487 | task_lock(current); | 2483 | task_lock(current); |
2488 | cs = nearest_hardwall_ancestor(task_cs(current)); | 2484 | cs = nearest_hardwall_ancestor(task_cs(current)); |
2485 | allowed = node_isset(node, cs->mems_allowed); | ||
2489 | task_unlock(current); | 2486 | task_unlock(current); |
2490 | 2487 | ||
2491 | allowed = node_isset(node, cs->mems_allowed); | ||
2492 | mutex_unlock(&callback_mutex); | 2488 | mutex_unlock(&callback_mutex); |
2493 | return allowed; | 2489 | return allowed; |
2494 | } | 2490 | } |
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6edfd3..fa0b2d4ad83c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7856,14 +7856,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
7856 | static void __perf_event_exit_context(void *__info) | 7856 | static void __perf_event_exit_context(void *__info) |
7857 | { | 7857 | { |
7858 | struct perf_event_context *ctx = __info; | 7858 | struct perf_event_context *ctx = __info; |
7859 | struct perf_event *event, *tmp; | 7859 | struct perf_event *event; |
7860 | 7860 | ||
7861 | perf_pmu_rotate_stop(ctx->pmu); | 7861 | perf_pmu_rotate_stop(ctx->pmu); |
7862 | 7862 | ||
7863 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) | 7863 | rcu_read_lock(); |
7864 | __perf_remove_from_context(event); | 7864 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) |
7865 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) | ||
7866 | __perf_remove_from_context(event); | 7865 | __perf_remove_from_context(event); |
7866 | rcu_read_unlock(); | ||
7867 | } | 7867 | } |
7868 | 7868 | ||
7869 | static void perf_event_exit_cpu_context(int cpu) | 7869 | static void perf_event_exit_cpu_context(int cpu) |
@@ -7887,11 +7887,11 @@ static void perf_event_exit_cpu(int cpu)
7887 | { | 7887 | { |
7888 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 7888 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
7889 | 7889 | ||
7890 | perf_event_exit_cpu_context(cpu); | ||
7891 | |||
7890 | mutex_lock(&swhash->hlist_mutex); | 7892 | mutex_lock(&swhash->hlist_mutex); |
7891 | swevent_hlist_release(swhash); | 7893 | swevent_hlist_release(swhash); |
7892 | mutex_unlock(&swhash->hlist_mutex); | 7894 | mutex_unlock(&swhash->hlist_mutex); |
7893 | |||
7894 | perf_event_exit_cpu_context(cpu); | ||
7895 | } | 7895 | } |
7896 | #else | 7896 | #else |
7897 | static inline void perf_event_exit_cpu(int cpu) { } | 7897 | static inline void perf_event_exit_cpu(int cpu) { } |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb36fe58..f14033700c25 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
12 | #include <linux/of_address.h> | 12 | #include <linux/of_address.h> |
13 | #include <linux/of_irq.h> | ||
13 | #include <linux/topology.h> | 14 | #include <linux/topology.h> |
14 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ada0c548c36a..de1a8ed29b40 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -832,8 +832,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
832 | 832 | ||
833 | static void wake_threads_waitq(struct irq_desc *desc) | 833 | static void wake_threads_waitq(struct irq_desc *desc) |
834 | { | 834 | { |
835 | if (atomic_dec_and_test(&desc->threads_active) && | 835 | if (atomic_dec_and_test(&desc->threads_active)) |
836 | waitqueue_active(&desc->wait_for_threads)) | ||
837 | wake_up(&desc->wait_for_threads); | 836 | wake_up(&desc->wait_for_threads); |
838 | } | 837 | } |
839 | 838 | ||
@@ -954,6 +953,23 @@ static void irq_setup_forced_threading(struct irqaction *new)
954 | } | 953 | } |
955 | } | 954 | } |
956 | 955 | ||
956 | static int irq_request_resources(struct irq_desc *desc) | ||
957 | { | ||
958 | struct irq_data *d = &desc->irq_data; | ||
959 | struct irq_chip *c = d->chip; | ||
960 | |||
961 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | ||
962 | } | ||
963 | |||
964 | static void irq_release_resources(struct irq_desc *desc) | ||
965 | { | ||
966 | struct irq_data *d = &desc->irq_data; | ||
967 | struct irq_chip *c = d->chip; | ||
968 | |||
969 | if (c->irq_release_resources) | ||
970 | c->irq_release_resources(d); | ||
971 | } | ||
972 | |||
957 | /* | 973 | /* |
958 | * Internal function to register an irqaction - typically used to | 974 | * Internal function to register an irqaction - typically used to |
959 | * allocate special interrupts that are part of the architecture. | 975 | * allocate special interrupts that are part of the architecture. |
@@ -1149,6 +1165,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1149 | } | 1165 | } |
1150 | 1166 | ||
1151 | if (!shared) { | 1167 | if (!shared) { |
1168 | ret = irq_request_resources(desc); | ||
1169 | if (ret) { | ||
1170 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | ||
1171 | new->name, irq, desc->irq_data.chip->name); | ||
1172 | goto out_mask; | ||
1173 | } | ||
1174 | |||
1152 | init_waitqueue_head(&desc->wait_for_threads); | 1175 | init_waitqueue_head(&desc->wait_for_threads); |
1153 | 1176 | ||
1154 | /* Setup the type (level, edge polarity) if configured: */ | 1177 | /* Setup the type (level, edge polarity) if configured: */ |
@@ -1319,8 +1342,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1319 | *action_ptr = action->next; | 1342 | *action_ptr = action->next; |
1320 | 1343 | ||
1321 | /* If this was the last handler, shut down the IRQ line: */ | 1344 | /* If this was the last handler, shut down the IRQ line: */ |
1322 | if (!desc->action) | 1345 | if (!desc->action) { |
1323 | irq_shutdown(desc); | 1346 | irq_shutdown(desc); |
1347 | irq_release_resources(desc); | ||
1348 | } | ||
1324 | 1349 | ||
1325 | #ifdef CONFIG_SMP | 1350 | #ifdef CONFIG_SMP |
1326 | /* make sure affinity_hint is cleaned up */ | 1351 | /* make sure affinity_hint is cleaned up */ |
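Note on the hunks above: the irq_request_resources()/irq_release_resources() chip callbacks are the hooks the merge description says are meant "for consumption by the gpio folks". They are invoked from __setup_irq() before the first handler of a line is installed and from __free_irq() after the last handler is removed, so a GPIO driver can claim a pin when it starts being used as an interrupt and release it afterwards. A rough sketch of how a GPIO irqchip might wire them up is shown below; the my_gpio_* names are hypothetical, and the gpiochip_lock_as_irq()/gpiochip_unlock_as_irq() calls are the gpiolib helpers such a driver would plausibly use — none of this code is part of the patch itself.

#include <linux/irq.h>
#include <linux/gpio/driver.h>

/* Hypothetical driver; only the two *_resources hooks relate to this merge. */

static void my_gpio_irq_mask(struct irq_data *d)
{
	/* mask the interrupt in hardware (register write omitted) */
}

static void my_gpio_irq_unmask(struct irq_data *d)
{
	/* unmask the interrupt in hardware (register write omitted) */
}

static int my_gpio_irq_request_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* called from __setup_irq() before the first handler is installed */
	return gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
}

static void my_gpio_irq_release_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* called from __free_irq() after the last handler is removed */
	gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d));
}

static struct irq_chip my_gpio_irq_chip = {
	.name			= "my-gpio",
	.irq_mask		= my_gpio_irq_mask,
	.irq_unmask		= my_gpio_irq_unmask,
	.irq_request_resources	= my_gpio_irq_request_resources,
	.irq_release_resources	= my_gpio_irq_release_resources,
};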
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd8cab4..aba9c545a0e3 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
9 | #include <linux/kbd_kern.h> | 9 | #include <linux/kbd_kern.h> |
10 | #include <linux/vt.h> | 10 | #include <linux/vt.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | ||
12 | #include "power.h" | 13 | #include "power.h" |
13 | 14 | ||
14 | #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) | 15 | #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f04135..4dae9cbe9259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1076 | next_seq = log_next_seq; | 1076 | next_seq = log_next_seq; |
1077 | 1077 | ||
1078 | len = 0; | 1078 | len = 0; |
1079 | prev = 0; | ||
1080 | while (len >= 0 && seq < next_seq) { | 1079 | while (len >= 0 && seq < next_seq) { |
1081 | struct printk_log *msg = log_from_idx(idx); | 1080 | struct printk_log *msg = log_from_idx(idx); |
1082 | int textlen; | 1081 | int textlen; |
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2788 | next_idx = idx; | 2787 | next_idx = idx; |
2789 | 2788 | ||
2790 | l = 0; | 2789 | l = 0; |
2791 | prev = 0; | ||
2792 | while (seq < dumper->next_seq) { | 2790 | while (seq < dumper->next_seq) { |
2793 | struct printk_log *msg = log_from_idx(idx); | 2791 | struct printk_log *msg = log_from_idx(idx); |
2794 | 2792 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..6edbef296ece 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
1952 | { | 1952 | { |
1953 | 1953 | ||
1954 | struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); | 1954 | struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); |
1955 | u64 period = attr->sched_period; | 1955 | u64 period = attr->sched_period ?: attr->sched_deadline; |
1956 | u64 runtime = attr->sched_runtime; | 1956 | u64 runtime = attr->sched_runtime; |
1957 | u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; | 1957 | u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; |
1958 | int cpus, err = -1; | 1958 | int cpus, err = -1; |
@@ -3661,13 +3661,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3661 | * @pid: the pid in question. | 3661 | * @pid: the pid in question. |
3662 | * @uattr: structure containing the extended parameters. | 3662 | * @uattr: structure containing the extended parameters. |
3663 | */ | 3663 | */ |
3664 | SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr) | 3664 | SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, |
3665 | unsigned int, flags) | ||
3665 | { | 3666 | { |
3666 | struct sched_attr attr; | 3667 | struct sched_attr attr; |
3667 | struct task_struct *p; | 3668 | struct task_struct *p; |
3668 | int retval; | 3669 | int retval; |
3669 | 3670 | ||
3670 | if (!uattr || pid < 0) | 3671 | if (!uattr || pid < 0 || flags) |
3671 | return -EINVAL; | 3672 | return -EINVAL; |
3672 | 3673 | ||
3673 | if (sched_copy_attr(uattr, &attr)) | 3674 | if (sched_copy_attr(uattr, &attr)) |
@@ -3786,7 +3787,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
3786 | attr->size = usize; | 3787 | attr->size = usize; |
3787 | } | 3788 | } |
3788 | 3789 | ||
3789 | ret = copy_to_user(uattr, attr, usize); | 3790 | ret = copy_to_user(uattr, attr, attr->size); |
3790 | if (ret) | 3791 | if (ret) |
3791 | return -EFAULT; | 3792 | return -EFAULT; |
3792 | 3793 | ||
@@ -3804,8 +3805,8 @@ err_size:
3804 | * @uattr: structure containing the extended parameters. | 3805 | * @uattr: structure containing the extended parameters. |
3805 | * @size: sizeof(attr) for fwd/bwd comp. | 3806 | * @size: sizeof(attr) for fwd/bwd comp. |
3806 | */ | 3807 | */ |
3807 | SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, | 3808 | SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, |
3808 | unsigned int, size) | 3809 | unsigned int, size, unsigned int, flags) |
3809 | { | 3810 | { |
3810 | struct sched_attr attr = { | 3811 | struct sched_attr attr = { |
3811 | .size = sizeof(struct sched_attr), | 3812 | .size = sizeof(struct sched_attr), |
@@ -3814,7 +3815,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3814 | int retval; | 3815 | int retval; |
3815 | 3816 | ||
3816 | if (!uattr || pid < 0 || size > PAGE_SIZE || | 3817 | if (!uattr || pid < 0 || size > PAGE_SIZE || |
3817 | size < SCHED_ATTR_SIZE_VER0) | 3818 | size < SCHED_ATTR_SIZE_VER0 || flags) |
3818 | return -EINVAL; | 3819 | return -EINVAL; |
3819 | 3820 | ||
3820 | rcu_read_lock(); | 3821 | rcu_read_lock(); |
@@ -7422,6 +7423,7 @@ static int sched_dl_global_constraints(void)
7422 | u64 period = global_rt_period(); | 7423 | u64 period = global_rt_period(); |
7423 | u64 new_bw = to_ratio(period, runtime); | 7424 | u64 new_bw = to_ratio(period, runtime); |
7424 | int cpu, ret = 0; | 7425 | int cpu, ret = 0; |
7426 | unsigned long flags; | ||
7425 | 7427 | ||
7426 | /* | 7428 | /* |
7427 | * Here we want to check the bandwidth not being set to some | 7429 | * Here we want to check the bandwidth not being set to some |
@@ -7435,10 +7437,10 @@ static int sched_dl_global_constraints(void)
7435 | for_each_possible_cpu(cpu) { | 7437 | for_each_possible_cpu(cpu) { |
7436 | struct dl_bw *dl_b = dl_bw_of(cpu); | 7438 | struct dl_bw *dl_b = dl_bw_of(cpu); |
7437 | 7439 | ||
7438 | raw_spin_lock(&dl_b->lock); | 7440 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
7439 | if (new_bw < dl_b->total_bw) | 7441 | if (new_bw < dl_b->total_bw) |
7440 | ret = -EBUSY; | 7442 | ret = -EBUSY; |
7441 | raw_spin_unlock(&dl_b->lock); | 7443 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
7442 | 7444 | ||
7443 | if (ret) | 7445 | if (ret) |
7444 | break; | 7446 | break; |
@@ -7451,6 +7453,7 @@ static void sched_dl_do_global(void)
7451 | { | 7453 | { |
7452 | u64 new_bw = -1; | 7454 | u64 new_bw = -1; |
7453 | int cpu; | 7455 | int cpu; |
7456 | unsigned long flags; | ||
7454 | 7457 | ||
7455 | def_dl_bandwidth.dl_period = global_rt_period(); | 7458 | def_dl_bandwidth.dl_period = global_rt_period(); |
7456 | def_dl_bandwidth.dl_runtime = global_rt_runtime(); | 7459 | def_dl_bandwidth.dl_runtime = global_rt_runtime(); |
@@ -7464,9 +7467,9 @@ static void sched_dl_do_global(void)
7464 | for_each_possible_cpu(cpu) { | 7467 | for_each_possible_cpu(cpu) { |
7465 | struct dl_bw *dl_b = dl_bw_of(cpu); | 7468 | struct dl_bw *dl_b = dl_bw_of(cpu); |
7466 | 7469 | ||
7467 | raw_spin_lock(&dl_b->lock); | 7470 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
7468 | dl_b->bw = new_bw; | 7471 | dl_b->bw = new_bw; |
7469 | raw_spin_unlock(&dl_b->lock); | 7472 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
7470 | } | 7473 | } |
7471 | } | 7474 | } |
7472 | 7475 | ||
@@ -7475,7 +7478,8 @@ static int sched_rt_global_validate(void)
7475 | if (sysctl_sched_rt_period <= 0) | 7478 | if (sysctl_sched_rt_period <= 0) |
7476 | return -EINVAL; | 7479 | return -EINVAL; |
7477 | 7480 | ||
7478 | if (sysctl_sched_rt_runtime > sysctl_sched_rt_period) | 7481 | if ((sysctl_sched_rt_runtime != RUNTIME_INF) && |
7482 | (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) | ||
7479 | return -EINVAL; | 7483 | return -EINVAL; |
7480 | 7484 | ||
7481 | return 0; | 7485 | return 0; |
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74e3f09..5b9bb42b2d47 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
70 | 70 | ||
71 | static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) | 71 | static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) |
72 | { | 72 | { |
73 | WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID); | 73 | WARN_ON(idx == IDX_INVALID || !cpu_present(idx)); |
74 | 74 | ||
75 | if (dl_time_before(new_dl, cp->elements[idx].dl)) { | 75 | if (dl_time_before(new_dl, cp->elements[idx].dl)) { |
76 | cp->elements[idx].dl = new_dl; | 76 | cp->elements[idx].dl = new_dl; |
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
117 | } | 117 | } |
118 | 118 | ||
119 | out: | 119 | out: |
120 | WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1); | 120 | WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); |
121 | 121 | ||
122 | return best_cpu; | 122 | return best_cpu; |
123 | } | 123 | } |
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
137 | int old_idx, new_cpu; | 137 | int old_idx, new_cpu; |
138 | unsigned long flags; | 138 | unsigned long flags; |
139 | 139 | ||
140 | WARN_ON(cpu > num_present_cpus()); | 140 | WARN_ON(!cpu_present(cpu)); |
141 | 141 | ||
142 | raw_spin_lock_irqsave(&cp->lock, flags); | 142 | raw_spin_lock_irqsave(&cp->lock, flags); |
143 | old_idx = cp->cpu_to_idx[cpu]; | 143 | old_idx = cp->cpu_to_idx[cpu]; |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
121 | 121 | ||
122 | static void update_dl_migration(struct dl_rq *dl_rq) | 122 | static void update_dl_migration(struct dl_rq *dl_rq) |
123 | { | 123 | { |
124 | if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { | 124 | if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { |
125 | if (!dl_rq->overloaded) { | 125 | if (!dl_rq->overloaded) { |
126 | dl_set_overload(rq_of_dl_rq(dl_rq)); | 126 | dl_set_overload(rq_of_dl_rq(dl_rq)); |
127 | dl_rq->overloaded = 1; | 127 | dl_rq->overloaded = 1; |
@@ -135,9 +135,7 @@ static void update_dl_migration(struct dl_rq *dl_rq)
135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
136 | { | 136 | { |
137 | struct task_struct *p = dl_task_of(dl_se); | 137 | struct task_struct *p = dl_task_of(dl_se); |
138 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
139 | 138 | ||
140 | dl_rq->dl_nr_total++; | ||
141 | if (p->nr_cpus_allowed > 1) | 139 | if (p->nr_cpus_allowed > 1) |
142 | dl_rq->dl_nr_migratory++; | 140 | dl_rq->dl_nr_migratory++; |
143 | 141 | ||
@@ -147,9 +145,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
147 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 145 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
148 | { | 146 | { |
149 | struct task_struct *p = dl_task_of(dl_se); | 147 | struct task_struct *p = dl_task_of(dl_se); |
150 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
151 | 148 | ||
152 | dl_rq->dl_nr_total--; | ||
153 | if (p->nr_cpus_allowed > 1) | 149 | if (p->nr_cpus_allowed > 1) |
154 | dl_rq->dl_nr_migratory--; | 150 | dl_rq->dl_nr_migratory--; |
155 | 151 | ||
@@ -566,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
566 | return 1; | 562 | return 1; |
567 | } | 563 | } |
568 | 564 | ||
565 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | ||
566 | |||
569 | /* | 567 | /* |
570 | * Update the current task's runtime statistics (provided it is still | 568 | * Update the current task's runtime statistics (provided it is still |
571 | * a -deadline task and has not been removed from the dl_rq). | 569 | * a -deadline task and has not been removed from the dl_rq). |
@@ -629,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
629 | struct rt_rq *rt_rq = &rq->rt; | 627 | struct rt_rq *rt_rq = &rq->rt; |
630 | 628 | ||
631 | raw_spin_lock(&rt_rq->rt_runtime_lock); | 629 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
632 | rt_rq->rt_time += delta_exec; | ||
633 | /* | 630 | /* |
634 | * We'll let actual RT tasks worry about the overflow here, we | 631 | * We'll let actual RT tasks worry about the overflow here, we |
635 | * have our own CBS to keep us inline -- see above. | 632 | * have our own CBS to keep us inline; only account when RT |
633 | * bandwidth is relevant. | ||
636 | */ | 634 | */ |
635 | if (sched_rt_bandwidth_account(rt_rq)) | ||
636 | rt_rq->rt_time += delta_exec; | ||
637 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 637 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
638 | } | 638 | } |
639 | } | 639 | } |
@@ -717,6 +717,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
717 | 717 | ||
718 | WARN_ON(!dl_prio(prio)); | 718 | WARN_ON(!dl_prio(prio)); |
719 | dl_rq->dl_nr_running++; | 719 | dl_rq->dl_nr_running++; |
720 | inc_nr_running(rq_of_dl_rq(dl_rq)); | ||
720 | 721 | ||
721 | inc_dl_deadline(dl_rq, deadline); | 722 | inc_dl_deadline(dl_rq, deadline); |
722 | inc_dl_migration(dl_se, dl_rq); | 723 | inc_dl_migration(dl_se, dl_rq); |
@@ -730,6 +731,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
730 | WARN_ON(!dl_prio(prio)); | 731 | WARN_ON(!dl_prio(prio)); |
731 | WARN_ON(!dl_rq->dl_nr_running); | 732 | WARN_ON(!dl_rq->dl_nr_running); |
732 | dl_rq->dl_nr_running--; | 733 | dl_rq->dl_nr_running--; |
734 | dec_nr_running(rq_of_dl_rq(dl_rq)); | ||
733 | 735 | ||
734 | dec_dl_deadline(dl_rq, dl_se->deadline); | 736 | dec_dl_deadline(dl_rq, dl_se->deadline); |
735 | dec_dl_migration(dl_se, dl_rq); | 737 | dec_dl_migration(dl_se, dl_rq); |
@@ -836,8 +838,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
836 | 838 | ||
837 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) | 839 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) |
838 | enqueue_pushable_dl_task(rq, p); | 840 | enqueue_pushable_dl_task(rq, p); |
839 | |||
840 | inc_nr_running(rq); | ||
841 | } | 841 | } |
842 | 842 | ||
843 | static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) | 843 | static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
@@ -850,8 +850,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
850 | { | 850 | { |
851 | update_curr_dl(rq); | 851 | update_curr_dl(rq); |
852 | __dequeue_task_dl(rq, p, flags); | 852 | __dequeue_task_dl(rq, p, flags); |
853 | |||
854 | dec_nr_running(rq); | ||
855 | } | 853 | } |
856 | 854 | ||
857 | /* | 855 | /* |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 966cc2bfcb77..9b4c4f320130 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)
1757 | start = end; | 1757 | start = end; |
1758 | if (pages <= 0) | 1758 | if (pages <= 0) |
1759 | goto out; | 1759 | goto out; |
1760 | |||
1761 | cond_resched(); | ||
1760 | } while (end != vma->vm_end); | 1762 | } while (end != vma->vm_end); |
1761 | } | 1763 | } |
1762 | 1764 | ||
@@ -6999,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
6999 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 7001 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
7000 | 7002 | ||
7001 | /* | 7003 | /* |
7002 | * Ensure the task's vruntime is normalized, so that when its | 7004 | * Ensure the task's vruntime is normalized, so that when it's |
7003 | * switched back to the fair class the enqueue_entity(.flags=0) will | 7005 | * switched back to the fair class the enqueue_entity(.flags=0) will |
7004 | * do the right thing. | 7006 | * do the right thing. |
7005 | * | 7007 | * |
7006 | * If it was on_rq, then the dequeue_entity(.flags=0) will already | 7008 | * If it's on_rq, then the dequeue_entity(.flags=0) will already |
7007 | * have normalized the vruntime, if it was !on_rq, then only when | 7009 | * have normalized the vruntime, if it's !on_rq, then only when |
7008 | * the task is sleeping will it still have non-normalized vruntime. | 7010 | * the task is sleeping will it still have non-normalized vruntime. |
7009 | */ | 7011 | */ |
7010 | if (!se->on_rq && p->state != TASK_RUNNING) { | 7012 | if (!p->on_rq && p->state != TASK_RUNNING) { |
7011 | /* | 7013 | /* |
7012 | * Fix up our vruntime so that the current sleep doesn't | 7014 | * Fix up our vruntime so that the current sleep doesn't |
7013 | * cause 'unlimited' sleep bonus. | 7015 | * cause 'unlimited' sleep bonus. |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..1999021042c7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
538 | 538 | ||
539 | #endif /* CONFIG_RT_GROUP_SCHED */ | 539 | #endif /* CONFIG_RT_GROUP_SCHED */ |
540 | 540 | ||
541 | bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) | ||
542 | { | ||
543 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | ||
544 | |||
545 | return (hrtimer_active(&rt_b->rt_period_timer) || | ||
546 | rt_rq->rt_time < rt_b->rt_runtime); | ||
547 | } | ||
548 | |||
541 | #ifdef CONFIG_SMP | 549 | #ifdef CONFIG_SMP |
542 | /* | 550 | /* |
543 | * We ran out of runtime, see if we can borrow some from our neighbours. | 551 | * We ran out of runtime, see if we can borrow some from our neighbours. |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd20f8b..f964add50f38 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -462,7 +462,6 @@ struct dl_rq {
462 | } earliest_dl; | 462 | } earliest_dl; |
463 | 463 | ||
464 | unsigned long dl_nr_migratory; | 464 | unsigned long dl_nr_migratory; |
465 | unsigned long dl_nr_total; | ||
466 | int overloaded; | 465 | int overloaded; |
467 | 466 | ||
468 | /* | 467 | /* |
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0abb36464281..4d23dc4d8139 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
116 | void __init sched_clock_register(u64 (*read)(void), int bits, | 116 | void __init sched_clock_register(u64 (*read)(void), int bits, |
117 | unsigned long rate) | 117 | unsigned long rate) |
118 | { | 118 | { |
119 | u64 res, wrap, new_mask, new_epoch, cyc, ns; | ||
120 | u32 new_mult, new_shift; | ||
121 | ktime_t new_wrap_kt; | ||
119 | unsigned long r; | 122 | unsigned long r; |
120 | u64 res, wrap; | ||
121 | char r_unit; | 123 | char r_unit; |
122 | 124 | ||
123 | if (cd.rate > rate) | 125 | if (cd.rate > rate) |
124 | return; | 126 | return; |
125 | 127 | ||
126 | WARN_ON(!irqs_disabled()); | 128 | WARN_ON(!irqs_disabled()); |
127 | read_sched_clock = read; | ||
128 | sched_clock_mask = CLOCKSOURCE_MASK(bits); | ||
129 | cd.rate = rate; | ||
130 | 129 | ||
131 | /* calculate the mult/shift to convert counter ticks to ns. */ | 130 | /* calculate the mult/shift to convert counter ticks to ns. */ |
132 | clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); | 131 | clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600); |
132 | |||
133 | new_mask = CLOCKSOURCE_MASK(bits); | ||
134 | |||
135 | /* calculate how many ns until we wrap */ | ||
136 | wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask); | ||
137 | new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); | ||
138 | |||
139 | /* update epoch for new counter and update epoch_ns from old counter*/ | ||
140 | new_epoch = read(); | ||
141 | cyc = read_sched_clock(); | ||
142 | ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, | ||
143 | cd.mult, cd.shift); | ||
144 | |||
145 | raw_write_seqcount_begin(&cd.seq); | ||
146 | read_sched_clock = read; | ||
147 | sched_clock_mask = new_mask; | ||
148 | cd.rate = rate; | ||
149 | cd.wrap_kt = new_wrap_kt; | ||
150 | cd.mult = new_mult; | ||
151 | cd.shift = new_shift; | ||
152 | cd.epoch_cyc = new_epoch; | ||
153 | cd.epoch_ns = ns; | ||
154 | raw_write_seqcount_end(&cd.seq); | ||
133 | 155 | ||
134 | r = rate; | 156 | r = rate; |
135 | if (r >= 4000000) { | 157 | if (r >= 4000000) { |
@@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
141 | } else | 163 | } else |
142 | r_unit = ' '; | 164 | r_unit = ' '; |
143 | 165 | ||
144 | /* calculate how many ns until we wrap */ | ||
145 | wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask); | ||
146 | cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); | ||
147 | |||
148 | /* calculate the ns resolution of this counter */ | 166 | /* calculate the ns resolution of this counter */ |
149 | res = cyc_to_ns(1ULL, cd.mult, cd.shift); | 167 | res = cyc_to_ns(1ULL, new_mult, new_shift); |
168 | |||
150 | pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n", | 169 | pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n", |
151 | bits, r, r_unit, res, wrap); | 170 | bits, r, r_unit, res, wrap); |
152 | 171 | ||
153 | update_sched_clock(); | ||
154 | |||
155 | /* | ||
156 | * Ensure that sched_clock() starts off at 0ns | ||
157 | */ | ||
158 | cd.epoch_ns = 0; | ||
159 | |||
160 | /* Enable IRQ time accounting if we have a fast enough sched_clock */ | 172 | /* Enable IRQ time accounting if we have a fast enough sched_clock */ |
161 | if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) | 173 | if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) |
162 | enable_sched_clock_irqtime(); | 174 | enable_sched_clock_irqtime(); |
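Context for the sched_clock hunks above: the point of funnelling every field update through one raw_write_seqcount_begin()/raw_write_seqcount_end() section is that the clock read path samples the same fields under the matching read seqcount, so a concurrent reader sees either the complete old state or the complete new state, never a mix of old and new mult/shift/epoch. Roughly, and simplified (the real sched_clock() in this file also handles the suspended case), the read side looks like this:

unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns, epoch_cyc, cyc;
	unsigned long seq;

	do {
		/* retry if the writer above touched cd while we were reading */
		seq = raw_read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}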
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e71ffd4eccb5..f3989ceb5cd5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod)
1777 | { | 1777 | { |
1778 | struct ftrace_event_call **call, **start, **end; | 1778 | struct ftrace_event_call **call, **start, **end; |
1779 | 1779 | ||
1780 | if (!mod->num_trace_events) | ||
1781 | return; | ||
1782 | |||
1783 | /* Don't add infrastructure for mods without tracepoints */ | ||
1784 | if (trace_module_has_bad_taint(mod)) { | ||
1785 | pr_err("%s: module has bad taint, not creating trace events\n", | ||
1786 | mod->name); | ||
1787 | return; | ||
1788 | } | ||
1789 | |||
1780 | start = mod->trace_events; | 1790 | start = mod->trace_events; |
1781 | end = mod->trace_events + mod->num_trace_events; | 1791 | end = mod->trace_events + mod->num_trace_events; |
1782 | 1792 | ||
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 29f26540e9c9..031cc5655a51 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
631 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); | 631 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); |
632 | 632 | ||
633 | #ifdef CONFIG_MODULES | 633 | #ifdef CONFIG_MODULES |
634 | bool trace_module_has_bad_taint(struct module *mod) | ||
635 | { | ||
636 | return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); | ||
637 | } | ||
638 | |||
634 | static int tracepoint_module_coming(struct module *mod) | 639 | static int tracepoint_module_coming(struct module *mod) |
635 | { | 640 | { |
636 | struct tp_module *tp_mod, *iter; | 641 | struct tp_module *tp_mod, *iter; |
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
641 | * module headers (for forced load), to make sure we don't cause a crash. | 646 | * module headers (for forced load), to make sure we don't cause a crash. |
642 | * Staging and out-of-tree GPL modules are fine. | 647 | * Staging and out-of-tree GPL modules are fine. |
643 | */ | 648 | */ |
644 | if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) | 649 | if (trace_module_has_bad_taint(mod)) |
645 | return 0; | 650 | return 0; |
646 | mutex_lock(&tracepoints_mutex); | 651 | mutex_lock(&tracepoints_mutex); |
647 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); | 652 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 240fb62cf394..dd06439b9c84 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
225 | * | 225 | * |
226 | * When there is no mapping defined for the user-namespace uid | 226 | * When there is no mapping defined for the user-namespace uid |
227 | * pair INVALID_UID is returned. Callers are expected to test | 227 | * pair INVALID_UID is returned. Callers are expected to test |
228 | * for and handle handle INVALID_UID being returned. INVALID_UID | 228 | * for and handle INVALID_UID being returned. INVALID_UID |
229 | * may be tested for using uid_valid(). | 229 | * may be tested for using uid_valid(). |
230 | */ | 230 | */ |
231 | kuid_t make_kuid(struct user_namespace *ns, uid_t uid) | 231 | kuid_t make_kuid(struct user_namespace *ns, uid_t uid) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82ef9f3b7473..193e977a10ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker)
1851 | if (worker->flags & WORKER_IDLE) | 1851 | if (worker->flags & WORKER_IDLE) |
1852 | pool->nr_idle--; | 1852 | pool->nr_idle--; |
1853 | 1853 | ||
1854 | /* | ||
1855 | * Once WORKER_DIE is set, the kworker may destroy itself at any | ||
1856 | * point. Pin to ensure the task stays until we're done with it. | ||
1857 | */ | ||
1858 | get_task_struct(worker->task); | ||
1859 | |||
1854 | list_del_init(&worker->entry); | 1860 | list_del_init(&worker->entry); |
1855 | worker->flags |= WORKER_DIE; | 1861 | worker->flags |= WORKER_DIE; |
1856 | 1862 | ||
@@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker)
1859 | spin_unlock_irq(&pool->lock); | 1865 | spin_unlock_irq(&pool->lock); |
1860 | 1866 | ||
1861 | kthread_stop(worker->task); | 1867 | kthread_stop(worker->task); |
1868 | put_task_struct(worker->task); | ||
1862 | kfree(worker); | 1869 | kfree(worker); |
1863 | 1870 | ||
1864 | spin_lock_irq(&pool->lock); | 1871 | spin_lock_irq(&pool->lock); |