Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c             |  2
-rw-r--r--  kernel/cgroup.c              | 60
-rw-r--r--  kernel/irq/Kconfig           |  1
-rw-r--r--  kernel/irq/devres.c          | 45
-rw-r--r--  kernel/irq/irqdesc.c         |  1
-rw-r--r--  kernel/kmod.c                |  2
-rw-r--r--  kernel/power/console.c       |  1
-rw-r--r--  kernel/printk/printk.c       |  2
-rw-r--r--  kernel/time/jiffies.c        |  6
-rw-r--r--  kernel/time/tick-broadcast.c |  1
-rw-r--r--  kernel/trace/ring_buffer.c   |  7
-rw-r--r--  kernel/user_namespace.c      |  2
-rw-r--r--  kernel/workqueue.c           |  7

13 files changed, 104 insertions, 33 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956a..7aef2f4b6c64 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
         struct audit_context *context = current->audit_context;

         BUG_ON(!context);
-        if (!context->in_syscall) {
+        if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
                 printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
                        __FILE__, __LINE__, context->serial, name);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba37f72..105f273b6f86 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                  * per-subsystem and moved to css->id so that lookups are
                  * successful until the target css is released.
                  */
+                mutex_lock(&cgroup_mutex);
                 idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+                mutex_unlock(&cgroup_mutex);
                 cgrp->id = -1;

                 call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1566,10 +1568,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                 mutex_lock(&cgroup_mutex);
                 mutex_lock(&cgroup_root_mutex);

-                root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
-                                          0, 1, GFP_KERNEL);
-                if (root_cgrp->id < 0)
+                ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+                if (ret < 0)
                         goto unlock_drop;
+                root_cgrp->id = ret;

                 /* Check for name clashes with existing mounts */
                 ret = -EBUSY;
@@ -2763,10 +2765,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
          */
         update_before = cgroup_serial_nr_next;

-        mutex_unlock(&cgroup_mutex);
-
         /* add/rm files for all cgroups created before */
-        rcu_read_lock();
         css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
                 struct cgroup *cgrp = css->cgroup;

@@ -2775,23 +2774,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)

                 inode = cgrp->dentry->d_inode;
                 dget(cgrp->dentry);
-                rcu_read_unlock();
-
                 dput(prev);
                 prev = cgrp->dentry;

+                mutex_unlock(&cgroup_mutex);
                 mutex_lock(&inode->i_mutex);
                 mutex_lock(&cgroup_mutex);
                 if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
                         ret = cgroup_addrm_files(cgrp, cfts, is_add);
-                mutex_unlock(&cgroup_mutex);
                 mutex_unlock(&inode->i_mutex);
-
-                rcu_read_lock();
                 if (ret)
                         break;
         }
-        rcu_read_unlock();
+        mutex_unlock(&cgroup_mutex);
         dput(prev);
         deactivate_super(sb);
         return ret;
@@ -2910,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
                  * We should check if the process is exiting, otherwise
                  * it will race with cgroup_exit() in that the list
                  * entry won't be deleted though the process has exited.
+                 * Do it while holding siglock so that we don't end up
+                 * racing against cgroup_exit().
                  */
+                spin_lock_irq(&p->sighand->siglock);
                 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
                         list_add(&p->cg_list, &task_css_set(p)->tasks);
+                spin_unlock_irq(&p->sighand->siglock);
+
                 task_unlock(p);
         } while_each_thread(g, p);
         read_unlock(&tasklist_lock);
@@ -4158,7 +4158,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         struct cgroup *cgrp;
         struct cgroup_name *name;
         struct cgroupfs_root *root = parent->root;
-        int ssid, err = 0;
+        int ssid, err;
         struct cgroup_subsys *ss;
         struct super_block *sb = root->sb;

@@ -4168,19 +4168,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                 return -ENOMEM;

         name = cgroup_alloc_name(dentry);
-        if (!name)
+        if (!name) {
+                err = -ENOMEM;
                 goto err_free_cgrp;
+        }
         rcu_assign_pointer(cgrp->name, name);

         /*
-         * Temporarily set the pointer to NULL, so idr_find() won't return
-         * a half-baked cgroup.
-         */
-        cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
-        if (cgrp->id < 0)
-                goto err_free_name;
-
-        /*
          * Only live parents can have children. Note that the liveliness
          * check isn't strictly necessary because cgroup_mkdir() and
          * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4189,7 +4183,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
          */
         if (!cgroup_lock_live_group(parent)) {
                 err = -ENODEV;
-                goto err_free_id;
+                goto err_free_name;
+        }
+
+        /*
+         * Temporarily set the pointer to NULL, so idr_find() won't return
+         * a half-baked cgroup.
+         */
+        cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+        if (cgrp->id < 0) {
+                err = -ENOMEM;
+                goto err_unlock;
         }

         /* Grab a reference on the superblock so the hierarchy doesn't
@@ -4221,7 +4225,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
          */
         err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
         if (err < 0)
-                goto err_unlock;
+                goto err_free_id;
         lockdep_assert_held(&dentry->d_inode->i_mutex);

         cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4257,12 +4261,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,

         return 0;

-err_unlock:
-        mutex_unlock(&cgroup_mutex);
-        /* Release the reference count that we took on the superblock */
-        deactivate_super(sb);
 err_free_id:
         idr_remove(&root->cgroup_idr, cgrp->id);
+        /* Release the reference count that we took on the superblock */
+        deactivate_super(sb);
+err_unlock:
+        mutex_unlock(&cgroup_mutex);
 err_free_name:
         kfree(rcu_dereference_raw(cgrp->name));
 err_free_cgrp:
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f658..07cbdfea9ae2 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
 # Generic configurable interrupt chip implementation
 config GENERIC_IRQ_CHIP
         bool
+        select IRQ_DOMAIN

 # Generic irq_domain hw <--> linux irq number translation
 config IRQ_DOMAIN
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index bd8e788d71e0..1ef0606797c9 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,
 EXPORT_SYMBOL(devm_request_threaded_irq);

 /**
+ * devm_request_any_context_irq - allocate an interrupt line for a managed device
+ * @dev: device to request interrupt for
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ *             for devices which handle everything in @handler
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * request_any_context_irq(). IRQs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If an IRQ allocated with this function needs to be freed
+ * separately, devm_free_irq() must be used.
+ */
+int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+                                 irq_handler_t handler, unsigned long irqflags,
+                                 const char *devname, void *dev_id)
+{
+        struct irq_devres *dr;
+        int rc;
+
+        dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+                          GFP_KERNEL);
+        if (!dr)
+                return -ENOMEM;
+
+        rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+        if (rc) {
+                devres_free(dr);
+                return rc;
+        }
+
+        dr->irq = irq;
+        dr->dev_id = dev_id;
+        devres_add(dev, dr);
+
+        return 0;
+}
+EXPORT_SYMBOL(devm_request_any_context_irq);
+
+/**
  * devm_free_irq - free an interrupt
  * @dev: device to free interrupt for
  * @irq: Interrupt line to free
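For context, a minimal usage sketch of the new helper follows. It is not part of the patch; the "foo" driver, handler, and probe function below are hypothetical, and only the devm_request_any_context_irq() call itself comes from the code added above.

/*
 * Hypothetical example only -- the "foo" driver is not from this patch.
 * It shows a platform driver requesting an IRQ that may be wired through
 * a threaded/nested irqchip, with devres handling the cleanup.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct platform_device *pdev = dev_id;

        dev_dbg(&pdev->dev, "interrupt %d\n", irq);
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* Freed automatically on driver detach; no free_irq() in remove(). */
        ret = devm_request_any_context_irq(&pdev->dev, irq, foo_irq, 0,
                                           dev_name(&pdev->dev), pdev);
        if (ret < 0)
                return ret;

        return 0;
}

request_any_context_irq() returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success and a negative errno on failure, so the sketch treats only negative returns as errors.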
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..8ab8e9390297 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 {
         return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
+EXPORT_SYMBOL(irq_to_desc);

 static void free_desc(unsigned int irq)
 {
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e7..6b375af4958d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)

         commit_creds(new);

-        retval = do_execve(sub_info->path,
+        retval = do_execve(getname_kernel(sub_info->path),
                            (const char __user *const __user *)sub_info->argv,
                            (const char __user *const __user *)sub_info->envp);
         if (!retval)
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd8cab4..aba9c545a0e3 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
 #include <linux/kbd_kern.h>
 #include <linux/vt.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include "power.h"

 #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f04135..4dae9cbe9259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                 next_seq = log_next_seq;

                 len = 0;
-                prev = 0;
                 while (len >= 0 && seq < next_seq) {
                         struct printk_log *msg = log_from_idx(idx);
                         int textlen;
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
         next_idx = idx;

         l = 0;
-        prev = 0;
         while (seq < dumper->next_seq) {
                 struct printk_log *msg = log_from_idx(idx);

diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7a925ba456fb..a6a5bf53e86d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
  * HZ shrinks, so values greater than 8 overflow 32bits when
  * HZ=100.
  */
+#if HZ < 34
+#define JIFFIES_SHIFT 6
+#elif HZ < 67
+#define JIFFIES_SHIFT 7
+#else
 #define JIFFIES_SHIFT 8
+#endif

 static cycle_t jiffies_read(struct clocksource *cs)
 {
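As a rough sanity check on the new shift ladder (not part of the patch): the jiffies clocksource multiplier is approximately (NSEC_PER_SEC / HZ) << JIFFIES_SHIFT and must fit in 32 bits, which is why lower HZ values need a smaller shift. The standalone program below approximates the kernel's nanoseconds-per-jiffy calculation and checks that the chosen shifts never overflow a u32.

/*
 * Standalone approximation, not kernel code: verify that the JIFFIES_SHIFT
 * ladder above keeps (nanoseconds per jiffy) << shift within 32 bits for a
 * range of HZ values.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned int jiffies_shift(unsigned int hz)
{
        if (hz < 34)
                return 6;
        if (hz < 67)
                return 7;
        return 8;
}

int main(void)
{
        unsigned int hz;

        for (hz = 24; hz <= 1024; hz++) {
                uint64_t nsec_per_jiffy = (NSEC_PER_SEC + hz / 2) / hz;
                uint64_t mult = nsec_per_jiffy << jiffies_shift(hz);

                if (mult > UINT32_MAX) {
                        printf("HZ=%u: mult %llu overflows 32 bits\n",
                               hz, (unsigned long long)mult);
                        return 1;
                }
        }
        printf("multiplier fits in 32 bits for HZ 24..1024\n");
        return 0;
}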
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 43780ab5e279..98977a57ac72 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -756,6 +756,7 @@ out:
 static void tick_broadcast_clear_oneshot(int cpu)
 {
         cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 }

 static void tick_broadcast_init_next_event(struct cpumask *mask,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 294b8a271a04..fc4da2d97f9b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         write &= RB_WRITE_MASK;
         tail = write - length;

+        /*
+         * If this is the first commit on the page, then it has the same
+         * timestamp as the page itself.
+         */
+        if (!tail)
+                delta = 0;
+
         /* See if we shot pass the end of this buffer page */
         if (unlikely(write > BUF_PAGE_SIZE))
                 return rb_move_tail(cpu_buffer, length, tail,
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 240fb62cf394..dd06439b9c84 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
  *
  * When there is no mapping defined for the user-namespace uid
  * pair INVALID_UID is returned. Callers are expected to test
- * for and handle handle INVALID_UID being returned. INVALID_UID
+ * for and handle INVALID_UID being returned. INVALID_UID
  * may be tested for using uid_valid().
  */
 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82ef9f3b7473..193e977a10ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker)
         if (worker->flags & WORKER_IDLE)
                 pool->nr_idle--;

+        /*
+         * Once WORKER_DIE is set, the kworker may destroy itself at any
+         * point. Pin to ensure the task stays until we're done with it.
+         */
+        get_task_struct(worker->task);
+
         list_del_init(&worker->entry);
         worker->flags |= WORKER_DIE;

@@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker)
         spin_unlock_irq(&pool->lock);

         kthread_stop(worker->task);
+        put_task_struct(worker->task);
         kfree(worker);

         spin_lock_irq(&pool->lock);