Diffstat (limited to 'kernel'):

 kernel/audit.c               |   2
 kernel/cgroup.c              |  23
 kernel/cpuset.c              |   2
 kernel/cred.c                |  25
 kernel/debug/debug_core.c    |   2
 kernel/debug/gdbstub.c       |   2
 kernel/module.c              |   4
 kernel/padata.c              | 755
 kernel/pm_qos_params.c       | 215
 kernel/power/hibernate.c     |  26
 kernel/power/main.c          |  55
 kernel/power/snapshot.c      |   2
 kernel/power/suspend.c       |  13
 kernel/power/swap.c          |   6
 kernel/printk.c              |  33
 kernel/signal.c              |   9
 kernel/time/tick-broadcast.c |   2
 kernel/timer.c               |  13
 kernel/user_namespace.c      |  44
 19 files changed, 905 insertions(+), 328 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index c71bd26631a2..8296aa516c5a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -407,7 +407,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
 		audit_hold_skb(skb);
 	} else
 		/* drop the extra reference if sent ok */
-		kfree_skb(skb);
+		consume_skb(skb);
 }
 
 static int kauditd_thread(void *dummy)
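The one-liner above is a semantic fix rather than a leak fix: both functions free the skb, but kfree_skb() is instrumented as a packet drop (it fires the kfree_skb tracepoint that dropwatch and perf watch), while consume_skb() marks a normal end of life. A minimal sketch of the intended split, not taken from this commit and with a hypothetical helper name:

#include <linux/skbuff.h>

/* Sketch: pick the free path by outcome so drop monitors stay accurate. */
static void finish_skb(struct sk_buff *skb, int err)
{
	if (err)
		kfree_skb(skb);		/* failure: counted as a drop */
	else
		consume_skb(skb);	/* success: an ordinary free */
}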
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3ac6f5b0a64b..a8ce09954404 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1788,6 +1788,29 @@ out:
 	return retval;
 }
 
+/**
+ * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * @tsk: the task to be attached
+ */
+int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	struct cgroupfs_root *root;
+	struct cgroup *cur_cg;
+	int retval = 0;
+
+	cgroup_lock();
+	for_each_active_root(root) {
+		cur_cg = task_cgroup_from_root(current, root);
+		retval = cgroup_attach_task(cur_cg, tsk);
+		if (retval)
+			break;
+	}
+	cgroup_unlock();
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
  * held. May take task_lock of task
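A hedged usage sketch for the newly exported helper: it exists so that a module which spawns a worker thread on behalf of a user task can co-locate that worker with its owner in every active hierarchy. The function is real; the caller below is hypothetical.

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Hypothetical module code: move a freshly created worker thread into
 * the cgroups of the task that requested it. */
static int setup_worker(struct task_struct *worker)
{
	int err;

	err = cgroup_attach_task_current_cg(worker);
	if (err)
		printk(KERN_WARNING "worker not moved into caller's cgroups: %d\n", err);
	return err;	/* first error from cgroup_attach_task(), or 0 */
}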
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 02b9611eadde..7cb37d86a005 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -105,7 +105,7 @@ struct cpuset {
 	/* for custom sched domain */
 	int relax_domain_level;
 
-	/* used for walking a cpuset heirarchy */
+	/* used for walking a cpuset hierarchy */
 	struct list_head stack_list;
 };
 
diff --git a/kernel/cred.c b/kernel/cred.c
index a2d5504fbcc2..60bc8b1e32e6 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -209,6 +209,31 @@ void exit_creds(struct task_struct *tsk)
 	}
 }
 
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away. Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must also make sure task doesn't get deleted, either by holding a
+ * ref on task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+const struct cred *get_task_cred(struct task_struct *task)
+{
+	const struct cred *cred;
+
+	rcu_read_lock();
+
+	do {
+		cred = __task_cred((task));
+		BUG_ON(!cred);
+	} while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+
+	rcu_read_unlock();
+	return cred;
+}
+
 /*
  * Allocate blank credentials, such that the credentials can be filled in at a
  * later date without risk of ENOMEM.
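get_task_cred() pairs with put_cred(); the retry loop above is needed because a cred whose usage count has already dropped to zero can still be visible under RCU. A minimal caller sketch (the helper name is hypothetical; note that in this era cred->uid is still a plain uid_t, not a kuid_t):

#include <linux/cred.h>
#include <linux/sched.h>

/* Hypothetical: sample another task's uid without racing commit_creds(). */
static uid_t peek_task_uid(struct task_struct *task)
{
	const struct cred *cred = get_task_cred(task);	/* takes a reference */
	uid_t uid = cred->uid;

	put_cred(cred);					/* drop it when done */
	return uid;
}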
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 8bc5eeffec8a..51d14fe87648 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2000-2001 VERITAS Software Corporation.
  * Copyright (C) 2002-2004 Timesys Corporation
  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
- * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index e8fd6868682d..6e81fd59566b 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2000-2001 VERITAS Software Corporation.
  * Copyright (C) 2002-2004 Timesys Corporation
  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
- * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
diff --git a/kernel/module.c b/kernel/module.c
index 5d2d28197c82..6c562828c85c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -787,7 +787,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
-	ddebug_remove_module(mod->name);
 
 	free_module(mod);
 	return 0;
@@ -1550,6 +1549,9 @@ static void free_module(struct module *mod)
 	remove_sect_attrs(mod);
 	mod_kobject_remove(mod);
 
+	/* Remove dynamic debug info */
+	ddebug_remove_module(mod->name);
+
 	/* Arch-specific cleanup. */
 	module_arch_cleanup(mod);
 
diff --git a/kernel/padata.c b/kernel/padata.c index fdd8ae609ce3..751019415d23 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
| @@ -26,18 +26,19 @@ | |||
| 26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
| 27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/sysfs.h> | ||
| 29 | #include <linux/rcupdate.h> | 30 | #include <linux/rcupdate.h> |
| 30 | 31 | ||
| 31 | #define MAX_SEQ_NR INT_MAX - NR_CPUS | 32 | #define MAX_SEQ_NR (INT_MAX - NR_CPUS) |
| 32 | #define MAX_OBJ_NUM 1000 | 33 | #define MAX_OBJ_NUM 1000 |
| 33 | 34 | ||
| 34 | static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) | 35 | static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) |
| 35 | { | 36 | { |
| 36 | int cpu, target_cpu; | 37 | int cpu, target_cpu; |
| 37 | 38 | ||
| 38 | target_cpu = cpumask_first(pd->cpumask); | 39 | target_cpu = cpumask_first(pd->cpumask.pcpu); |
| 39 | for (cpu = 0; cpu < cpu_index; cpu++) | 40 | for (cpu = 0; cpu < cpu_index; cpu++) |
| 40 | target_cpu = cpumask_next(target_cpu, pd->cpumask); | 41 | target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); |
| 41 | 42 | ||
| 42 | return target_cpu; | 43 | return target_cpu; |
| 43 | } | 44 | } |
| @@ -53,26 +54,27 @@ static int padata_cpu_hash(struct padata_priv *padata) | |||
| 53 | * Hash the sequence numbers to the cpus by taking | 54 | * Hash the sequence numbers to the cpus by taking |
| 54 | * seq_nr mod. number of cpus in use. | 55 | * seq_nr mod. number of cpus in use. |
| 55 | */ | 56 | */ |
| 56 | cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask); | 57 | cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); |
| 57 | 58 | ||
| 58 | return padata_index_to_cpu(pd, cpu_index); | 59 | return padata_index_to_cpu(pd, cpu_index); |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 61 | static void padata_parallel_worker(struct work_struct *work) | 62 | static void padata_parallel_worker(struct work_struct *parallel_work) |
| 62 | { | 63 | { |
| 63 | struct padata_queue *queue; | 64 | struct padata_parallel_queue *pqueue; |
| 64 | struct parallel_data *pd; | 65 | struct parallel_data *pd; |
| 65 | struct padata_instance *pinst; | 66 | struct padata_instance *pinst; |
| 66 | LIST_HEAD(local_list); | 67 | LIST_HEAD(local_list); |
| 67 | 68 | ||
| 68 | local_bh_disable(); | 69 | local_bh_disable(); |
| 69 | queue = container_of(work, struct padata_queue, pwork); | 70 | pqueue = container_of(parallel_work, |
| 70 | pd = queue->pd; | 71 | struct padata_parallel_queue, work); |
| 72 | pd = pqueue->pd; | ||
| 71 | pinst = pd->pinst; | 73 | pinst = pd->pinst; |
| 72 | 74 | ||
| 73 | spin_lock(&queue->parallel.lock); | 75 | spin_lock(&pqueue->parallel.lock); |
| 74 | list_replace_init(&queue->parallel.list, &local_list); | 76 | list_replace_init(&pqueue->parallel.list, &local_list); |
| 75 | spin_unlock(&queue->parallel.lock); | 77 | spin_unlock(&pqueue->parallel.lock); |
| 76 | 78 | ||
| 77 | while (!list_empty(&local_list)) { | 79 | while (!list_empty(&local_list)) { |
| 78 | struct padata_priv *padata; | 80 | struct padata_priv *padata; |
| @@ -94,7 +96,7 @@ static void padata_parallel_worker(struct work_struct *work) | |||
| 94 | * @pinst: padata instance | 96 | * @pinst: padata instance |
| 95 | * @padata: object to be parallelized | 97 | * @padata: object to be parallelized |
| 96 | * @cb_cpu: cpu the serialization callback function will run on, | 98 | * @cb_cpu: cpu the serialization callback function will run on, |
| 97 | * must be in the cpumask of padata. | 99 | * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). |
| 98 | * | 100 | * |
| 99 | * The parallelization callback function will run with BHs off. | 101 | * The parallelization callback function will run with BHs off. |
| 100 | * Note: Every object which is parallelized by padata_do_parallel | 102 | * Note: Every object which is parallelized by padata_do_parallel |
| @@ -104,15 +106,18 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
| 104 | struct padata_priv *padata, int cb_cpu) | 106 | struct padata_priv *padata, int cb_cpu) |
| 105 | { | 107 | { |
| 106 | int target_cpu, err; | 108 | int target_cpu, err; |
| 107 | struct padata_queue *queue; | 109 | struct padata_parallel_queue *queue; |
| 108 | struct parallel_data *pd; | 110 | struct parallel_data *pd; |
| 109 | 111 | ||
| 110 | rcu_read_lock_bh(); | 112 | rcu_read_lock_bh(); |
| 111 | 113 | ||
| 112 | pd = rcu_dereference(pinst->pd); | 114 | pd = rcu_dereference(pinst->pd); |
| 113 | 115 | ||
| 114 | err = 0; | 116 | err = -EINVAL; |
| 115 | if (!(pinst->flags & PADATA_INIT)) | 117 | if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) |
| 118 | goto out; | ||
| 119 | |||
| 120 | if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) | ||
| 116 | goto out; | 121 | goto out; |
| 117 | 122 | ||
| 118 | err = -EBUSY; | 123 | err = -EBUSY; |
| @@ -122,11 +127,7 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
| 122 | if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) | 127 | if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) |
| 123 | goto out; | 128 | goto out; |
| 124 | 129 | ||
| 125 | err = -EINVAL; | 130 | err = 0; |
| 126 | if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) | ||
| 127 | goto out; | ||
| 128 | |||
| 129 | err = -EINPROGRESS; | ||
| 130 | atomic_inc(&pd->refcnt); | 131 | atomic_inc(&pd->refcnt); |
| 131 | padata->pd = pd; | 132 | padata->pd = pd; |
| 132 | padata->cb_cpu = cb_cpu; | 133 | padata->cb_cpu = cb_cpu; |
| @@ -137,13 +138,13 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
| 137 | padata->seq_nr = atomic_inc_return(&pd->seq_nr); | 138 | padata->seq_nr = atomic_inc_return(&pd->seq_nr); |
| 138 | 139 | ||
| 139 | target_cpu = padata_cpu_hash(padata); | 140 | target_cpu = padata_cpu_hash(padata); |
| 140 | queue = per_cpu_ptr(pd->queue, target_cpu); | 141 | queue = per_cpu_ptr(pd->pqueue, target_cpu); |
| 141 | 142 | ||
| 142 | spin_lock(&queue->parallel.lock); | 143 | spin_lock(&queue->parallel.lock); |
| 143 | list_add_tail(&padata->list, &queue->parallel.list); | 144 | list_add_tail(&padata->list, &queue->parallel.list); |
| 144 | spin_unlock(&queue->parallel.lock); | 145 | spin_unlock(&queue->parallel.lock); |
| 145 | 146 | ||
| 146 | queue_work_on(target_cpu, pinst->wq, &queue->pwork); | 147 | queue_work_on(target_cpu, pinst->wq, &queue->work); |
| 147 | 148 | ||
| 148 | out: | 149 | out: |
| 149 | rcu_read_unlock_bh(); | 150 | rcu_read_unlock_bh(); |
| @@ -171,84 +172,52 @@ EXPORT_SYMBOL(padata_do_parallel); | |||
| 171 | */ | 172 | */ |
| 172 | static struct padata_priv *padata_get_next(struct parallel_data *pd) | 173 | static struct padata_priv *padata_get_next(struct parallel_data *pd) |
| 173 | { | 174 | { |
| 174 | int cpu, num_cpus, empty, calc_seq_nr; | 175 | int cpu, num_cpus; |
| 175 | int seq_nr, next_nr, overrun, next_overrun; | 176 | int next_nr, next_index; |
| 176 | struct padata_queue *queue, *next_queue; | 177 | struct padata_parallel_queue *queue, *next_queue; |
| 177 | struct padata_priv *padata; | 178 | struct padata_priv *padata; |
| 178 | struct padata_list *reorder; | 179 | struct padata_list *reorder; |
| 179 | 180 | ||
| 180 | empty = 0; | 181 | num_cpus = cpumask_weight(pd->cpumask.pcpu); |
| 181 | next_nr = -1; | ||
| 182 | next_overrun = 0; | ||
| 183 | next_queue = NULL; | ||
| 184 | |||
| 185 | num_cpus = cpumask_weight(pd->cpumask); | ||
| 186 | |||
| 187 | for_each_cpu(cpu, pd->cpumask) { | ||
| 188 | queue = per_cpu_ptr(pd->queue, cpu); | ||
| 189 | reorder = &queue->reorder; | ||
| 190 | |||
| 191 | /* | ||
| 192 | * Calculate the seq_nr of the object that should be | ||
| 193 | * next in this reorder queue. | ||
| 194 | */ | ||
| 195 | overrun = 0; | ||
| 196 | calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus) | ||
| 197 | + queue->cpu_index; | ||
| 198 | 182 | ||
| 199 | if (unlikely(calc_seq_nr > pd->max_seq_nr)) { | 183 | /* |
| 200 | calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1; | 184 | * Calculate the percpu reorder queue and the sequence |
| 201 | overrun = 1; | 185 | * number of the next object. |
| 202 | } | 186 | */ |
| 203 | 187 | next_nr = pd->processed; | |
| 204 | if (!list_empty(&reorder->list)) { | 188 | next_index = next_nr % num_cpus; |
| 205 | padata = list_entry(reorder->list.next, | 189 | cpu = padata_index_to_cpu(pd, next_index); |
| 206 | struct padata_priv, list); | 190 | next_queue = per_cpu_ptr(pd->pqueue, cpu); |
| 207 | 191 | ||
| 208 | seq_nr = padata->seq_nr; | 192 | if (unlikely(next_nr > pd->max_seq_nr)) { |
| 209 | BUG_ON(calc_seq_nr != seq_nr); | 193 | next_nr = next_nr - pd->max_seq_nr - 1; |
| 210 | } else { | 194 | next_index = next_nr % num_cpus; |
| 211 | seq_nr = calc_seq_nr; | 195 | cpu = padata_index_to_cpu(pd, next_index); |
| 212 | empty++; | 196 | next_queue = per_cpu_ptr(pd->pqueue, cpu); |
| 213 | } | 197 | pd->processed = 0; |
| 214 | |||
| 215 | if (next_nr < 0 || seq_nr < next_nr | ||
| 216 | || (next_overrun && !overrun)) { | ||
| 217 | next_nr = seq_nr; | ||
| 218 | next_overrun = overrun; | ||
| 219 | next_queue = queue; | ||
| 220 | } | ||
| 221 | } | 198 | } |
| 222 | 199 | ||
| 223 | padata = NULL; | 200 | padata = NULL; |
| 224 | 201 | ||
| 225 | if (empty == num_cpus) | ||
| 226 | goto out; | ||
| 227 | |||
| 228 | reorder = &next_queue->reorder; | 202 | reorder = &next_queue->reorder; |
| 229 | 203 | ||
| 230 | if (!list_empty(&reorder->list)) { | 204 | if (!list_empty(&reorder->list)) { |
| 231 | padata = list_entry(reorder->list.next, | 205 | padata = list_entry(reorder->list.next, |
| 232 | struct padata_priv, list); | 206 | struct padata_priv, list); |
| 233 | 207 | ||
| 234 | if (unlikely(next_overrun)) { | 208 | BUG_ON(next_nr != padata->seq_nr); |
| 235 | for_each_cpu(cpu, pd->cpumask) { | ||
| 236 | queue = per_cpu_ptr(pd->queue, cpu); | ||
| 237 | atomic_set(&queue->num_obj, 0); | ||
| 238 | } | ||
| 239 | } | ||
| 240 | 209 | ||
| 241 | spin_lock(&reorder->lock); | 210 | spin_lock(&reorder->lock); |
| 242 | list_del_init(&padata->list); | 211 | list_del_init(&padata->list); |
| 243 | atomic_dec(&pd->reorder_objects); | 212 | atomic_dec(&pd->reorder_objects); |
| 244 | spin_unlock(&reorder->lock); | 213 | spin_unlock(&reorder->lock); |
| 245 | 214 | ||
| 246 | atomic_inc(&next_queue->num_obj); | 215 | pd->processed++; |
| 247 | 216 | ||
| 248 | goto out; | 217 | goto out; |
| 249 | } | 218 | } |
| 250 | 219 | ||
| 251 | queue = per_cpu_ptr(pd->queue, smp_processor_id()); | 220 | queue = per_cpu_ptr(pd->pqueue, smp_processor_id()); |
| 252 | if (queue->cpu_index == next_queue->cpu_index) { | 221 | if (queue->cpu_index == next_queue->cpu_index) { |
| 253 | padata = ERR_PTR(-ENODATA); | 222 | padata = ERR_PTR(-ENODATA); |
| 254 | goto out; | 223 | goto out; |
| @@ -262,7 +231,7 @@ out: | |||
| 262 | static void padata_reorder(struct parallel_data *pd) | 231 | static void padata_reorder(struct parallel_data *pd) |
| 263 | { | 232 | { |
| 264 | struct padata_priv *padata; | 233 | struct padata_priv *padata; |
| 265 | struct padata_queue *queue; | 234 | struct padata_serial_queue *squeue; |
| 266 | struct padata_instance *pinst = pd->pinst; | 235 | struct padata_instance *pinst = pd->pinst; |
| 267 | 236 | ||
| 268 | /* | 237 | /* |
| @@ -301,13 +270,13 @@ static void padata_reorder(struct parallel_data *pd) | |||
| 301 | return; | 270 | return; |
| 302 | } | 271 | } |
| 303 | 272 | ||
| 304 | queue = per_cpu_ptr(pd->queue, padata->cb_cpu); | 273 | squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); |
| 305 | 274 | ||
| 306 | spin_lock(&queue->serial.lock); | 275 | spin_lock(&squeue->serial.lock); |
| 307 | list_add_tail(&padata->list, &queue->serial.list); | 276 | list_add_tail(&padata->list, &squeue->serial.list); |
| 308 | spin_unlock(&queue->serial.lock); | 277 | spin_unlock(&squeue->serial.lock); |
| 309 | 278 | ||
| 310 | queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork); | 279 | queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); |
| 311 | } | 280 | } |
| 312 | 281 | ||
| 313 | spin_unlock_bh(&pd->lock); | 282 | spin_unlock_bh(&pd->lock); |
| @@ -333,19 +302,19 @@ static void padata_reorder_timer(unsigned long arg) | |||
| 333 | padata_reorder(pd); | 302 | padata_reorder(pd); |
| 334 | } | 303 | } |
| 335 | 304 | ||
| 336 | static void padata_serial_worker(struct work_struct *work) | 305 | static void padata_serial_worker(struct work_struct *serial_work) |
| 337 | { | 306 | { |
| 338 | struct padata_queue *queue; | 307 | struct padata_serial_queue *squeue; |
| 339 | struct parallel_data *pd; | 308 | struct parallel_data *pd; |
| 340 | LIST_HEAD(local_list); | 309 | LIST_HEAD(local_list); |
| 341 | 310 | ||
| 342 | local_bh_disable(); | 311 | local_bh_disable(); |
| 343 | queue = container_of(work, struct padata_queue, swork); | 312 | squeue = container_of(serial_work, struct padata_serial_queue, work); |
| 344 | pd = queue->pd; | 313 | pd = squeue->pd; |
| 345 | 314 | ||
| 346 | spin_lock(&queue->serial.lock); | 315 | spin_lock(&squeue->serial.lock); |
| 347 | list_replace_init(&queue->serial.list, &local_list); | 316 | list_replace_init(&squeue->serial.list, &local_list); |
| 348 | spin_unlock(&queue->serial.lock); | 317 | spin_unlock(&squeue->serial.lock); |
| 349 | 318 | ||
| 350 | while (!list_empty(&local_list)) { | 319 | while (!list_empty(&local_list)) { |
| 351 | struct padata_priv *padata; | 320 | struct padata_priv *padata; |
| @@ -372,18 +341,18 @@ static void padata_serial_worker(struct work_struct *work) | |||
| 372 | void padata_do_serial(struct padata_priv *padata) | 341 | void padata_do_serial(struct padata_priv *padata) |
| 373 | { | 342 | { |
| 374 | int cpu; | 343 | int cpu; |
| 375 | struct padata_queue *queue; | 344 | struct padata_parallel_queue *pqueue; |
| 376 | struct parallel_data *pd; | 345 | struct parallel_data *pd; |
| 377 | 346 | ||
| 378 | pd = padata->pd; | 347 | pd = padata->pd; |
| 379 | 348 | ||
| 380 | cpu = get_cpu(); | 349 | cpu = get_cpu(); |
| 381 | queue = per_cpu_ptr(pd->queue, cpu); | 350 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
| 382 | 351 | ||
| 383 | spin_lock(&queue->reorder.lock); | 352 | spin_lock(&pqueue->reorder.lock); |
| 384 | atomic_inc(&pd->reorder_objects); | 353 | atomic_inc(&pd->reorder_objects); |
| 385 | list_add_tail(&padata->list, &queue->reorder.list); | 354 | list_add_tail(&padata->list, &pqueue->reorder.list); |
| 386 | spin_unlock(&queue->reorder.lock); | 355 | spin_unlock(&pqueue->reorder.lock); |
| 387 | 356 | ||
| 388 | put_cpu(); | 357 | put_cpu(); |
| 389 | 358 | ||
| @@ -391,52 +360,89 @@ void padata_do_serial(struct padata_priv *padata) | |||
| 391 | } | 360 | } |
| 392 | EXPORT_SYMBOL(padata_do_serial); | 361 | EXPORT_SYMBOL(padata_do_serial); |
| 393 | 362 | ||
| 394 | /* Allocate and initialize the internal cpumask dependend resources. */ | 363 | static int padata_setup_cpumasks(struct parallel_data *pd, |
| 395 | static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | 364 | const struct cpumask *pcpumask, |
| 396 | const struct cpumask *cpumask) | 365 | const struct cpumask *cbcpumask) |
| 397 | { | 366 | { |
| 398 | int cpu, cpu_index, num_cpus; | 367 | if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) |
| 399 | struct padata_queue *queue; | 368 | return -ENOMEM; |
| 400 | struct parallel_data *pd; | ||
| 401 | |||
| 402 | cpu_index = 0; | ||
| 403 | 369 | ||
| 404 | pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); | 370 | cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask); |
| 405 | if (!pd) | 371 | if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { |
| 406 | goto err; | 372 | free_cpumask_var(pd->cpumask.cbcpu); |
| 373 | return -ENOMEM; | ||
| 374 | } | ||
| 407 | 375 | ||
| 408 | pd->queue = alloc_percpu(struct padata_queue); | 376 | cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask); |
| 409 | if (!pd->queue) | 377 | return 0; |
| 410 | goto err_free_pd; | 378 | } |
| 411 | 379 | ||
| 412 | if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL)) | 380 | static void __padata_list_init(struct padata_list *pd_list) |
| 413 | goto err_free_queue; | 381 | { |
| 382 | INIT_LIST_HEAD(&pd_list->list); | ||
| 383 | spin_lock_init(&pd_list->lock); | ||
| 384 | } | ||
| 414 | 385 | ||
| 415 | cpumask_and(pd->cpumask, cpumask, cpu_active_mask); | 386 | /* Initialize all percpu queues used by serial workers */ |
| 387 | static void padata_init_squeues(struct parallel_data *pd) | ||
| 388 | { | ||
| 389 | int cpu; | ||
| 390 | struct padata_serial_queue *squeue; | ||
| 416 | 391 | ||
| 417 | for_each_cpu(cpu, pd->cpumask) { | 392 | for_each_cpu(cpu, pd->cpumask.cbcpu) { |
| 418 | queue = per_cpu_ptr(pd->queue, cpu); | 393 | squeue = per_cpu_ptr(pd->squeue, cpu); |
| 394 | squeue->pd = pd; | ||
| 395 | __padata_list_init(&squeue->serial); | ||
| 396 | INIT_WORK(&squeue->work, padata_serial_worker); | ||
| 397 | } | ||
| 398 | } | ||
| 419 | 399 | ||
| 420 | queue->pd = pd; | 400 | /* Initialize all percpu queues used by parallel workers */ |
| 401 | static void padata_init_pqueues(struct parallel_data *pd) | ||
| 402 | { | ||
| 403 | int cpu_index, num_cpus, cpu; | ||
| 404 | struct padata_parallel_queue *pqueue; | ||
| 421 | 405 | ||
| 422 | queue->cpu_index = cpu_index; | 406 | cpu_index = 0; |
| 407 | for_each_cpu(cpu, pd->cpumask.pcpu) { | ||
| 408 | pqueue = per_cpu_ptr(pd->pqueue, cpu); | ||
| 409 | pqueue->pd = pd; | ||
| 410 | pqueue->cpu_index = cpu_index; | ||
| 423 | cpu_index++; | 411 | cpu_index++; |
| 424 | 412 | ||
| 425 | INIT_LIST_HEAD(&queue->reorder.list); | 413 | __padata_list_init(&pqueue->reorder); |
| 426 | INIT_LIST_HEAD(&queue->parallel.list); | 414 | __padata_list_init(&pqueue->parallel); |
| 427 | INIT_LIST_HEAD(&queue->serial.list); | 415 | INIT_WORK(&pqueue->work, padata_parallel_worker); |
| 428 | spin_lock_init(&queue->reorder.lock); | 416 | atomic_set(&pqueue->num_obj, 0); |
| 429 | spin_lock_init(&queue->parallel.lock); | ||
| 430 | spin_lock_init(&queue->serial.lock); | ||
| 431 | |||
| 432 | INIT_WORK(&queue->pwork, padata_parallel_worker); | ||
| 433 | INIT_WORK(&queue->swork, padata_serial_worker); | ||
| 434 | atomic_set(&queue->num_obj, 0); | ||
| 435 | } | 417 | } |
| 436 | 418 | ||
| 437 | num_cpus = cpumask_weight(pd->cpumask); | 419 | num_cpus = cpumask_weight(pd->cpumask.pcpu); |
| 438 | pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; | 420 | pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0; |
| 421 | } | ||
| 422 | |||
| 423 | /* Allocate and initialize the internal cpumask dependend resources. */ | ||
| 424 | static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | ||
| 425 | const struct cpumask *pcpumask, | ||
| 426 | const struct cpumask *cbcpumask) | ||
| 427 | { | ||
| 428 | struct parallel_data *pd; | ||
| 439 | 429 | ||
| 430 | pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); | ||
| 431 | if (!pd) | ||
| 432 | goto err; | ||
| 433 | |||
| 434 | pd->pqueue = alloc_percpu(struct padata_parallel_queue); | ||
| 435 | if (!pd->pqueue) | ||
| 436 | goto err_free_pd; | ||
| 437 | |||
| 438 | pd->squeue = alloc_percpu(struct padata_serial_queue); | ||
| 439 | if (!pd->squeue) | ||
| 440 | goto err_free_pqueue; | ||
| 441 | if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) | ||
| 442 | goto err_free_squeue; | ||
| 443 | |||
| 444 | padata_init_pqueues(pd); | ||
| 445 | padata_init_squeues(pd); | ||
| 440 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); | 446 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); |
| 441 | atomic_set(&pd->seq_nr, -1); | 447 | atomic_set(&pd->seq_nr, -1); |
| 442 | atomic_set(&pd->reorder_objects, 0); | 448 | atomic_set(&pd->reorder_objects, 0); |
| @@ -446,8 +452,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | |||
| 446 | 452 | ||
| 447 | return pd; | 453 | return pd; |
| 448 | 454 | ||
| 449 | err_free_queue: | 455 | err_free_squeue: |
| 450 | free_percpu(pd->queue); | 456 | free_percpu(pd->squeue); |
| 457 | err_free_pqueue: | ||
| 458 | free_percpu(pd->pqueue); | ||
| 451 | err_free_pd: | 459 | err_free_pd: |
| 452 | kfree(pd); | 460 | kfree(pd); |
| 453 | err: | 461 | err: |
| @@ -456,8 +464,10 @@ err: | |||
| 456 | 464 | ||
| 457 | static void padata_free_pd(struct parallel_data *pd) | 465 | static void padata_free_pd(struct parallel_data *pd) |
| 458 | { | 466 | { |
| 459 | free_cpumask_var(pd->cpumask); | 467 | free_cpumask_var(pd->cpumask.pcpu); |
| 460 | free_percpu(pd->queue); | 468 | free_cpumask_var(pd->cpumask.cbcpu); |
| 469 | free_percpu(pd->pqueue); | ||
| 470 | free_percpu(pd->squeue); | ||
| 461 | kfree(pd); | 471 | kfree(pd); |
| 462 | } | 472 | } |
| 463 | 473 | ||
| @@ -465,11 +475,12 @@ static void padata_free_pd(struct parallel_data *pd) | |||
| 465 | static void padata_flush_queues(struct parallel_data *pd) | 475 | static void padata_flush_queues(struct parallel_data *pd) |
| 466 | { | 476 | { |
| 467 | int cpu; | 477 | int cpu; |
| 468 | struct padata_queue *queue; | 478 | struct padata_parallel_queue *pqueue; |
| 479 | struct padata_serial_queue *squeue; | ||
| 469 | 480 | ||
| 470 | for_each_cpu(cpu, pd->cpumask) { | 481 | for_each_cpu(cpu, pd->cpumask.pcpu) { |
| 471 | queue = per_cpu_ptr(pd->queue, cpu); | 482 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
| 472 | flush_work(&queue->pwork); | 483 | flush_work(&pqueue->work); |
| 473 | } | 484 | } |
| 474 | 485 | ||
| 475 | del_timer_sync(&pd->timer); | 486 | del_timer_sync(&pd->timer); |
| @@ -477,19 +488,39 @@ static void padata_flush_queues(struct parallel_data *pd) | |||
| 477 | if (atomic_read(&pd->reorder_objects)) | 488 | if (atomic_read(&pd->reorder_objects)) |
| 478 | padata_reorder(pd); | 489 | padata_reorder(pd); |
| 479 | 490 | ||
| 480 | for_each_cpu(cpu, pd->cpumask) { | 491 | for_each_cpu(cpu, pd->cpumask.cbcpu) { |
| 481 | queue = per_cpu_ptr(pd->queue, cpu); | 492 | squeue = per_cpu_ptr(pd->squeue, cpu); |
| 482 | flush_work(&queue->swork); | 493 | flush_work(&squeue->work); |
| 483 | } | 494 | } |
| 484 | 495 | ||
| 485 | BUG_ON(atomic_read(&pd->refcnt) != 0); | 496 | BUG_ON(atomic_read(&pd->refcnt) != 0); |
| 486 | } | 497 | } |
| 487 | 498 | ||
| 499 | static void __padata_start(struct padata_instance *pinst) | ||
| 500 | { | ||
| 501 | pinst->flags |= PADATA_INIT; | ||
| 502 | } | ||
| 503 | |||
| 504 | static void __padata_stop(struct padata_instance *pinst) | ||
| 505 | { | ||
| 506 | if (!(pinst->flags & PADATA_INIT)) | ||
| 507 | return; | ||
| 508 | |||
| 509 | pinst->flags &= ~PADATA_INIT; | ||
| 510 | |||
| 511 | synchronize_rcu(); | ||
| 512 | |||
| 513 | get_online_cpus(); | ||
| 514 | padata_flush_queues(pinst->pd); | ||
| 515 | put_online_cpus(); | ||
| 516 | } | ||
| 517 | |||
| 488 | /* Replace the internal control stucture with a new one. */ | 518 | /* Replace the internal control stucture with a new one. */ |
| 489 | static void padata_replace(struct padata_instance *pinst, | 519 | static void padata_replace(struct padata_instance *pinst, |
| 490 | struct parallel_data *pd_new) | 520 | struct parallel_data *pd_new) |
| 491 | { | 521 | { |
| 492 | struct parallel_data *pd_old = pinst->pd; | 522 | struct parallel_data *pd_old = pinst->pd; |
| 523 | int notification_mask = 0; | ||
| 493 | 524 | ||
| 494 | pinst->flags |= PADATA_RESET; | 525 | pinst->flags |= PADATA_RESET; |
| 495 | 526 | ||
| @@ -497,41 +528,162 @@ static void padata_replace(struct padata_instance *pinst, | |||
| 497 | 528 | ||
| 498 | synchronize_rcu(); | 529 | synchronize_rcu(); |
| 499 | 530 | ||
| 531 | if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) | ||
| 532 | notification_mask |= PADATA_CPU_PARALLEL; | ||
| 533 | if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) | ||
| 534 | notification_mask |= PADATA_CPU_SERIAL; | ||
| 535 | |||
| 500 | padata_flush_queues(pd_old); | 536 | padata_flush_queues(pd_old); |
| 501 | padata_free_pd(pd_old); | 537 | padata_free_pd(pd_old); |
| 502 | 538 | ||
| 539 | if (notification_mask) | ||
| 540 | blocking_notifier_call_chain(&pinst->cpumask_change_notifier, | ||
| 541 | notification_mask, | ||
| 542 | &pd_new->cpumask); | ||
| 543 | |||
| 503 | pinst->flags &= ~PADATA_RESET; | 544 | pinst->flags &= ~PADATA_RESET; |
| 504 | } | 545 | } |
| 505 | 546 | ||
| 506 | /** | 547 | /** |
| 507 | * padata_set_cpumask - set the cpumask that padata should use | 548 | * padata_register_cpumask_notifier - Registers a notifier that will be called |
| 549 | * if either pcpu or cbcpu or both cpumasks change. | ||
| 508 | * | 550 | * |
| 509 | * @pinst: padata instance | 551 | * @pinst: A poineter to padata instance |
| 510 | * @cpumask: the cpumask to use | 552 | * @nblock: A pointer to notifier block. |
| 511 | */ | 553 | */ |
| 512 | int padata_set_cpumask(struct padata_instance *pinst, | 554 | int padata_register_cpumask_notifier(struct padata_instance *pinst, |
| 513 | cpumask_var_t cpumask) | 555 | struct notifier_block *nblock) |
| 514 | { | 556 | { |
| 557 | return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, | ||
| 558 | nblock); | ||
| 559 | } | ||
| 560 | EXPORT_SYMBOL(padata_register_cpumask_notifier); | ||
| 561 | |||
| 562 | /** | ||
| 563 | * padata_unregister_cpumask_notifier - Unregisters cpumask notifier | ||
| 564 | * registered earlier using padata_register_cpumask_notifier | ||
| 565 | * | ||
| 566 | * @pinst: A pointer to data instance. | ||
| 567 | * @nlock: A pointer to notifier block. | ||
| 568 | */ | ||
| 569 | int padata_unregister_cpumask_notifier(struct padata_instance *pinst, | ||
| 570 | struct notifier_block *nblock) | ||
| 571 | { | ||
| 572 | return blocking_notifier_chain_unregister( | ||
| 573 | &pinst->cpumask_change_notifier, | ||
| 574 | nblock); | ||
| 575 | } | ||
| 576 | EXPORT_SYMBOL(padata_unregister_cpumask_notifier); | ||
| 577 | |||
| 578 | |||
| 579 | /* If cpumask contains no active cpu, we mark the instance as invalid. */ | ||
| 580 | static bool padata_validate_cpumask(struct padata_instance *pinst, | ||
| 581 | const struct cpumask *cpumask) | ||
| 582 | { | ||
| 583 | if (!cpumask_intersects(cpumask, cpu_active_mask)) { | ||
| 584 | pinst->flags |= PADATA_INVALID; | ||
| 585 | return false; | ||
| 586 | } | ||
| 587 | |||
| 588 | pinst->flags &= ~PADATA_INVALID; | ||
| 589 | return true; | ||
| 590 | } | ||
| 591 | |||
| 592 | static int __padata_set_cpumasks(struct padata_instance *pinst, | ||
| 593 | cpumask_var_t pcpumask, | ||
| 594 | cpumask_var_t cbcpumask) | ||
| 595 | { | ||
| 596 | int valid; | ||
| 515 | struct parallel_data *pd; | 597 | struct parallel_data *pd; |
| 516 | int err = 0; | 598 | |
| 599 | valid = padata_validate_cpumask(pinst, pcpumask); | ||
| 600 | if (!valid) { | ||
| 601 | __padata_stop(pinst); | ||
| 602 | goto out_replace; | ||
| 603 | } | ||
| 604 | |||
| 605 | valid = padata_validate_cpumask(pinst, cbcpumask); | ||
| 606 | if (!valid) | ||
| 607 | __padata_stop(pinst); | ||
| 608 | |||
| 609 | out_replace: | ||
| 610 | pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); | ||
| 611 | if (!pd) | ||
| 612 | return -ENOMEM; | ||
| 613 | |||
| 614 | cpumask_copy(pinst->cpumask.pcpu, pcpumask); | ||
| 615 | cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); | ||
| 616 | |||
| 617 | padata_replace(pinst, pd); | ||
| 618 | |||
| 619 | if (valid) | ||
| 620 | __padata_start(pinst); | ||
| 621 | |||
| 622 | return 0; | ||
| 623 | } | ||
| 624 | |||
| 625 | /** | ||
| 626 | * padata_set_cpumasks - Set both parallel and serial cpumasks. The first | ||
| 627 | * one is used by parallel workers and the second one | ||
| 628 | * by the wokers doing serialization. | ||
| 629 | * | ||
| 630 | * @pinst: padata instance | ||
| 631 | * @pcpumask: the cpumask to use for parallel workers | ||
| 632 | * @cbcpumask: the cpumsak to use for serial workers | ||
| 633 | */ | ||
| 634 | int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, | ||
| 635 | cpumask_var_t cbcpumask) | ||
| 636 | { | ||
| 637 | int err; | ||
| 517 | 638 | ||
| 518 | mutex_lock(&pinst->lock); | 639 | mutex_lock(&pinst->lock); |
| 640 | get_online_cpus(); | ||
| 519 | 641 | ||
| 642 | err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); | ||
| 643 | |||
| 644 | put_online_cpus(); | ||
| 645 | mutex_unlock(&pinst->lock); | ||
| 646 | |||
| 647 | return err; | ||
| 648 | |||
| 649 | } | ||
| 650 | EXPORT_SYMBOL(padata_set_cpumasks); | ||
| 651 | |||
| 652 | /** | ||
| 653 | * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value | ||
| 654 | * equivalent to @cpumask. | ||
| 655 | * | ||
| 656 | * @pinst: padata instance | ||
| 657 | * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding | ||
| 658 | * to parallel and serial cpumasks respectively. | ||
| 659 | * @cpumask: the cpumask to use | ||
| 660 | */ | ||
| 661 | int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, | ||
| 662 | cpumask_var_t cpumask) | ||
| 663 | { | ||
| 664 | struct cpumask *serial_mask, *parallel_mask; | ||
| 665 | int err = -EINVAL; | ||
| 666 | |||
| 667 | mutex_lock(&pinst->lock); | ||
| 520 | get_online_cpus(); | 668 | get_online_cpus(); |
| 521 | 669 | ||
| 522 | pd = padata_alloc_pd(pinst, cpumask); | 670 | switch (cpumask_type) { |
| 523 | if (!pd) { | 671 | case PADATA_CPU_PARALLEL: |
| 524 | err = -ENOMEM; | 672 | serial_mask = pinst->cpumask.cbcpu; |
| 525 | goto out; | 673 | parallel_mask = cpumask; |
| 674 | break; | ||
| 675 | case PADATA_CPU_SERIAL: | ||
| 676 | parallel_mask = pinst->cpumask.pcpu; | ||
| 677 | serial_mask = cpumask; | ||
| 678 | break; | ||
| 679 | default: | ||
| 680 | goto out; | ||
| 526 | } | 681 | } |
| 527 | 682 | ||
| 528 | cpumask_copy(pinst->cpumask, cpumask); | 683 | err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); |
| 529 | |||
| 530 | padata_replace(pinst, pd); | ||
| 531 | 684 | ||
| 532 | out: | 685 | out: |
| 533 | put_online_cpus(); | 686 | put_online_cpus(); |
| 534 | |||
| 535 | mutex_unlock(&pinst->lock); | 687 | mutex_unlock(&pinst->lock); |
| 536 | 688 | ||
| 537 | return err; | 689 | return err; |
| @@ -543,30 +695,48 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu) | |||
| 543 | struct parallel_data *pd; | 695 | struct parallel_data *pd; |
| 544 | 696 | ||
| 545 | if (cpumask_test_cpu(cpu, cpu_active_mask)) { | 697 | if (cpumask_test_cpu(cpu, cpu_active_mask)) { |
| 546 | pd = padata_alloc_pd(pinst, pinst->cpumask); | 698 | pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, |
| 699 | pinst->cpumask.cbcpu); | ||
| 547 | if (!pd) | 700 | if (!pd) |
| 548 | return -ENOMEM; | 701 | return -ENOMEM; |
| 549 | 702 | ||
| 550 | padata_replace(pinst, pd); | 703 | padata_replace(pinst, pd); |
| 704 | |||
| 705 | if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && | ||
| 706 | padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) | ||
| 707 | __padata_start(pinst); | ||
| 551 | } | 708 | } |
| 552 | 709 | ||
| 553 | return 0; | 710 | return 0; |
| 554 | } | 711 | } |
| 555 | 712 | ||
| 556 | /** | 713 | /** |
| 557 | * padata_add_cpu - add a cpu to the padata cpumask | 714 | * padata_add_cpu - add a cpu to one or both(parallel and serial) |
| 715 | * padata cpumasks. | ||
| 558 | * | 716 | * |
| 559 | * @pinst: padata instance | 717 | * @pinst: padata instance |
| 560 | * @cpu: cpu to add | 718 | * @cpu: cpu to add |
| 719 | * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added. | ||
| 720 | * The @mask may be any combination of the following flags: | ||
| 721 | * PADATA_CPU_SERIAL - serial cpumask | ||
| 722 | * PADATA_CPU_PARALLEL - parallel cpumask | ||
| 561 | */ | 723 | */ |
| 562 | int padata_add_cpu(struct padata_instance *pinst, int cpu) | 724 | |
| 725 | int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) | ||
| 563 | { | 726 | { |
| 564 | int err; | 727 | int err; |
| 565 | 728 | ||
| 729 | if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) | ||
| 730 | return -EINVAL; | ||
| 731 | |||
| 566 | mutex_lock(&pinst->lock); | 732 | mutex_lock(&pinst->lock); |
| 567 | 733 | ||
| 568 | get_online_cpus(); | 734 | get_online_cpus(); |
| 569 | cpumask_set_cpu(cpu, pinst->cpumask); | 735 | if (mask & PADATA_CPU_SERIAL) |
| 736 | cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); | ||
| 737 | if (mask & PADATA_CPU_PARALLEL) | ||
| 738 | cpumask_set_cpu(cpu, pinst->cpumask.pcpu); | ||
| 739 | |||
| 570 | err = __padata_add_cpu(pinst, cpu); | 740 | err = __padata_add_cpu(pinst, cpu); |
| 571 | put_online_cpus(); | 741 | put_online_cpus(); |
| 572 | 742 | ||
| @@ -578,10 +748,16 @@ EXPORT_SYMBOL(padata_add_cpu); | |||
| 578 | 748 | ||
| 579 | static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) | 749 | static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) |
| 580 | { | 750 | { |
| 581 | struct parallel_data *pd; | 751 | struct parallel_data *pd = NULL; |
| 582 | 752 | ||
| 583 | if (cpumask_test_cpu(cpu, cpu_online_mask)) { | 753 | if (cpumask_test_cpu(cpu, cpu_online_mask)) { |
| 584 | pd = padata_alloc_pd(pinst, pinst->cpumask); | 754 | |
| 755 | if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || | ||
| 756 | !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) | ||
| 757 | __padata_stop(pinst); | ||
| 758 | |||
| 759 | pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, | ||
| 760 | pinst->cpumask.cbcpu); | ||
| 585 | if (!pd) | 761 | if (!pd) |
| 586 | return -ENOMEM; | 762 | return -ENOMEM; |
| 587 | 763 | ||
| @@ -591,20 +767,32 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) | |||
| 591 | return 0; | 767 | return 0; |
| 592 | } | 768 | } |
| 593 | 769 | ||
| 594 | /** | 770 | /** |
| 595 | * padata_remove_cpu - remove a cpu from the padata cpumask | 771 | * padata_remove_cpu - remove a cpu from the one or both(serial and paralell) |
| 772 | * padata cpumasks. | ||
| 596 | * | 773 | * |
| 597 | * @pinst: padata instance | 774 | * @pinst: padata instance |
| 598 | * @cpu: cpu to remove | 775 | * @cpu: cpu to remove |
| 776 | * @mask: bitmask specifying from which cpumask @cpu should be removed | ||
| 777 | * The @mask may be any combination of the following flags: | ||
| 778 | * PADATA_CPU_SERIAL - serial cpumask | ||
| 779 | * PADATA_CPU_PARALLEL - parallel cpumask | ||
| 599 | */ | 780 | */ |
| 600 | int padata_remove_cpu(struct padata_instance *pinst, int cpu) | 781 | int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) |
| 601 | { | 782 | { |
| 602 | int err; | 783 | int err; |
| 603 | 784 | ||
| 785 | if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) | ||
| 786 | return -EINVAL; | ||
| 787 | |||
| 604 | mutex_lock(&pinst->lock); | 788 | mutex_lock(&pinst->lock); |
| 605 | 789 | ||
| 606 | get_online_cpus(); | 790 | get_online_cpus(); |
| 607 | cpumask_clear_cpu(cpu, pinst->cpumask); | 791 | if (mask & PADATA_CPU_SERIAL) |
| 792 | cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); | ||
| 793 | if (mask & PADATA_CPU_PARALLEL) | ||
| 794 | cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); | ||
| 795 | |||
| 608 | err = __padata_remove_cpu(pinst, cpu); | 796 | err = __padata_remove_cpu(pinst, cpu); |
| 609 | put_online_cpus(); | 797 | put_online_cpus(); |
| 610 | 798 | ||
| @@ -619,11 +807,20 @@ EXPORT_SYMBOL(padata_remove_cpu); | |||
| 619 | * | 807 | * |
| 620 | * @pinst: padata instance to start | 808 | * @pinst: padata instance to start |
| 621 | */ | 809 | */ |
| 622 | void padata_start(struct padata_instance *pinst) | 810 | int padata_start(struct padata_instance *pinst) |
| 623 | { | 811 | { |
| 812 | int err = 0; | ||
| 813 | |||
| 624 | mutex_lock(&pinst->lock); | 814 | mutex_lock(&pinst->lock); |
| 625 | pinst->flags |= PADATA_INIT; | 815 | |
| 816 | if (pinst->flags & PADATA_INVALID) | ||
| 817 | err =-EINVAL; | ||
| 818 | |||
| 819 | __padata_start(pinst); | ||
| 820 | |||
| 626 | mutex_unlock(&pinst->lock); | 821 | mutex_unlock(&pinst->lock); |
| 822 | |||
| 823 | return err; | ||
| 627 | } | 824 | } |
| 628 | EXPORT_SYMBOL(padata_start); | 825 | EXPORT_SYMBOL(padata_start); |
| 629 | 826 | ||
| @@ -635,12 +832,20 @@ EXPORT_SYMBOL(padata_start); | |||
| 635 | void padata_stop(struct padata_instance *pinst) | 832 | void padata_stop(struct padata_instance *pinst) |
| 636 | { | 833 | { |
| 637 | mutex_lock(&pinst->lock); | 834 | mutex_lock(&pinst->lock); |
| 638 | pinst->flags &= ~PADATA_INIT; | 835 | __padata_stop(pinst); |
| 639 | mutex_unlock(&pinst->lock); | 836 | mutex_unlock(&pinst->lock); |
| 640 | } | 837 | } |
| 641 | EXPORT_SYMBOL(padata_stop); | 838 | EXPORT_SYMBOL(padata_stop); |
| 642 | 839 | ||
| 643 | #ifdef CONFIG_HOTPLUG_CPU | 840 | #ifdef CONFIG_HOTPLUG_CPU |
| 841 | |||
| 842 | static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) | ||
| 843 | { | ||
| 844 | return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || | ||
| 845 | cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); | ||
| 846 | } | ||
| 847 | |||
| 848 | |||
| 644 | static int padata_cpu_callback(struct notifier_block *nfb, | 849 | static int padata_cpu_callback(struct notifier_block *nfb, |
| 645 | unsigned long action, void *hcpu) | 850 | unsigned long action, void *hcpu) |
| 646 | { | 851 | { |
| @@ -653,7 +858,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
| 653 | switch (action) { | 858 | switch (action) { |
| 654 | case CPU_ONLINE: | 859 | case CPU_ONLINE: |
| 655 | case CPU_ONLINE_FROZEN: | 860 | case CPU_ONLINE_FROZEN: |
| 656 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 861 | if (!pinst_has_cpu(pinst, cpu)) |
| 657 | break; | 862 | break; |
| 658 | mutex_lock(&pinst->lock); | 863 | mutex_lock(&pinst->lock); |
| 659 | err = __padata_add_cpu(pinst, cpu); | 864 | err = __padata_add_cpu(pinst, cpu); |
| @@ -664,7 +869,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
| 664 | 869 | ||
| 665 | case CPU_DOWN_PREPARE: | 870 | case CPU_DOWN_PREPARE: |
| 666 | case CPU_DOWN_PREPARE_FROZEN: | 871 | case CPU_DOWN_PREPARE_FROZEN: |
| 667 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 872 | if (!pinst_has_cpu(pinst, cpu)) |
| 668 | break; | 873 | break; |
| 669 | mutex_lock(&pinst->lock); | 874 | mutex_lock(&pinst->lock); |
| 670 | err = __padata_remove_cpu(pinst, cpu); | 875 | err = __padata_remove_cpu(pinst, cpu); |
| @@ -675,7 +880,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
| 675 | 880 | ||
| 676 | case CPU_UP_CANCELED: | 881 | case CPU_UP_CANCELED: |
| 677 | case CPU_UP_CANCELED_FROZEN: | 882 | case CPU_UP_CANCELED_FROZEN: |
| 678 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 883 | if (!pinst_has_cpu(pinst, cpu)) |
| 679 | break; | 884 | break; |
| 680 | mutex_lock(&pinst->lock); | 885 | mutex_lock(&pinst->lock); |
| 681 | __padata_remove_cpu(pinst, cpu); | 886 | __padata_remove_cpu(pinst, cpu); |
| @@ -683,7 +888,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
| 683 | 888 | ||
| 684 | case CPU_DOWN_FAILED: | 889 | case CPU_DOWN_FAILED: |
| 685 | case CPU_DOWN_FAILED_FROZEN: | 890 | case CPU_DOWN_FAILED_FROZEN: |
| 686 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 891 | if (!pinst_has_cpu(pinst, cpu)) |
| 687 | break; | 892 | break; |
| 688 | mutex_lock(&pinst->lock); | 893 | mutex_lock(&pinst->lock); |
| 689 | __padata_add_cpu(pinst, cpu); | 894 | __padata_add_cpu(pinst, cpu); |
| @@ -694,36 +899,202 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
| 694 | } | 899 | } |
| 695 | #endif | 900 | #endif |
| 696 | 901 | ||
| 902 | static void __padata_free(struct padata_instance *pinst) | ||
| 903 | { | ||
| 904 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 905 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | ||
| 906 | #endif | ||
| 907 | |||
| 908 | padata_stop(pinst); | ||
| 909 | padata_free_pd(pinst->pd); | ||
| 910 | free_cpumask_var(pinst->cpumask.pcpu); | ||
| 911 | free_cpumask_var(pinst->cpumask.cbcpu); | ||
| 912 | kfree(pinst); | ||
| 913 | } | ||
| 914 | |||
| 915 | #define kobj2pinst(_kobj) \ | ||
| 916 | container_of(_kobj, struct padata_instance, kobj) | ||
| 917 | #define attr2pentry(_attr) \ | ||
| 918 | container_of(_attr, struct padata_sysfs_entry, attr) | ||
| 919 | |||
| 920 | static void padata_sysfs_release(struct kobject *kobj) | ||
| 921 | { | ||
| 922 | struct padata_instance *pinst = kobj2pinst(kobj); | ||
| 923 | __padata_free(pinst); | ||
| 924 | } | ||
| 925 | |||
| 926 | struct padata_sysfs_entry { | ||
| 927 | struct attribute attr; | ||
| 928 | ssize_t (*show)(struct padata_instance *, struct attribute *, char *); | ||
| 929 | ssize_t (*store)(struct padata_instance *, struct attribute *, | ||
| 930 | const char *, size_t); | ||
| 931 | }; | ||
| 932 | |||
| 933 | static ssize_t show_cpumask(struct padata_instance *pinst, | ||
| 934 | struct attribute *attr, char *buf) | ||
| 935 | { | ||
| 936 | struct cpumask *cpumask; | ||
| 937 | ssize_t len; | ||
| 938 | |||
| 939 | mutex_lock(&pinst->lock); | ||
| 940 | if (!strcmp(attr->name, "serial_cpumask")) | ||
| 941 | cpumask = pinst->cpumask.cbcpu; | ||
| 942 | else | ||
| 943 | cpumask = pinst->cpumask.pcpu; | ||
| 944 | |||
| 945 | len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), | ||
| 946 | nr_cpu_ids); | ||
| 947 | if (PAGE_SIZE - len < 2) | ||
| 948 | len = -EINVAL; | ||
| 949 | else | ||
| 950 | len += sprintf(buf + len, "\n"); | ||
| 951 | |||
| 952 | mutex_unlock(&pinst->lock); | ||
| 953 | return len; | ||
| 954 | } | ||
| 955 | |||
| 956 | static ssize_t store_cpumask(struct padata_instance *pinst, | ||
| 957 | struct attribute *attr, | ||
| 958 | const char *buf, size_t count) | ||
| 959 | { | ||
| 960 | cpumask_var_t new_cpumask; | ||
| 961 | ssize_t ret; | ||
| 962 | int mask_type; | ||
| 963 | |||
| 964 | if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) | ||
| 965 | return -ENOMEM; | ||
| 966 | |||
| 967 | ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), | ||
| 968 | nr_cpumask_bits); | ||
| 969 | if (ret < 0) | ||
| 970 | goto out; | ||
| 971 | |||
| 972 | mask_type = !strcmp(attr->name, "serial_cpumask") ? | ||
| 973 | PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; | ||
| 974 | ret = padata_set_cpumask(pinst, mask_type, new_cpumask); | ||
| 975 | if (!ret) | ||
| 976 | ret = count; | ||
| 977 | |||
| 978 | out: | ||
| 979 | free_cpumask_var(new_cpumask); | ||
| 980 | return ret; | ||
| 981 | } | ||
| 982 | |||
| 983 | #define PADATA_ATTR_RW(_name, _show_name, _store_name) \ | ||
| 984 | static struct padata_sysfs_entry _name##_attr = \ | ||
| 985 | __ATTR(_name, 0644, _show_name, _store_name) | ||
| 986 | #define PADATA_ATTR_RO(_name, _show_name) \ | ||
| 987 | static struct padata_sysfs_entry _name##_attr = \ | ||
| 988 | __ATTR(_name, 0400, _show_name, NULL) | ||
| 989 | |||
| 990 | PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); | ||
| 991 | PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); | ||
| 992 | |||
| 993 | /* | ||
| 994 | * Padata sysfs provides the following objects: | ||
| 995 | * serial_cpumask [RW] - cpumask for serial workers | ||
| 996 | * parallel_cpumask [RW] - cpumask for parallel workers | ||
| 997 | */ | ||
| 998 | static struct attribute *padata_default_attrs[] = { | ||
| 999 | &serial_cpumask_attr.attr, | ||
| 1000 | ¶llel_cpumask_attr.attr, | ||
| 1001 | NULL, | ||
| 1002 | }; | ||
| 1003 | |||
| 1004 | static ssize_t padata_sysfs_show(struct kobject *kobj, | ||
| 1005 | struct attribute *attr, char *buf) | ||
| 1006 | { | ||
| 1007 | struct padata_instance *pinst; | ||
| 1008 | struct padata_sysfs_entry *pentry; | ||
| 1009 | ssize_t ret = -EIO; | ||
| 1010 | |||
| 1011 | pinst = kobj2pinst(kobj); | ||
| 1012 | pentry = attr2pentry(attr); | ||
| 1013 | if (pentry->show) | ||
| 1014 | ret = pentry->show(pinst, attr, buf); | ||
| 1015 | |||
| 1016 | return ret; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, | ||
| 1020 | const char *buf, size_t count) | ||
| 1021 | { | ||
| 1022 | struct padata_instance *pinst; | ||
| 1023 | struct padata_sysfs_entry *pentry; | ||
| 1024 | ssize_t ret = -EIO; | ||
| 1025 | |||
| 1026 | pinst = kobj2pinst(kobj); | ||
| 1027 | pentry = attr2pentry(attr); | ||
| 1028 | if (pentry->show) | ||
| 1029 | ret = pentry->store(pinst, attr, buf, count); | ||
| 1030 | |||
| 1031 | return ret; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | static const struct sysfs_ops padata_sysfs_ops = { | ||
| 1035 | .show = padata_sysfs_show, | ||
| 1036 | .store = padata_sysfs_store, | ||
| 1037 | }; | ||
| 1038 | |||
| 1039 | static struct kobj_type padata_attr_type = { | ||
| 1040 | .sysfs_ops = &padata_sysfs_ops, | ||
| 1041 | .default_attrs = padata_default_attrs, | ||
| 1042 | .release = padata_sysfs_release, | ||
| 1043 | }; | ||
| 1044 | |||
| 697 | /** | 1045 | /** |
| 698 | * padata_alloc - allocate and initialize a padata instance | 1046 | * padata_alloc_possible - Allocate and initialize padata instance. |
| 1047 | * Use the cpu_possible_mask for serial and | ||
| 1048 | * parallel workers. | ||
| 699 | * | 1049 | * |
| 700 | * @cpumask: cpumask that padata uses for parallelization | ||
| 701 | * @wq: workqueue to use for the allocated padata instance | 1050 | * @wq: workqueue to use for the allocated padata instance |
| 702 | */ | 1051 | */ |
| 703 | struct padata_instance *padata_alloc(const struct cpumask *cpumask, | 1052 | struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) |
| 704 | struct workqueue_struct *wq) | 1053 | { |
| 1054 | return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); | ||
| 1055 | } | ||
| 1056 | EXPORT_SYMBOL(padata_alloc_possible); | ||
| 1057 | |||
| 1058 | /** | ||
| 1059 | * padata_alloc - allocate and initialize a padata instance and specify | ||
| 1060 | * cpumasks for serial and parallel workers. | ||
| 1061 | * | ||
| 1062 | * @wq: workqueue to use for the allocated padata instance | ||
| 1063 | * @pcpumask: cpumask that will be used for padata parallelization | ||
| 1064 | * @cbcpumask: cpumask that will be used for padata serialization | ||
| 1065 | */ | ||
| 1066 | struct padata_instance *padata_alloc(struct workqueue_struct *wq, | ||
| 1067 | const struct cpumask *pcpumask, | ||
| 1068 | const struct cpumask *cbcpumask) | ||
| 705 | { | 1069 | { |
| 706 | struct padata_instance *pinst; | 1070 | struct padata_instance *pinst; |
| 707 | struct parallel_data *pd; | 1071 | struct parallel_data *pd = NULL; |
| 708 | 1072 | ||
| 709 | pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); | 1073 | pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); |
| 710 | if (!pinst) | 1074 | if (!pinst) |
| 711 | goto err; | 1075 | goto err; |
| 712 | 1076 | ||
| 713 | get_online_cpus(); | 1077 | get_online_cpus(); |
| 714 | 1078 | if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) | |
| 715 | pd = padata_alloc_pd(pinst, cpumask); | ||
| 716 | if (!pd) | ||
| 717 | goto err_free_inst; | 1079 | goto err_free_inst; |
| 1080 | if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { | ||
| 1081 | free_cpumask_var(pinst->cpumask.pcpu); | ||
| 1082 | goto err_free_inst; | ||
| 1083 | } | ||
| 1084 | if (!padata_validate_cpumask(pinst, pcpumask) || | ||
| 1085 | !padata_validate_cpumask(pinst, cbcpumask)) | ||
| 1086 | goto err_free_masks; | ||
| 718 | 1087 | ||
| 719 | if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) | 1088 | pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); |
| 720 | goto err_free_pd; | 1089 | if (!pd) |
| 1090 | goto err_free_masks; | ||
| 721 | 1091 | ||
| 722 | rcu_assign_pointer(pinst->pd, pd); | 1092 | rcu_assign_pointer(pinst->pd, pd); |
| 723 | 1093 | ||
| 724 | pinst->wq = wq; | 1094 | pinst->wq = wq; |
| 725 | 1095 | ||
| 726 | cpumask_copy(pinst->cpumask, cpumask); | 1096 | cpumask_copy(pinst->cpumask.pcpu, pcpumask); |
| 1097 | cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); | ||
| 727 | 1098 | ||
| 728 | pinst->flags = 0; | 1099 | pinst->flags = 0; |
| 729 | 1100 | ||
| @@ -735,12 +1106,15 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, | |||
| 735 | 1106 | ||
| 736 | put_online_cpus(); | 1107 | put_online_cpus(); |
| 737 | 1108 | ||
| 1109 | BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); | ||
| 1110 | kobject_init(&pinst->kobj, &padata_attr_type); | ||
| 738 | mutex_init(&pinst->lock); | 1111 | mutex_init(&pinst->lock); |
| 739 | 1112 | ||
| 740 | return pinst; | 1113 | return pinst; |
| 741 | 1114 | ||
| 742 | err_free_pd: | 1115 | err_free_masks: |
| 743 | padata_free_pd(pd); | 1116 | free_cpumask_var(pinst->cpumask.pcpu); |
| 1117 | free_cpumask_var(pinst->cpumask.cbcpu); | ||
| 744 | err_free_inst: | 1118 | err_free_inst: |
| 745 | kfree(pinst); | 1119 | kfree(pinst); |
| 746 | put_online_cpus(); | 1120 | put_online_cpus(); |
| @@ -756,19 +1130,6 @@ EXPORT_SYMBOL(padata_alloc); | |||
| 756 | */ | 1130 | */ |
| 757 | void padata_free(struct padata_instance *pinst) | 1131 | void padata_free(struct padata_instance *pinst) |
| 758 | { | 1132 | { |
| 759 | padata_stop(pinst); | 1133 | kobject_put(&pinst->kobj); |
| 760 | |||
| 761 | synchronize_rcu(); | ||
| 762 | |||
| 763 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 764 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | ||
| 765 | #endif | ||
| 766 | get_online_cpus(); | ||
| 767 | padata_flush_queues(pinst->pd); | ||
| 768 | put_online_cpus(); | ||
| 769 | |||
| 770 | padata_free_pd(pinst->pd); | ||
| 771 | free_cpumask_var(pinst->cpumask); | ||
| 772 | kfree(pinst); | ||
| 773 | } | 1134 | } |
| 774 | EXPORT_SYMBOL(padata_free); | 1135 | EXPORT_SYMBOL(padata_free); |
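Note that the padata_free() rework above also shifts work to the caller: the removed lines show it used to stop the instance and flush its queues itself, while the new version only drops the kobject reference. A minimal sketch of client usage under the reworked allocation API (the module boilerplate, workqueue name, and error handling are illustrative, not taken from this diff):

        #include <linux/init.h>
        #include <linux/padata.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *example_wq;
        static struct padata_instance *example_pinst;

        static int __init example_init(void)
        {
                example_wq = create_workqueue("example_padata");
                if (!example_wq)
                        return -ENOMEM;

                /* serial and parallel workers on all possible CPUs */
                example_pinst = padata_alloc_possible(example_wq);
                if (!example_pinst) {
                        destroy_workqueue(example_wq);
                        return -ENOMEM;
                }

                padata_start(example_pinst);
                return 0;
        }

        static void __exit example_exit(void)
        {
                /* the caller, not padata_free(), must stop the instance now */
                padata_stop(example_pinst);
                padata_free(example_pinst);
                destroy_workqueue(example_wq);
        }

A caller that needs distinct parallel and serial cpumasks would call padata_alloc(wq, pcpumask, cbcpumask) instead, subject to the padata_validate_cpumask() checks shown above.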
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index f42d3f737a33..996a4dec5f96 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
| @@ -48,59 +48,49 @@ | |||
| 48 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock | 48 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock |
| 49 | * held, taken with _irqsave. One lock to rule them all | 49 | * held, taken with _irqsave. One lock to rule them all |
| 50 | */ | 50 | */ |
| 51 | struct pm_qos_request_list { | 51 | enum pm_qos_type { |
| 52 | struct list_head list; | 52 | PM_QOS_MAX, /* return the largest value */ |
| 53 | union { | 53 | PM_QOS_MIN /* return the smallest value */ |
| 54 | s32 value; | ||
| 55 | s32 usec; | ||
| 56 | s32 kbps; | ||
| 57 | }; | ||
| 58 | int pm_qos_class; | ||
| 59 | }; | 54 | }; |
| 60 | 55 | ||
| 61 | static s32 max_compare(s32 v1, s32 v2); | ||
| 62 | static s32 min_compare(s32 v1, s32 v2); | ||
| 63 | |||
| 64 | struct pm_qos_object { | 56 | struct pm_qos_object { |
| 65 | struct pm_qos_request_list requests; | 57 | struct plist_head requests; |
| 66 | struct blocking_notifier_head *notifiers; | 58 | struct blocking_notifier_head *notifiers; |
| 67 | struct miscdevice pm_qos_power_miscdev; | 59 | struct miscdevice pm_qos_power_miscdev; |
| 68 | char *name; | 60 | char *name; |
| 69 | s32 default_value; | 61 | s32 default_value; |
| 70 | atomic_t target_value; | 62 | enum pm_qos_type type; |
| 71 | s32 (*comparitor)(s32, s32); | ||
| 72 | }; | 63 | }; |
| 73 | 64 | ||
| 65 | static DEFINE_SPINLOCK(pm_qos_lock); | ||
| 66 | |||
| 74 | static struct pm_qos_object null_pm_qos; | 67 | static struct pm_qos_object null_pm_qos; |
| 75 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); | 68 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); |
| 76 | static struct pm_qos_object cpu_dma_pm_qos = { | 69 | static struct pm_qos_object cpu_dma_pm_qos = { |
| 77 | .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)}, | 70 | .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), |
| 78 | .notifiers = &cpu_dma_lat_notifier, | 71 | .notifiers = &cpu_dma_lat_notifier, |
| 79 | .name = "cpu_dma_latency", | 72 | .name = "cpu_dma_latency", |
| 80 | .default_value = 2000 * USEC_PER_SEC, | 73 | .default_value = 2000 * USEC_PER_SEC, |
| 81 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), | 74 | .type = PM_QOS_MIN, |
| 82 | .comparitor = min_compare | ||
| 83 | }; | 75 | }; |
| 84 | 76 | ||
| 85 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); | 77 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); |
| 86 | static struct pm_qos_object network_lat_pm_qos = { | 78 | static struct pm_qos_object network_lat_pm_qos = { |
| 87 | .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)}, | 79 | .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), |
| 88 | .notifiers = &network_lat_notifier, | 80 | .notifiers = &network_lat_notifier, |
| 89 | .name = "network_latency", | 81 | .name = "network_latency", |
| 90 | .default_value = 2000 * USEC_PER_SEC, | 82 | .default_value = 2000 * USEC_PER_SEC, |
| 91 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), | 83 | .type = PM_QOS_MIN |
| 92 | .comparitor = min_compare | ||
| 93 | }; | 84 | }; |
| 94 | 85 | ||
| 95 | 86 | ||
| 96 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); | 87 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); |
| 97 | static struct pm_qos_object network_throughput_pm_qos = { | 88 | static struct pm_qos_object network_throughput_pm_qos = { |
| 98 | .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)}, | 89 | .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), |
| 99 | .notifiers = &network_throughput_notifier, | 90 | .notifiers = &network_throughput_notifier, |
| 100 | .name = "network_throughput", | 91 | .name = "network_throughput", |
| 101 | .default_value = 0, | 92 | .default_value = 0, |
| 102 | .target_value = ATOMIC_INIT(0), | 93 | .type = PM_QOS_MAX, |
| 103 | .comparitor = max_compare | ||
| 104 | }; | 94 | }; |
| 105 | 95 | ||
| 106 | 96 | ||
| @@ -111,8 +101,6 @@ static struct pm_qos_object *pm_qos_array[] = { | |||
| 111 | &network_throughput_pm_qos | 101 | &network_throughput_pm_qos |
| 112 | }; | 102 | }; |
| 113 | 103 | ||
| 114 | static DEFINE_SPINLOCK(pm_qos_lock); | ||
| 115 | |||
| 116 | static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | 104 | static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, |
| 117 | size_t count, loff_t *f_pos); | 105 | size_t count, loff_t *f_pos); |
| 118 | static int pm_qos_power_open(struct inode *inode, struct file *filp); | 106 | static int pm_qos_power_open(struct inode *inode, struct file *filp); |
| @@ -124,46 +112,55 @@ static const struct file_operations pm_qos_power_fops = { | |||
| 124 | .release = pm_qos_power_release, | 112 | .release = pm_qos_power_release, |
| 125 | }; | 113 | }; |
| 126 | 114 | ||
| 127 | /* static helper functions */ | 115 | /* unlocked internal variant */ |
| 128 | static s32 max_compare(s32 v1, s32 v2) | 116 | static inline int pm_qos_get_value(struct pm_qos_object *o) |
| 129 | { | 117 | { |
| 130 | return max(v1, v2); | 118 | if (plist_head_empty(&o->requests)) |
| 131 | } | 119 | return o->default_value; |
| 132 | 120 | ||
| 133 | static s32 min_compare(s32 v1, s32 v2) | 121 | switch (o->type) { |
| 134 | { | 122 | case PM_QOS_MIN: |
| 135 | return min(v1, v2); | 123 | return plist_last(&o->requests)->prio; |
| 136 | } | ||
| 137 | 124 | ||
| 125 | case PM_QOS_MAX: | ||
| 126 | return plist_first(&o->requests)->prio; | ||
| 138 | 127 | ||
| 139 | static void update_target(int pm_qos_class) | 128 | default: |
| 129 | /* runtime check for not using enum */ | ||
| 130 | BUG(); | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | static void update_target(struct pm_qos_object *o, struct plist_node *node, | ||
| 135 | int del, int value) | ||
| 140 | { | 136 | { |
| 141 | s32 extreme_value; | ||
| 142 | struct pm_qos_request_list *node; | ||
| 143 | unsigned long flags; | 137 | unsigned long flags; |
| 144 | int call_notifier = 0; | 138 | int prev_value, curr_value; |
| 145 | 139 | ||
| 146 | spin_lock_irqsave(&pm_qos_lock, flags); | 140 | spin_lock_irqsave(&pm_qos_lock, flags); |
| 147 | extreme_value = pm_qos_array[pm_qos_class]->default_value; | 141 | prev_value = pm_qos_get_value(o); |
| 148 | list_for_each_entry(node, | 142 | /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */ |
| 149 | &pm_qos_array[pm_qos_class]->requests.list, list) { | 143 | if (value != PM_QOS_DEFAULT_VALUE) { |
| 150 | extreme_value = pm_qos_array[pm_qos_class]->comparitor( | 144 | /* |
| 151 | extreme_value, node->value); | 145 | * to change the list, we atomically remove, reinit |
| 152 | } | 146 | * with new value and add, then see if the extremal |
| 153 | if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) != | 147 | * changed |
| 154 | extreme_value) { | 148 | */ |
| 155 | call_notifier = 1; | 149 | plist_del(node, &o->requests); |
| 156 | atomic_set(&pm_qos_array[pm_qos_class]->target_value, | 150 | plist_node_init(node, value); |
| 157 | extreme_value); | 151 | plist_add(node, &o->requests); |
| 158 | pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class, | 152 | } else if (del) { |
| 159 | atomic_read(&pm_qos_array[pm_qos_class]->target_value)); | 153 | plist_del(node, &o->requests); |
| 154 | } else { | ||
| 155 | plist_add(node, &o->requests); | ||
| 160 | } | 156 | } |
| 157 | curr_value = pm_qos_get_value(o); | ||
| 161 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 158 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
| 162 | 159 | ||
| 163 | if (call_notifier) | 160 | if (prev_value != curr_value) |
| 164 | blocking_notifier_call_chain( | 161 | blocking_notifier_call_chain(o->notifiers, |
| 165 | pm_qos_array[pm_qos_class]->notifiers, | 162 | (unsigned long)curr_value, |
| 166 | (unsigned long) extreme_value, NULL); | 163 | NULL); |
| 167 | } | 164 | } |
| 168 | 165 | ||
| 169 | static int register_pm_qos_misc(struct pm_qos_object *qos) | 166 | static int register_pm_qos_misc(struct pm_qos_object *qos) |
| @@ -196,10 +193,23 @@ static int find_pm_qos_object_by_minor(int minor) | |||
| 196 | */ | 193 | */ |
| 197 | int pm_qos_request(int pm_qos_class) | 194 | int pm_qos_request(int pm_qos_class) |
| 198 | { | 195 | { |
| 199 | return atomic_read(&pm_qos_array[pm_qos_class]->target_value); | 196 | unsigned long flags; |
| 197 | int value; | ||
| 198 | |||
| 199 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
| 200 | value = pm_qos_get_value(pm_qos_array[pm_qos_class]); | ||
| 201 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
| 202 | |||
| 203 | return value; | ||
| 200 | } | 204 | } |
| 201 | EXPORT_SYMBOL_GPL(pm_qos_request); | 205 | EXPORT_SYMBOL_GPL(pm_qos_request); |
| 202 | 206 | ||
| 207 | int pm_qos_request_active(struct pm_qos_request_list *req) | ||
| 208 | { | ||
| 209 | return req->pm_qos_class != 0; | ||
| 210 | } | ||
| 211 | EXPORT_SYMBOL_GPL(pm_qos_request_active); | ||
| 212 | |||
| 203 | /** | 213 | /** |
| 204 | * pm_qos_add_request - inserts new qos request into the list | 214 | * pm_qos_add_request - inserts new qos request into the list |
| 205 | * @pm_qos_class: identifies which list of qos request to use | 215 |
| @@ -211,27 +221,23 @@ EXPORT_SYMBOL_GPL(pm_qos_request); | |||
| 211 | * element as a handle for use in updating and removal. Call needs to save | 221 | * element as a handle for use in updating and removal. Call needs to save |
| 212 | * this handle for later use. | 222 | * this handle for later use. |
| 213 | */ | 223 | */ |
| 214 | struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) | 224 | void pm_qos_add_request(struct pm_qos_request_list *dep, |
| 225 | int pm_qos_class, s32 value) | ||
| 215 | { | 226 | { |
| 216 | struct pm_qos_request_list *dep; | 227 | struct pm_qos_object *o = pm_qos_array[pm_qos_class]; |
| 217 | unsigned long flags; | 228 | int new_value; |
| 218 | 229 | ||
| 219 | dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); | 230 | if (pm_qos_request_active(dep)) { |
| 220 | if (dep) { | 231 | WARN(1, "pm_qos_add_request() called for already added request\n");
| 221 | if (value == PM_QOS_DEFAULT_VALUE) | 232 | return; |
| 222 | dep->value = pm_qos_array[pm_qos_class]->default_value; | ||
| 223 | else | ||
| 224 | dep->value = value; | ||
| 225 | dep->pm_qos_class = pm_qos_class; | ||
| 226 | |||
| 227 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
| 228 | list_add(&dep->list, | ||
| 229 | &pm_qos_array[pm_qos_class]->requests.list); | ||
| 230 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
| 231 | update_target(pm_qos_class); | ||
| 232 | } | 233 | } |
| 233 | 234 | if (value == PM_QOS_DEFAULT_VALUE) | |
| 234 | return dep; | 235 | new_value = o->default_value; |
| 236 | else | ||
| 237 | new_value = value; | ||
| 238 | plist_node_init(&dep->list, new_value); | ||
| 239 | dep->pm_qos_class = pm_qos_class; | ||
| 240 | update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); | ||
| 235 | } | 241 | } |
| 236 | EXPORT_SYMBOL_GPL(pm_qos_add_request); | 242 | EXPORT_SYMBOL_GPL(pm_qos_add_request); |
| 237 | 243 | ||
| @@ -246,27 +252,28 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request); | |||
| 246 | * Attempts are made to make this code callable on hot code paths. | 252 | * Attempts are made to make this code callable on hot code paths. |
| 247 | */ | 253 | */ |
| 248 | void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, | 254 | void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, |
| 249 | s32 new_value) | 255 | s32 new_value) |
| 250 | { | 256 | { |
| 251 | unsigned long flags; | ||
| 252 | int pending_update = 0; | ||
| 253 | s32 temp; | 257 | s32 temp; |
| 258 | struct pm_qos_object *o; | ||
| 259 | |||
| 260 | if (!pm_qos_req) /*guard against callers passing in null */ | ||
| 261 | return; | ||
| 254 | 262 | ||
| 255 | if (pm_qos_req) { /*guard against callers passing in null */ | 263 | if (!pm_qos_request_active(pm_qos_req)) { |
| 256 | spin_lock_irqsave(&pm_qos_lock, flags); | 264 | WARN(1, "pm_qos_update_request() called for unknown object\n");
| 257 | if (new_value == PM_QOS_DEFAULT_VALUE) | 265 | return; |
| 258 | temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value; | ||
| 259 | else | ||
| 260 | temp = new_value; | ||
| 261 | |||
| 262 | if (temp != pm_qos_req->value) { | ||
| 263 | pending_update = 1; | ||
| 264 | pm_qos_req->value = temp; | ||
| 265 | } | ||
| 266 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
| 267 | if (pending_update) | ||
| 268 | update_target(pm_qos_req->pm_qos_class); | ||
| 269 | } | 266 | } |
| 267 | |||
| 268 | o = pm_qos_array[pm_qos_req->pm_qos_class]; | ||
| 269 | |||
| 270 | if (new_value == PM_QOS_DEFAULT_VALUE) | ||
| 271 | temp = o->default_value; | ||
| 272 | else | ||
| 273 | temp = new_value; | ||
| 274 | |||
| 275 | if (temp != pm_qos_req->list.prio) | ||
| 276 | update_target(o, &pm_qos_req->list, 0, temp); | ||
| 270 | } | 277 | } |
| 271 | EXPORT_SYMBOL_GPL(pm_qos_update_request); | 278 | EXPORT_SYMBOL_GPL(pm_qos_update_request); |
| 272 | 279 | ||
| @@ -280,19 +287,20 @@ EXPORT_SYMBOL_GPL(pm_qos_update_request); | |||
| 280 | */ | 287 | */ |
| 281 | void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) | 288 | void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) |
| 282 | { | 289 | { |
| 283 | unsigned long flags; | 290 | struct pm_qos_object *o; |
| 284 | int qos_class; | ||
| 285 | 291 | ||
| 286 | if (pm_qos_req == NULL) | 292 | if (pm_qos_req == NULL) |
| 287 | return; | 293 | return; |
| 288 | /* silent return to keep pcm code cleaner */ | 294 | /* silent return to keep pcm code cleaner */ |
| 289 | 295 | ||
| 290 | qos_class = pm_qos_req->pm_qos_class; | 296 | if (!pm_qos_request_active(pm_qos_req)) { |
| 291 | spin_lock_irqsave(&pm_qos_lock, flags); | 297 | WARN(1, "pm_qos_remove_request() called for unknown object\n");
| 292 | list_del(&pm_qos_req->list); | 298 | return; |
| 293 | kfree(pm_qos_req); | 299 | } |
| 294 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 300 | |
| 295 | update_target(qos_class); | 301 | o = pm_qos_array[pm_qos_req->pm_qos_class]; |
| 302 | update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); | ||
| 303 | memset(pm_qos_req, 0, sizeof(*pm_qos_req)); | ||
| 296 | } | 304 | } |
| 297 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); | 305 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); |
| 298 | 306 | ||
| @@ -340,8 +348,12 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp) | |||
| 340 | 348 | ||
| 341 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); | 349 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); |
| 342 | if (pm_qos_class >= 0) { | 350 | if (pm_qos_class >= 0) { |
| 343 | filp->private_data = (void *) pm_qos_add_request(pm_qos_class, | 351 | struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
| 344 | PM_QOS_DEFAULT_VALUE); | 352 | if (!req) |
| 353 | return -ENOMEM; | ||
| 354 | |||
| 355 | pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); | ||
| 356 | filp->private_data = req; | ||
| 345 | 357 | ||
| 346 | if (filp->private_data) | 358 | if (filp->private_data) |
| 347 | return 0; | 359 | return 0; |
| @@ -353,8 +365,9 @@ static int pm_qos_power_release(struct inode *inode, struct file *filp) | |||
| 353 | { | 365 | { |
| 354 | struct pm_qos_request_list *req; | 366 | struct pm_qos_request_list *req; |
| 355 | 367 | ||
| 356 | req = (struct pm_qos_request_list *)filp->private_data; | 368 | req = filp->private_data; |
| 357 | pm_qos_remove_request(req); | 369 | pm_qos_remove_request(req); |
| 370 | kfree(req); | ||
| 358 | 371 | ||
| 359 | return 0; | 372 | return 0; |
| 360 | } | 373 | } |
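The API change above turns pm_qos requests into caller-allocated objects: pm_qos_add_request() now fills in a handle the caller owns instead of kzalloc-ing one, so the handle must stay valid (and not live on a short-lived stack frame) until pm_qos_remove_request() is called. A minimal sketch of a driver using the new interface (the class constant and latency value are illustrative):

        #include <linux/init.h>
        #include <linux/pm_qos_params.h>

        static struct pm_qos_request_list example_req;

        static int __init example_init(void)
        {
                /* ask for no more than 100 usec of CPU/DMA latency */
                pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 100);
                return 0;
        }

        static void __exit example_exit(void)
        {
                /* optionally relax to the class default first ... */
                pm_qos_update_request(&example_req, PM_QOS_DEFAULT_VALUE);
                /* ... then drop the request; the handle is zeroed for reuse */
                pm_qos_remove_request(&example_req);
        }

pm_qos_request_active() above gives callers (and the WARN checks) a way to tell whether a given handle is currently registered.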
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa9e916da4d5..8dc31e02ae12 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2003 Patrick Mochel | 4 | * Copyright (c) 2003 Patrick Mochel |
| 5 | * Copyright (c) 2003 Open Source Development Lab | 5 | * Copyright (c) 2003 Open Source Development Lab |
| 6 | * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> | 6 | * Copyright (c) 2004 Pavel Machek <pavel@ucw.cz> |
| 7 | * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc. | 7 | * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc. |
| 8 | * | 8 | * |
| 9 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
| @@ -277,7 +277,7 @@ static int create_image(int platform_mode) | |||
| 277 | goto Enable_irqs; | 277 | goto Enable_irqs; |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | if (hibernation_test(TEST_CORE)) | 280 | if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) |
| 281 | goto Power_up; | 281 | goto Power_up; |
| 282 | 282 | ||
| 283 | in_suspend = 1; | 283 | in_suspend = 1; |
| @@ -288,8 +288,10 @@ static int create_image(int platform_mode) | |||
| 288 | error); | 288 | error); |
| 289 | /* Restore control flow magically appears here */ | 289 | /* Restore control flow magically appears here */ |
| 290 | restore_processor_state(); | 290 | restore_processor_state(); |
| 291 | if (!in_suspend) | 291 | if (!in_suspend) { |
| 292 | events_check_enabled = false; | ||
| 292 | platform_leave(platform_mode); | 293 | platform_leave(platform_mode); |
| 294 | } | ||
| 293 | 295 | ||
| 294 | Power_up: | 296 | Power_up: |
| 295 | sysdev_resume(); | 297 | sysdev_resume(); |
| @@ -328,7 +330,7 @@ int hibernation_snapshot(int platform_mode) | |||
| 328 | 330 | ||
| 329 | error = platform_begin(platform_mode); | 331 | error = platform_begin(platform_mode); |
| 330 | if (error) | 332 | if (error) |
| 331 | return error; | 333 | goto Close; |
| 332 | 334 | ||
| 333 | /* Preallocate image memory before shutting down devices. */ | 335 | /* Preallocate image memory before shutting down devices. */ |
| 334 | error = hibernate_preallocate_memory(); | 336 | error = hibernate_preallocate_memory(); |
| @@ -511,18 +513,24 @@ int hibernation_platform_enter(void) | |||
| 511 | 513 | ||
| 512 | local_irq_disable(); | 514 | local_irq_disable(); |
| 513 | sysdev_suspend(PMSG_HIBERNATE); | 515 | sysdev_suspend(PMSG_HIBERNATE); |
| 516 | if (!pm_check_wakeup_events()) { | ||
| 517 | error = -EAGAIN; | ||
| 518 | goto Power_up; | ||
| 519 | } | ||
| 520 | |||
| 514 | hibernation_ops->enter(); | 521 | hibernation_ops->enter(); |
| 515 | /* We should never get here */ | 522 | /* We should never get here */ |
| 516 | while (1); | 523 | while (1); |
| 517 | 524 | ||
| 518 | /* | 525 | Power_up: |
| 519 | * We don't need to reenable the nonboot CPUs or resume consoles, since | 526 | sysdev_resume(); |
| 520 | * the system is going to be halted anyway. | 527 | local_irq_enable(); |
| 521 | */ | 528 | enable_nonboot_cpus(); |
| 529 | |||
| 522 | Platform_finish: | 530 | Platform_finish: |
| 523 | hibernation_ops->finish(); | 531 | hibernation_ops->finish(); |
| 524 | 532 | ||
| 525 | dpm_suspend_noirq(PMSG_RESTORE); | 533 | dpm_resume_noirq(PMSG_RESTORE); |
| 526 | 534 | ||
| 527 | Resume_devices: | 535 | Resume_devices: |
| 528 | entering_platform_hibernation = false; | 536 | entering_platform_hibernation = false; |
diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b21fc0..62b0bc6e4983 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -204,6 +204,60 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 204 | 204 | ||
| 205 | power_attr(state); | 205 | power_attr(state); |
| 206 | 206 | ||
| 207 | #ifdef CONFIG_PM_SLEEP | ||
| 208 | /* | ||
| 209 | * The 'wakeup_count' attribute, along with the functions defined in | ||
| 210 | * drivers/base/power/wakeup.c, provides a means by which wakeup events can be | ||
| 211 | * handled in a non-racy way. | ||
| 212 | * | ||
| 213 | * If a wakeup event occurs when the system is in a sleep state, it simply is | ||
| 214 | * woken up. In turn, if an event that would wake the system up from a sleep | ||
| 215 | * state occurs when it is undergoing a transition to that sleep state, the | ||
| 216 | * transition should be aborted. Moreover, if such an event occurs when the | ||
| 217 | * system is in the working state, an attempt to start a transition to the | ||
| 218 | * given sleep state should fail for a certain period after the detection of | ||
| 219 | * the event. Using the 'state' attribute alone is not sufficient to satisfy | ||
| 220 | * these requirements, because a wakeup event may occur exactly when 'state' | ||
| 221 | * is being written to and may be delivered to user space right before it is | ||
| 222 | * frozen, so the event will remain only partially processed until the system is | ||
| 223 | * woken up by another event. In particular, it won't cause the transition to | ||
| 224 | * a sleep state to be aborted. | ||
| 225 | * | ||
| 226 | * This difficulty may be overcome if user space uses 'wakeup_count' before | ||
| 227 | * writing to 'state'. It should first read from 'wakeup_count' and store | ||
| 228 | * the read value. Then, after carrying out its own preparations for the system | ||
| 229 | * transition to a sleep state, it should write the stored value to | ||
| 230 | * 'wakeup_count'. If that fails, at least one wakeup event has occurred since | ||
| 231 | * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it | ||
| 232 | * is allowed to write to 'state', but the transition will be aborted if there | ||
| 233 | * are any wakeup events detected after 'wakeup_count' was written to. | ||
| 234 | */ | ||
| 235 | |||
| 236 | static ssize_t wakeup_count_show(struct kobject *kobj, | ||
| 237 | struct kobj_attribute *attr, | ||
| 238 | char *buf) | ||
| 239 | { | ||
| 240 | unsigned long val; | ||
| 241 | |||
| 242 | return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR; | ||
| 243 | } | ||
| 244 | |||
| 245 | static ssize_t wakeup_count_store(struct kobject *kobj, | ||
| 246 | struct kobj_attribute *attr, | ||
| 247 | const char *buf, size_t n) | ||
| 248 | { | ||
| 249 | unsigned long val; | ||
| 250 | |||
| 251 | if (sscanf(buf, "%lu", &val) == 1) { | ||
| 252 | if (pm_save_wakeup_count(val)) | ||
| 253 | return n; | ||
| 254 | } | ||
| 255 | return -EINVAL; | ||
| 256 | } | ||
| 257 | |||
| 258 | power_attr(wakeup_count); | ||
| 259 | #endif /* CONFIG_PM_SLEEP */ | ||
| 260 | |||
| 207 | #ifdef CONFIG_PM_TRACE | 261 | #ifdef CONFIG_PM_TRACE |
| 208 | int pm_trace_enabled; | 262 | int pm_trace_enabled; |
| 209 | 263 | ||
| @@ -236,6 +290,7 @@ static struct attribute * g[] = { | |||
| 236 | #endif | 290 | #endif |
| 237 | #ifdef CONFIG_PM_SLEEP | 291 | #ifdef CONFIG_PM_SLEEP |
| 238 | &pm_async_attr.attr, | 292 | &pm_async_attr.attr, |
| 293 | &wakeup_count_attr.attr, | ||
| 239 | #ifdef CONFIG_PM_DEBUG | 294 | #ifdef CONFIG_PM_DEBUG |
| 240 | &pm_test_attr.attr, | 295 | &pm_test_attr.attr, |
| 241 | #endif | 296 | #endif |
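The comment block above describes a userspace protocol; a minimal sketch in C of a suspend helper following it (paths are the standard sysfs locations; error handling is condensed):

        #include <stdio.h>

        static int suspend_checking_wakeups(void)
        {
                unsigned long cnt;
                FILE *f;

                f = fopen("/sys/power/wakeup_count", "r");
                if (!f)
                        return -1;
                if (fscanf(f, "%lu", &cnt) != 1) {
                        fclose(f);
                        return -1;
                }
                fclose(f);

                /* ... userspace preparations for suspend go here ... */

                f = fopen("/sys/power/wakeup_count", "w");
                if (!f)
                        return -1;
                fprintf(f, "%lu", cnt);
                if (fclose(f) != 0)
                        return -1;      /* wakeup events occurred; abort */

                f = fopen("/sys/power/state", "w");
                if (!f)
                        return -1;
                fprintf(f, "mem");
                fclose(f);              /* the transition itself still aborts
                                           if events arrive after the write */
                return 0;
        }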
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 25ce010e9f8b..f6cd6faf84fd 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * This file provides system snapshot/restore functionality for swsusp. | 4 | * This file provides system snapshot/restore functionality for swsusp. |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz> | 6 | * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz> |
| 7 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> | 7 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
| 8 | * | 8 | * |
| 9 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f37cb7dd4402..7335952ee473 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
| @@ -136,19 +136,19 @@ static int suspend_enter(suspend_state_t state) | |||
| 136 | if (suspend_ops->prepare) { | 136 | if (suspend_ops->prepare) { |
| 137 | error = suspend_ops->prepare(); | 137 | error = suspend_ops->prepare(); |
| 138 | if (error) | 138 | if (error) |
| 139 | return error; | 139 | goto Platform_finish; |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | error = dpm_suspend_noirq(PMSG_SUSPEND); | 142 | error = dpm_suspend_noirq(PMSG_SUSPEND); |
| 143 | if (error) { | 143 | if (error) { |
| 144 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 144 | printk(KERN_ERR "PM: Some devices failed to power down\n"); |
| 145 | goto Platfrom_finish; | 145 | goto Platform_finish; |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | if (suspend_ops->prepare_late) { | 148 | if (suspend_ops->prepare_late) { |
| 149 | error = suspend_ops->prepare_late(); | 149 | error = suspend_ops->prepare_late(); |
| 150 | if (error) | 150 | if (error) |
| 151 | goto Power_up_devices; | 151 | goto Platform_wake; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | if (suspend_test(TEST_PLATFORM)) | 154 | if (suspend_test(TEST_PLATFORM)) |
| @@ -163,8 +163,10 @@ static int suspend_enter(suspend_state_t state) | |||
| 163 | 163 | ||
| 164 | error = sysdev_suspend(PMSG_SUSPEND); | 164 | error = sysdev_suspend(PMSG_SUSPEND); |
| 165 | if (!error) { | 165 | if (!error) { |
| 166 | if (!suspend_test(TEST_CORE)) | 166 | if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { |
| 167 | error = suspend_ops->enter(state); | 167 | error = suspend_ops->enter(state); |
| 168 | events_check_enabled = false; | ||
| 169 | } | ||
| 168 | sysdev_resume(); | 170 | sysdev_resume(); |
| 169 | } | 171 | } |
| 170 | 172 | ||
| @@ -178,10 +180,9 @@ static int suspend_enter(suspend_state_t state) | |||
| 178 | if (suspend_ops->wake) | 180 | if (suspend_ops->wake) |
| 179 | suspend_ops->wake(); | 181 | suspend_ops->wake(); |
| 180 | 182 | ||
| 181 | Power_up_devices: | ||
| 182 | dpm_resume_noirq(PMSG_RESUME); | 183 | dpm_resume_noirq(PMSG_RESUME); |
| 183 | 184 | ||
| 184 | Platfrom_finish: | 185 | Platform_finish: |
| 185 | if (suspend_ops->finish) | 186 | if (suspend_ops->finish) |
| 186 | suspend_ops->finish(); | 187 | suspend_ops->finish(); |
| 187 | 188 | ||
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b0bb21778391..e6a5bdf61a37 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * This file provides functions for reading the suspend image from | 4 | * This file provides functions for reading the suspend image from |
| 5 | * and writing it to a swap partition. | 5 | * and writing it to a swap partition. |
| 6 | * | 6 | * |
| 7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz> | 7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> |
| 8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> | 8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
| 9 | * | 9 | * |
| 10 | * This file is released under the GPLv2. | 10 | * This file is released under the GPLv2. |
| @@ -32,7 +32,7 @@ | |||
| 32 | /* | 32 | /* |
| 33 | * The swap map is a data structure used for keeping track of each page | 33 | * The swap map is a data structure used for keeping track of each page |
| 34 | * written to a swap partition. It consists of many swap_map_page | 34 | * written to a swap partition. It consists of many swap_map_page |
| 35 | * structures that contain each an array of MAP_PAGE_SIZE swap entries. | 35 | * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. |
| 36 | * These structures are stored on the swap and linked together with the | 36 | * These structures are stored on the swap and linked together with the |
| 37 | * help of the .next_swap member. | 37 | * help of the .next_swap member. |
| 38 | * | 38 | * |
| @@ -148,7 +148,7 @@ sector_t alloc_swapdev_block(int swap) | |||
| 148 | 148 | ||
| 149 | /** | 149 | /** |
| 150 | * free_all_swap_pages - free swap pages allocated for saving image data. | 150 | * free_all_swap_pages - free swap pages allocated for saving image data. |
| 151 | * It also frees the extents used to register which swap entres had been | 151 | * It also frees the extents used to register which swap entries had been |
| 152 | * allocated. | 152 | * allocated. |
| 153 | */ | 153 | */ |
| 154 | 154 | ||
diff --git a/kernel/printk.c b/kernel/printk.c index 444b770c9595..4ab0164bcf84 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -37,6 +37,8 @@ | |||
| 37 | #include <linux/ratelimit.h> | 37 | #include <linux/ratelimit.h> |
| 38 | #include <linux/kmsg_dump.h> | 38 | #include <linux/kmsg_dump.h> |
| 39 | #include <linux/syslog.h> | 39 | #include <linux/syslog.h> |
| 40 | #include <linux/cpu.h> | ||
| 41 | #include <linux/notifier.h> | ||
| 40 | 42 | ||
| 41 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
| 42 | 44 | ||
| @@ -985,6 +987,32 @@ void resume_console(void) | |||
| 985 | } | 987 | } |
| 986 | 988 | ||
| 987 | /** | 989 | /** |
| 990 | * console_cpu_notify - print deferred console messages after CPU hotplug | ||
| 991 | * @self: notifier struct | ||
| 992 | * @action: CPU hotplug event | ||
| 993 | * @hcpu: unused | ||
| 994 | * | ||
| 995 | * If printk() is called from a CPU that is not online yet, the messages | ||
| 996 | * will be spooled but will not show up on the console. This function is | ||
| 997 | * called when a new CPU comes online (or fails to come up), and ensures | ||
| 998 | * that any such output gets printed. | ||
| 999 | */ | ||
| 1000 | static int __cpuinit console_cpu_notify(struct notifier_block *self, | ||
| 1001 | unsigned long action, void *hcpu) | ||
| 1002 | { | ||
| 1003 | switch (action) { | ||
| 1004 | case CPU_ONLINE: | ||
| 1005 | case CPU_DEAD: | ||
| 1006 | case CPU_DYING: | ||
| 1007 | case CPU_DOWN_FAILED: | ||
| 1008 | case CPU_UP_CANCELED: | ||
| 1009 | acquire_console_sem(); | ||
| 1010 | release_console_sem(); | ||
| 1011 | } | ||
| 1012 | return NOTIFY_OK; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | /** | ||
| 988 | * acquire_console_sem - lock the console system for exclusive use. | 1016 | * acquire_console_sem - lock the console system for exclusive use. |
| 989 | * | 1017 | * |
| 990 | * Acquires a semaphore which guarantees that the caller has | 1018 | * Acquires a semaphore which guarantees that the caller has |
| @@ -1371,7 +1399,7 @@ int unregister_console(struct console *console) | |||
| 1371 | } | 1399 | } |
| 1372 | EXPORT_SYMBOL(unregister_console); | 1400 | EXPORT_SYMBOL(unregister_console); |
| 1373 | 1401 | ||
| 1374 | static int __init disable_boot_consoles(void) | 1402 | static int __init printk_late_init(void) |
| 1375 | { | 1403 | { |
| 1376 | struct console *con; | 1404 | struct console *con; |
| 1377 | 1405 | ||
| @@ -1382,9 +1410,10 @@ static int __init disable_boot_consoles(void) | |||
| 1382 | unregister_console(con); | 1410 | unregister_console(con); |
| 1383 | } | 1411 | } |
| 1384 | } | 1412 | } |
| 1413 | hotcpu_notifier(console_cpu_notify, 0); | ||
| 1385 | return 0; | 1414 | return 0; |
| 1386 | } | 1415 | } |
| 1387 | late_initcall(disable_boot_consoles); | 1416 | late_initcall(printk_late_init); |
| 1388 | 1417 | ||
| 1389 | #if defined CONFIG_PRINTK | 1418 | #if defined CONFIG_PRINTK |
| 1390 | 1419 | ||
diff --git a/kernel/signal.c b/kernel/signal.c index 906ae5a1779c..bded65187780 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -637,7 +637,7 @@ static inline bool si_fromuser(const struct siginfo *info) | |||
| 637 | 637 | ||
| 638 | /* | 638 | /* |
| 639 | * Bad permissions for sending the signal | 639 | * Bad permissions for sending the signal |
| 640 | * - the caller must hold at least the RCU read lock | 640 | * - the caller must hold the RCU read lock |
| 641 | */ | 641 | */ |
| 642 | static int check_kill_permission(int sig, struct siginfo *info, | 642 | static int check_kill_permission(int sig, struct siginfo *info, |
| 643 | struct task_struct *t) | 643 | struct task_struct *t) |
| @@ -1127,11 +1127,14 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long | |||
| 1127 | 1127 | ||
| 1128 | /* | 1128 | /* |
| 1129 | * send signal info to all the members of a group | 1129 | * send signal info to all the members of a group |
| 1130 | * - the caller must hold the RCU read lock at least | ||
| 1131 | */ | 1130 | */ |
| 1132 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1131 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
| 1133 | { | 1132 | { |
| 1134 | int ret = check_kill_permission(sig, info, p); | 1133 | int ret; |
| 1134 | |||
| 1135 | rcu_read_lock(); | ||
| 1136 | ret = check_kill_permission(sig, info, p); | ||
| 1137 | rcu_read_unlock(); | ||
| 1135 | 1138 | ||
| 1136 | if (!ret && sig) | 1139 | if (!ret && sig) |
| 1137 | ret = do_send_sig_info(sig, info, p, true); | 1140 | ret = do_send_sig_info(sig, info, p, true); |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b3bafd5fc66d..48b2761b5668 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -188,7 +188,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 188 | /* | 188 | /* |
| 189 | * Setup the next period for devices, which do not have | 189 | * Setup the next period for devices, which do not have |
| 190 | * periodic mode. We read dev->next_event first and add to it | 190 | * periodic mode. We read dev->next_event first and add to it |
| 191 | * when the event alrady expired. clockevents_program_event() | 191 | * when the event already expired. clockevents_program_event() |
| 192 | * sets dev->next_event only when the event is really | 192 | * sets dev->next_event only when the event is really |
| 193 | * programmed to the device. | 193 | * programmed to the device. |
| 194 | */ | 194 | */ |
diff --git a/kernel/timer.c b/kernel/timer.c index ee305c8d4e18..efde11e197c4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -577,6 +577,19 @@ static void __init_timer(struct timer_list *timer, | |||
| 577 | lockdep_init_map(&timer->lockdep_map, name, key, 0); | 577 | lockdep_init_map(&timer->lockdep_map, name, key, 0); |
| 578 | } | 578 | } |
| 579 | 579 | ||
| 580 | void setup_deferrable_timer_on_stack_key(struct timer_list *timer, | ||
| 581 | const char *name, | ||
| 582 | struct lock_class_key *key, | ||
| 583 | void (*function)(unsigned long), | ||
| 584 | unsigned long data) | ||
| 585 | { | ||
| 586 | timer->function = function; | ||
| 587 | timer->data = data; | ||
| 588 | init_timer_on_stack_key(timer, name, key); | ||
| 589 | timer_set_deferrable(timer); | ||
| 590 | } | ||
| 591 | EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); | ||
| 592 | |||
| 580 | /** | 593 | /** |
| 581 | * init_timer_key - initialize a timer | 594 | * init_timer_key - initialize a timer |
| 582 | * @timer: the timer to be initialized | 595 | * @timer: the timer to be initialized |
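The new helper mirrors setup_timer_on_stack_key() for deferrable timers, which an idle CPU will not be woken up just to service. A sketch of direct use (names are illustrative; most callers would reach this through a convenience wrapper in <linux/timer.h> rather than passing the lockdep key by hand):

        #include <linux/jiffies.h>
        #include <linux/timer.h>

        static void example_timeout(unsigned long data)
        {
                /* deferrable: may be batched with the next real wakeup */
        }

        static void example_wait(void)
        {
                struct timer_list t;
                static struct lock_class_key example_key;  /* for lockdep */

                setup_deferrable_timer_on_stack_key(&t, "example_timer",
                                                    &example_key,
                                                    example_timeout, 0);
                mod_timer(&t, jiffies + HZ);

                /* ... do other work until the timeout matters ... */

                del_timer_sync(&t);
                destroy_timer_on_stack(&t);
        }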
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index b2d70d38dff4..25915832291a 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
| 10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 11 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
| 12 | #include <linux/highuid.h> | ||
| 12 | #include <linux/cred.h> | 13 | #include <linux/cred.h> |
| 13 | 14 | ||
| 14 | /* | 15 | /* |
| @@ -82,3 +83,46 @@ void free_user_ns(struct kref *kref) | |||
| 82 | schedule_work(&ns->destroyer); | 83 | schedule_work(&ns->destroyer); |
| 83 | } | 84 | } |
| 84 | EXPORT_SYMBOL(free_user_ns); | 85 | EXPORT_SYMBOL(free_user_ns); |
| 86 | |||
| 87 | uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid) | ||
| 88 | { | ||
| 89 | struct user_namespace *tmp; | ||
| 90 | |||
| 91 | if (likely(to == cred->user->user_ns)) | ||
| 92 | return uid; | ||
| 93 | |||
| 94 | |||
| 95 | /* Is cred->user the creator of the target user_ns | ||
| 96 | * or the creator of one of its parents? | ||
| 97 | */ | ||
| 98 | for (tmp = to; tmp != &init_user_ns; | ||
| 99 | tmp = tmp->creator->user_ns) { | ||
| 100 | if (cred->user == tmp->creator) { | ||
| 101 | return (uid_t)0; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | |||
| 105 | /* No useful relationship so no mapping */ | ||
| 106 | return overflowuid; | ||
| 107 | } | ||
| 108 | |||
| 109 | gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid) | ||
| 110 | { | ||
| 111 | struct user_namespace *tmp; | ||
| 112 | |||
| 113 | if (likely(to == cred->user->user_ns)) | ||
| 114 | return gid; | ||
| 115 | |||
| 116 | /* Is cred->user the creator of the target user_ns | ||
| 117 | * or the creator of one of its parents? | ||
| 118 | */ | ||
| 119 | for (tmp = to; tmp != &init_user_ns; | ||
| 120 | tmp = tmp->creator->user_ns) { | ||
| 121 | if (cred->user == tmp->creator) { | ||
| 122 | return (gid_t)0; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | /* No useful relationship so no mapping */ | ||
| 127 | return overflowgid; | ||
| 128 | } | ||
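The two mappers above encode a single policy, applied once for uids and once for gids. A sketch of a caller and the three possible outcomes (the function and parameter names are illustrative):

        #include <linux/cred.h>
        #include <linux/user_namespace.h>

        /* present an object owner's uid to a viewer in 'viewer_ns' */
        static uid_t example_present_uid(struct user_namespace *viewer_ns,
                                         const struct cred *owner_cred,
                                         uid_t owner_uid)
        {
                /*
                 * - owner and viewer share a namespace: owner_uid is
                 *   returned unchanged
                 * - owner_cred's user created viewer_ns or one of its
                 *   ancestors: presented as root (0)
                 * - no relationship: presented as overflowuid (65534 by
                 *   default)
                 */
                return user_ns_map_uid(viewer_ns, owner_cred, owner_uid);
        }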
