path: root/kernel/workqueue.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2019-09-18 15:11:14 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-09-18 15:11:14 -0400
commit    8b53c76533aa4356602aea98f98a2f3b4051464c (patch)
tree      ab10ba58e21501407f8108a6bb9003daa2176962 /kernel/workqueue.c
parent    6cfae0c26b21dce323fe8799b66cf4bc996e3565 (diff)
parent    9575d1a5c0780ea26ff8dd29c94a32be32ce3c85 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add the ability to abort a skcipher walk.

  Algorithms:
   - Fix XTS to actually do the stealing.
   - Add library helpers for AES and DES for single-block users.
   - Add library helpers for SHA256.
   - Add new DES key verification helper.
   - Add surrounding bits for ESSIV generator.
   - Add accelerations for aegis128.
   - Add test vectors for lzo-rle.

  Drivers:
   - Add i.MX8MQ support to caam.
   - Add gcm/ccm/cfb/ofb aes support in inside-secure.
   - Add ofb/cfb aes support in media-tek.
   - Add HiSilicon ZIP accelerator support.

  Others:
   - Fix potential race condition in padata.
   - Use unbound workqueues in padata"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (311 commits)
  crypto: caam - Cast to long first before pointer conversion
  crypto: ccree - enable CTS support in AES-XTS
  crypto: inside-secure - Probe transform record cache RAM sizes
  crypto: inside-secure - Base RD fetchcount on actual RD FIFO size
  crypto: inside-secure - Base CD fetchcount on actual CD FIFO size
  crypto: inside-secure - Enable extended algorithms on newer HW
  crypto: inside-secure: Corrected configuration of EIP96_TOKEN_CTRL
  crypto: inside-secure - Add EIP97/EIP197 and endianness detection
  padata: remove cpu_index from the parallel_queue
  padata: unbind parallel jobs from specific CPUs
  padata: use separate workqueues for parallel and serial work
  padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible
  crypto: pcrypt - remove padata cpumask notifier
  padata: make padata_do_parallel find alternate callback CPU
  workqueue: require CPU hotplug read exclusion for apply_workqueue_attrs
  workqueue: unconfine alloc/apply/free_workqueue_attrs()
  padata: allocate workqueue internally
  arm64: dts: imx8mq: Add CAAM node
  random: Use wait_event_freezable() in add_hwgenerator_randomness()
  crypto: ux500 - Fix COMPILE_TEST warnings
  ...
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  25
1 file changed, 17 insertions, 8 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 601d61150b65..bc2e09a8ea61 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3329,7 +3329,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
  *
  * Undo alloc_workqueue_attrs().
  */
-static void free_workqueue_attrs(struct workqueue_attrs *attrs)
+void free_workqueue_attrs(struct workqueue_attrs *attrs)
 {
 	if (attrs) {
 		free_cpumask_var(attrs->cpumask);
@@ -3345,7 +3345,7 @@ static void free_workqueue_attrs(struct workqueue_attrs *attrs)
  *
  * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
-static struct workqueue_attrs *alloc_workqueue_attrs(void)
+struct workqueue_attrs *alloc_workqueue_attrs(void)
 {
 	struct workqueue_attrs *attrs;
 
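The two hunks above do nothing more than drop the static qualifier from the attrs helpers. The matching declarations are not visible in this diff (which is limited to kernel/workqueue.c), but per the "workqueue: unconfine alloc/apply/free_workqueue_attrs()" commit in this pull they would live in include/linux/workqueue.h, roughly as:

	void free_workqueue_attrs(struct workqueue_attrs *attrs);
	struct workqueue_attrs *alloc_workqueue_attrs(void);
	int apply_workqueue_attrs(struct workqueue_struct *wq,
				  const struct workqueue_attrs *attrs);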
@@ -4030,16 +4030,20 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
  *
  * Performs GFP_KERNEL allocations.
  *
+ * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
+ *
  * Return: 0 on success and -errno on failure.
  */
-static int apply_workqueue_attrs(struct workqueue_struct *wq,
+int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
 {
 	int ret;
 
-	apply_wqattrs_lock();
+	lockdep_assert_cpus_held();
+
+	mutex_lock(&wq_pool_mutex);
 	ret = apply_workqueue_attrs_locked(wq, attrs);
-	apply_wqattrs_unlock();
+	mutex_unlock(&wq_pool_mutex);
 
 	return ret;
 }
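The hunk above changes the locking contract: the old apply_wqattrs_lock()/apply_wqattrs_unlock() pair took the CPU hotplug read lock and wq_pool_mutex internally, whereas the new body only takes wq_pool_mutex and uses lockdep_assert_cpus_held() to insist that the caller already holds hotplug read exclusion. A minimal sketch of the resulting calling convention, assuming wq, attrs and ret are already set up by the caller:

	get_online_cpus();                       /* CPU hotplug read exclusion */
	ret = apply_workqueue_attrs(wq, attrs);  /* wq_pool_mutex taken inside */
	put_online_cpus();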
@@ -4152,16 +4156,21 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
-	} else if (wq->flags & __WQ_ORDERED) {
+	}
+
+	get_online_cpus();
+	if (wq->flags & __WQ_ORDERED) {
 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
 		     "ordering guarantee broken for workqueue %s\n", wq->name);
-		return ret;
 	} else {
-		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
+	put_online_cpus();
+
+	return ret;
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,
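Taken together with the earlier hunks, this exposes alloc_workqueue_attrs(), apply_workqueue_attrs() and free_workqueue_attrs() to other subsystems while pushing CPU hotplug read exclusion out to the caller, which is what the padata commits in this pull rely on. A sketch of how an outside user might restrict an unbound workqueue to a cpumask under these rules; the helper name and its arguments are illustrative and not taken from this diff:

	#include <linux/cpu.h>
	#include <linux/cpumask.h>
	#include <linux/workqueue.h>

	/* Hypothetical helper: pin an unbound workqueue's workers to @mask. */
	static int restrict_wq_to_cpumask(struct workqueue_struct *wq,
					  const struct cpumask *mask)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs();	/* GFP_KERNEL internally */
		if (!attrs)
			return -ENOMEM;
		cpumask_copy(attrs->cpumask, mask);

		get_online_cpus();			/* hotplug read exclusion */
		ret = apply_workqueue_attrs(wq, attrs);	/* wq must be WQ_UNBOUND */
		put_online_cpus();

		free_workqueue_attrs(attrs);
		return ret;
	}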