Diffstat (limited to 'kernel')
-rw-r--r--  kernel/slow-work.c | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index dd08f376e406..fccf421eb5c1 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -145,6 +145,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
 static int slow_work_user_count;
 static DEFINE_MUTEX(slow_work_user_lock);
 
+static inline int slow_work_get_ref(struct slow_work *work)
+{
+	if (work->ops->get_ref)
+		return work->ops->get_ref(work);
+
+	return 0;
+}
+
+static inline void slow_work_put_ref(struct slow_work *work)
+{
+	if (work->ops->put_ref)
+		work->ops->put_ref(work);
+}
+
 /*
  * Calculate the maximum number of active threads in the pool that are
  * permitted to process very slow work items.
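The two inline wrappers added above carry the whole point of the patch: a NULL ->get_ref is treated as a reference grab that always succeeds (returns 0), and a NULL ->put_ref as a no-op, so a slow-work user with no reference counting of its own can omit both callbacks. A minimal standalone sketch of that pattern follows; the mock types and the counted_ops/plain_ops names are illustrative only, not kernel code, and it builds with an ordinary C compiler rather than against kernel headers:

/* Standalone sketch of the optional get_ref/put_ref pattern. */
#include <stdio.h>

struct slow_work;

struct slow_work_ops {
	int  (*get_ref)(struct slow_work *work);	/* may be NULL */
	void (*put_ref)(struct slow_work *work);	/* may be NULL */
};

struct slow_work {
	const struct slow_work_ops *ops;
	int refcount;
};

/* Mirrors the wrapper above: NULL ->get_ref means "always succeeds". */
static inline int slow_work_get_ref(struct slow_work *work)
{
	if (work->ops->get_ref)
		return work->ops->get_ref(work);
	return 0;
}

/* Mirrors the wrapper above: NULL ->put_ref is a no-op. */
static inline void slow_work_put_ref(struct slow_work *work)
{
	if (work->ops->put_ref)
		work->ops->put_ref(work);
}

/* A user that really counts references... */
static int counted_get_ref(struct slow_work *work)
{
	work->refcount++;
	return 0;
}

static void counted_put_ref(struct slow_work *work)
{
	work->refcount--;
}

static const struct slow_work_ops counted_ops = {
	.get_ref = counted_get_ref,
	.put_ref = counted_put_ref,
};

/* ...and one that needs none: both callbacks simply omitted. */
static const struct slow_work_ops plain_ops = { 0 };

int main(void)
{
	struct slow_work a = { .ops = &counted_ops };
	struct slow_work b = { .ops = &plain_ops };

	/* Both paths look identical to the calling code. */
	if (slow_work_get_ref(&a) == 0 && slow_work_get_ref(&b) == 0) {
		printf("a.refcount = %d\n", a.refcount);	/* 1 */
		slow_work_put_ref(&a);
		slow_work_put_ref(&b);				/* no-op */
		printf("a.refcount = %d\n", a.refcount);	/* 0 */
	}
	return 0;
}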
@@ -248,7 +262,7 @@ static bool slow_work_execute(int id)
 	}
 
 	/* sort out the race between module unloading and put_ref() */
-	work->ops->put_ref(work);
+	slow_work_put_ref(work);
 
 #ifdef CONFIG_MODULES
 	module = slow_work_thread_processing[id];
@@ -309,7 +323,6 @@ int slow_work_enqueue(struct slow_work *work)
 	BUG_ON(slow_work_user_count <= 0);
 	BUG_ON(!work);
 	BUG_ON(!work->ops);
-	BUG_ON(!work->ops->get_ref);
 
 	/* when honouring an enqueue request, we only promise that we will run
 	 * the work function in the future; we do not promise to run it once
@@ -339,7 +352,7 @@ int slow_work_enqueue(struct slow_work *work)
 	if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 		set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 	} else {
-		if (work->ops->get_ref(work) < 0)
+		if (slow_work_get_ref(work) < 0)
 			goto cant_get_ref;
 		if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 			list_add_tail(&work->link, &vslow_work_queue);
@@ -480,21 +493,6 @@ static void slow_work_cull_timeout(unsigned long data)
 }
 
 /*
- * Get a reference on slow work thread starter
- */
-static int slow_work_new_thread_get_ref(struct slow_work *work)
-{
-	return 0;
-}
-
-/*
- * Drop a reference on slow work thread starter
- */
-static void slow_work_new_thread_put_ref(struct slow_work *work)
-{
-}
-
-/*
  * Start a new slow work thread
  */
 static void slow_work_new_thread_execute(struct slow_work *work)
@@ -529,8 +527,6 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 
 static const struct slow_work_ops slow_work_new_thread_ops = {
 	.owner = THIS_MODULE,
-	.get_ref = slow_work_new_thread_get_ref,
-	.put_ref = slow_work_new_thread_put_ref,
 	.execute = slow_work_new_thread_execute,
 };
 
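The last two hunks are the payoff: because the wrappers treat missing callbacks as trivially satisfied, the thread-starter's do-nothing slow_work_new_thread_get_ref()/slow_work_new_thread_put_ref() stubs can be deleted and its ops table shrinks to just .owner and .execute. A compact standalone sketch of that same simplification; starter_ops and the mock types are hypothetical stand-ins, not built against kernel headers:

/* Standalone illustration: an ops table with no reference counting. */
#include <stdio.h>

struct slow_work;

struct slow_work_ops {
	int  (*get_ref)(struct slow_work *work);	/* optional now */
	void (*put_ref)(struct slow_work *work);	/* optional now */
	void (*execute)(struct slow_work *work);
};

struct slow_work {
	const struct slow_work_ops *ops;
};

static void starter_execute(struct slow_work *work)
{
	(void)work;
	puts("spawn a new worker thread here");
}

/*
 * Before the patch this table needed do-nothing get_ref/put_ref
 * stubs; after it, .execute alone is enough.
 */
static const struct slow_work_ops starter_ops = {
	.execute = starter_execute,
};

int main(void)
{
	struct slow_work starter = { .ops = &starter_ops };

	starter.ops->execute(&starter);
	return 0;
}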