Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--  kernel/slow-work.c  81
1 files changed, 75 insertions, 6 deletions
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index fccf421eb5c1..671cc434532a 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -236,12 +236,17 @@ static bool slow_work_execute(int id)
 	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
 		BUG();
 
-	work->ops->execute(work);
+	/* don't execute if the work is in the process of being cancelled */
+	if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
+		work->ops->execute(work);
 
 	if (very_slow)
 		atomic_dec(&vslow_work_executing_count);
 	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
 
+	/* wake up anyone waiting for this work to be complete */
+	wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
+
 	/* if someone tried to enqueue the item whilst we were executing it,
 	 * then it'll be left unenqueued to avoid multiple threads trying to
 	 * execute it simultaneously
@@ -314,11 +319,16 @@ auto_requeue:
  * allowed to pick items to execute. This ensures that very slow items won't
  * overly block ones that are just ordinarily slow.
  *
- * Returns 0 if successful, -EAGAIN if not.
+ * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
+ * attempted queued)
  */
 int slow_work_enqueue(struct slow_work *work)
 {
 	unsigned long flags;
+	int ret;
+
+	if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+		return -ECANCELED;
 
 	BUG_ON(slow_work_user_count <= 0);
 	BUG_ON(!work);
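
With this hunk, slow_work_enqueue() gains a third return value alongside 0 and
-EAGAIN. Purely as an illustration of how a caller might react (my_kick_work()
and my_retry_later() are hypothetical names, not part of this patch):

	#include <linux/errno.h>
	#include <linux/slow-work.h>

	static void my_retry_later(struct slow_work *work);	/* hypothetical requeue path */

	/* hypothetical caller: -EAGAIN still means "couldn't take a reference,
	 * try again later", while -ECANCELED means a concurrent
	 * slow_work_cancel() got there first */
	static void my_kick_work(struct slow_work *work)
	{
		int ret = slow_work_enqueue(work);

		switch (ret) {
		case 0:
			break;			/* queued, or deferred to the running executor */
		case -ECANCELED:
			break;			/* item is being cancelled; nothing to do */
		case -EAGAIN:
			my_retry_later(work);	/* ops->get_ref() failed */
			break;
		}
	}
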
@@ -335,6 +345,9 @@ int slow_work_enqueue(struct slow_work *work)
 	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
 		spin_lock_irqsave(&slow_work_queue_lock, flags);
 
+		if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
+			goto cancelled;
+
 		/* we promise that we will not attempt to execute the work
 		 * function in more than one thread simultaneously
 		 *
@@ -352,8 +365,9 @@ int slow_work_enqueue(struct slow_work *work)
 		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 		} else {
-			if (slow_work_get_ref(work) < 0)
-				goto cant_get_ref;
+			ret = slow_work_get_ref(work);
+			if (ret < 0)
+				goto failed;
 			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 				list_add_tail(&work->link, &vslow_work_queue);
 			else
@@ -365,12 +379,67 @@ int slow_work_enqueue(struct slow_work *work)
 	}
 	return 0;
 
-cant_get_ref:
+cancelled:
+	ret = -ECANCELED;
+failed:
 	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
-	return -EAGAIN;
+	return ret;
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+static int slow_work_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
+/**
+ * slow_work_cancel - Cancel a slow work item
+ * @work: The work item to cancel
+ *
+ * This function will cancel a previously enqueued work item. If we cannot
+ * cancel the work item, it is guarenteed to have run when this function
+ * returns.
+ */
+void slow_work_cancel(struct slow_work *work)
+{
+	bool wait = true, put = false;
+
+	set_bit(SLOW_WORK_CANCELLING, &work->flags);
+
+	spin_lock_irq(&slow_work_queue_lock);
+
+	if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
+	    !list_empty(&work->link)) {
+		/* the link in the pending queue holds a reference on the item
+		 * that we will need to release */
+		list_del_init(&work->link);
+		wait = false;
+		put = true;
+		clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+	} else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
+		/* the executor is holding our only reference on the item, so
+		 * we merely need to wait for it to finish executing */
+		clear_bit(SLOW_WORK_PENDING, &work->flags);
+	}
+
+	spin_unlock_irq(&slow_work_queue_lock);
+
+	/* the EXECUTING flag is set by the executor whilst the spinlock is set
+	 * and before the item is dequeued - so assuming the above doesn't
+	 * actually dequeue it, simply waiting for the EXECUTING flag to be
+	 * released here should be sufficient */
+	if (wait)
+		wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
+			    TASK_UNINTERRUPTIBLE);
+
+	clear_bit(SLOW_WORK_CANCELLING, &work->flags);
+	if (put)
+		slow_work_put_ref(work);
+}
+EXPORT_SYMBOL(slow_work_cancel);
+
 /*
  * Schedule a cull of the thread pool at some time in the near future
  */
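
Taken together, the new SLOW_WORK_CANCELLING flag, the -ECANCELED return and
the wake_up_bit()/wait_on_bit() handshake on SLOW_WORK_EXECUTING let a user
tear down a work item safely. Below is a minimal, hypothetical sketch of a
user of the resulting interface; it assumes the slow_work_init()/struct
slow_work_ops API described in Documentation/slow-work.txt and that
slow_work_register_user() has already been called. The my_item names are
illustrative only, not taken from this patch.

	#include <linux/kernel.h>
	#include <linux/slow-work.h>

	/* hypothetical object embedding a slow work item */
	struct my_item {
		struct slow_work	work;
		/* ... user data ... */
	};

	static int my_get_ref(struct slow_work *work)
	{
		/* pin the containing object here; returning 0 means the
		 * reference was successfully taken */
		return 0;
	}

	static void my_put_ref(struct slow_work *work)
	{
		/* drop the reference taken in my_get_ref() */
	}

	static void my_execute(struct slow_work *work)
	{
		struct my_item *item = container_of(work, struct my_item, work);

		/* do the slow processing for item here */
		(void)item;
	}

	static struct slow_work_ops my_ops = {
		.get_ref	= my_get_ref,
		.put_ref	= my_put_ref,
		.execute	= my_execute,
	};

	static void my_start(struct my_item *item)
	{
		slow_work_init(&item->work, &my_ops);
		slow_work_enqueue(&item->work);
	}

	static void my_teardown(struct my_item *item)
	{
		/* once this returns (and with no further enqueues), the item
		 * is neither queued nor executing, so the containing object
		 * can be freed safely */
		slow_work_cancel(&item->work);
	}

Note that slow_work_cancel() can sleep (it may wait_on_bit() in
TASK_UNINTERRUPTIBLE for the executor to finish), so it has to be called from
process context.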