path: root/kernel/workqueue.c
author	Tejun Heo <tj@kernel.org>	2012-08-03 13:30:46 -0400
committer	Tejun Heo <tj@kernel.org>	2012-08-03 13:30:46 -0400
commit	bf4ede014ea886b71ef71368738da35b316cb7c0 (patch)
tree	9ae8f14883406241c54e5d0febc3c27258f4b45a /kernel/workqueue.c
parent	715f1300802e6eaefa85f6cfc70ae99af3d5d497 (diff)
workqueue: move try_to_grab_pending() upwards
try_to_grab_pending() will be used by to-be-implemented
mod_delayed_work[_on]().  Move try_to_grab_pending() and related
functions above queueing functions.

This patch only moves functions around.

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	286
1 file changed, 143 insertions(+), 143 deletions(-)
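
As the commit message notes, try_to_grab_pending() is being moved in preparation for a to-be-implemented mod_delayed_work[_on]().  Purely as a speculative sketch of that intent (not part of this patch; __queue_delayed_work() below is a hypothetical helper standing in for the eventual queueing path), such a function could build on the grab-then-requeue pattern roughly like this:

/*
 * Speculative sketch only: mod_delayed_work_on() is merely announced by
 * the commit message, not implemented by this patch.  The rough idea is
 * to take ownership of WORK_STRUCT_PENDING via del_timer() /
 * try_to_grab_pending(), then requeue the work with the new delay.
 */
static bool mod_delayed_work_on_sketch(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	int ret;

	do {
		/* steal the pending timer or queued work; -1 means retry */
		ret = likely(del_timer(&dwork->timer)) ? 1
					: try_to_grab_pending(&dwork->work);
	} while (unlikely(ret < 0));

	/* PENDING is ours now; hand the work back with the new expiry */
	__queue_delayed_work(cpu, wq, dwork, delay);	/* hypothetical */

	return ret > 0;	/* true if @dwork was pending before the call */
}

Such a caller would sit among the queueing functions, which is why try_to_grab_pending() and its helpers are moved above them here.
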
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6cbdc22f8ec7..0f50f4078e36 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -904,6 +904,149 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 }
 
 /**
+ * move_linked_works - move linked works to a list
+ * @work: start of series of works to be scheduled
+ * @head: target list to append @work to
+ * @nextp: out paramter for nested worklist walking
+ *
+ * Schedule linked works starting from @work to @head. Work series to
+ * be scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of
+ * the last scheduled work. This allows move_linked_works() to be
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void move_linked_works(struct work_struct *work, struct list_head *head,
+			      struct work_struct **nextp)
+{
+	struct work_struct *n;
+
+	/*
+	 * Linked worklist will always end before the end of the list,
+	 * use NULL for list head.
+	 */
+	list_for_each_entry_safe_from(work, n, NULL, entry) {
+		list_move_tail(&work->entry, head);
+		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
+			break;
+	}
+
+	/*
+	 * If we're already inside safe list traversal and have moved
+	 * multiple works to the scheduled queue, the next position
+	 * needs to be updated.
+	 */
+	if (nextp)
+		*nextp = n;
+}
+
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+	struct work_struct *work = list_first_entry(&cwq->delayed_works,
+						    struct work_struct, entry);
+
+	trace_workqueue_activate_work(work);
+	move_linked_works(work, &cwq->pool->worklist, NULL);
+	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+	cwq->nr_active++;
+}
+
+/**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+ * @color: color of work which left the queue
+ * @delayed: for a delayed work
+ *
+ * A work either has completed or is removed from pending queue,
+ * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+				 bool delayed)
+{
+	/* ignore uncolored works */
+	if (color == WORK_NO_COLOR)
+		return;
+
+	cwq->nr_in_flight[color]--;
+
+	if (!delayed) {
+		cwq->nr_active--;
+		if (!list_empty(&cwq->delayed_works)) {
+			/* one down, submit a delayed one */
+			if (cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+		}
+	}
+
+	/* is flush in progress and are we at the flushing tip? */
+	if (likely(cwq->flush_color != color))
+		return;
+
+	/* are there still in-flight works? */
+	if (cwq->nr_in_flight[color])
+		return;
+
+	/* this cwq is done, clear flush_color */
+	cwq->flush_color = -1;
+
+	/*
+	 * If this was the last cwq, wake up the first flusher. It
+	 * will handle the rest.
+	 */
+	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
+		complete(&cwq->wq->first_flusher->done);
+}
+
+/*
+ * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
+ * so this work can't be re-armed in any way.
+ */
+static int try_to_grab_pending(struct work_struct *work)
+{
+	struct global_cwq *gcwq;
+	int ret = -1;
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+		return 0;
+
+	/*
+	 * The queueing is in progress, or it is already queued. Try to
+	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+	 */
+	gcwq = get_work_gcwq(work);
+	if (!gcwq)
+		return ret;
+
+	spin_lock_irq(&gcwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * This work is queued, but perhaps we locked the wrong gcwq.
+		 * In that case we must see the new value after rmb(), see
+		 * insert_work()->wmb().
+		 */
+		smp_rmb();
+		if (gcwq == get_work_gcwq(work)) {
+			debug_work_deactivate(work);
+			list_del_init(&work->entry);
+			cwq_dec_nr_in_flight(get_work_cwq(work),
+				get_work_color(work),
+				*work_data_bits(work) & WORK_STRUCT_DELAYED);
+			ret = 1;
+		}
+	}
+	spin_unlock_irq(&gcwq->lock);
+
+	return ret;
+}
+
+/**
  * insert_work - insert a work into gcwq
  * @cwq: cwq @work belongs to
  * @work: work to insert
@@ -1832,107 +1975,6 @@ static bool manage_workers(struct worker *worker)
 }
 
 /**
- * move_linked_works - move linked works to a list
- * @work: start of series of works to be scheduled
- * @head: target list to append @work to
- * @nextp: out paramter for nested worklist walking
- *
- * Schedule linked works starting from @work to @head. Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work. This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void move_linked_works(struct work_struct *work, struct list_head *head,
-			      struct work_struct **nextp)
-{
-	struct work_struct *n;
-
-	/*
-	 * Linked worklist will always end before the end of the list,
-	 * use NULL for list head.
-	 */
-	list_for_each_entry_safe_from(work, n, NULL, entry) {
-		list_move_tail(&work->entry, head);
-		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
-			break;
-	}
-
-	/*
-	 * If we're already inside safe list traversal and have moved
-	 * multiple works to the scheduled queue, the next position
-	 * needs to be updated.
-	 */
-	if (nextp)
-		*nextp = n;
-}
-
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
-{
-	struct work_struct *work = list_first_entry(&cwq->delayed_works,
-						    struct work_struct, entry);
-
-	trace_workqueue_activate_work(work);
-	move_linked_works(work, &cwq->pool->worklist, NULL);
-	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-	cwq->nr_active++;
-}
-
-/**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
- * @color: color of work which left the queue
- * @delayed: for a delayed work
- *
- * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
-				 bool delayed)
-{
-	/* ignore uncolored works */
-	if (color == WORK_NO_COLOR)
-		return;
-
-	cwq->nr_in_flight[color]--;
-
-	if (!delayed) {
-		cwq->nr_active--;
-		if (!list_empty(&cwq->delayed_works)) {
-			/* one down, submit a delayed one */
-			if (cwq->nr_active < cwq->max_active)
-				cwq_activate_first_delayed(cwq);
-		}
-	}
-
-	/* is flush in progress and are we at the flushing tip? */
-	if (likely(cwq->flush_color != color))
-		return;
-
-	/* are there still in-flight works? */
-	if (cwq->nr_in_flight[color])
-		return;
-
-	/* this cwq is done, clear flush_color */
-	cwq->flush_color = -1;
-
-	/*
-	 * If this was the last cwq, wake up the first flusher. It
-	 * will handle the rest.
-	 */
-	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-		complete(&cwq->wq->first_flusher->done);
-}
-
-/**
  * process_one_work - process single work
  * @worker: self
  * @work: work to process
@@ -2767,48 +2809,6 @@ bool flush_work_sync(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work_sync);
 
-/*
- * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
- * so this work can't be re-armed in any way.
- */
-static int try_to_grab_pending(struct work_struct *work)
-{
-	struct global_cwq *gcwq;
-	int ret = -1;
-
-	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
-		return 0;
-
-	/*
-	 * The queueing is in progress, or it is already queued. Try to
-	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-	 */
-	gcwq = get_work_gcwq(work);
-	if (!gcwq)
-		return ret;
-
-	spin_lock_irq(&gcwq->lock);
-	if (!list_empty(&work->entry)) {
-		/*
-		 * This work is queued, but perhaps we locked the wrong gcwq.
-		 * In that case we must see the new value after rmb(), see
-		 * insert_work()->wmb().
-		 */
-		smp_rmb();
-		if (gcwq == get_work_gcwq(work)) {
-			debug_work_deactivate(work);
-			list_del_init(&work->entry);
-			cwq_dec_nr_in_flight(get_work_cwq(work),
-				get_work_color(work),
-				*work_data_bits(work) & WORK_STRUCT_DELAYED);
-			ret = 1;
-		}
-	}
-	spin_unlock_irq(&gcwq->lock);
-
-	return ret;
-}
-
 static bool __cancel_work_timer(struct work_struct *work,
 				struct timer_list* timer)
 {
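
For context on how the return convention of try_to_grab_pending() is consumed, the canceling path visible in the last hunk (__cancel_work_timer()) keeps retrying until it stops getting -1. A simplified, hedged sketch of that caller-side retry loop (modeled on __cancel_work_timer() of this era; waiting for a running callback and clearing work data are omitted, and the function name is invented for illustration):

/*
 * Simplified sketch of the retry loop around try_to_grab_pending().
 * A live timer counts as "grabbed"; otherwise try to steal the work
 * from the worklist.
 */
static bool cancel_pattern_sketch(struct work_struct *work,
				  struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer))) ? 1
					: try_to_grab_pending(work);
		/* -1: a queueing is in flight, back off and retry */
	} while (unlikely(ret < 0));

	return ret;	/* 1: we dequeued it, 0: it was already idle */
}

A return of -1 means a concurrent queueing operation transiently owns WORK_STRUCT_PENDING, so the only safe option is to retry until the bit can be claimed.
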