diff options
author:    Tejun Heo <tj@kernel.org>  2013-03-13 22:47:40 -0400
committer: Tejun Heo <tj@kernel.org>  2013-03-13 22:47:40 -0400
commit:    2e109a2855bf6cf675a8b74dbd89b6492e8def42 (patch)
tree:      22357bb67654f29db07d0f80b83e4e845cbee8c2 /kernel/workqueue.c
parent:    794b18bc8a3f80445e1f85c9c87c74de9575c93a (diff)
workqueue: rename workqueue_lock to wq_mayday_lock
With the recent locking updates, the only thing protected by
workqueue_lock is workqueue->maydays list. Rename workqueue_lock to
wq_mayday_lock.
This patch is pure rename.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 24 |
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 63856dfbd082..969be0b72071 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,10 +125,10 @@ enum { | |||
125 | * | 125 | * |
126 | * PW: pwq_lock protected. | 126 | * PW: pwq_lock protected. |
127 | * | 127 | * |
128 | * W: workqueue_lock protected. | ||
129 | * | ||
130 | * FR: wq->flush_mutex and pwq_lock protected for writes. Sched-RCU | 128 | * FR: wq->flush_mutex and pwq_lock protected for writes. Sched-RCU |
131 | * protected for reads. | 129 | * protected for reads. |
130 | * | ||
131 | * MD: wq_mayday_lock protected. | ||
132 | */ | 132 | */ |
133 | 133 | ||
134 | /* struct worker is defined in workqueue_internal.h */ | 134 | /* struct worker is defined in workqueue_internal.h */ |
@@ -194,7 +194,7 @@ struct pool_workqueue { | |||
194 | int max_active; /* L: max active works */ | 194 | int max_active; /* L: max active works */ |
195 | struct list_head delayed_works; /* L: delayed works */ | 195 | struct list_head delayed_works; /* L: delayed works */ |
196 | struct list_head pwqs_node; /* FR: node on wq->pwqs */ | 196 | struct list_head pwqs_node; /* FR: node on wq->pwqs */ |
197 | struct list_head mayday_node; /* W: node on wq->maydays */ | 197 | struct list_head mayday_node; /* MD: node on wq->maydays */ |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * Release of unbound pwq is punted to system_wq. See put_pwq() | 200 | * Release of unbound pwq is punted to system_wq. See put_pwq() |
@@ -235,7 +235,7 @@ struct workqueue_struct { | |||
235 | struct list_head flusher_queue; /* F: flush waiters */ | 235 | struct list_head flusher_queue; /* F: flush waiters */ |
236 | struct list_head flusher_overflow; /* F: flush overflow list */ | 236 | struct list_head flusher_overflow; /* F: flush overflow list */ |
237 | 237 | ||
238 | struct list_head maydays; /* W: pwqs requesting rescue */ | 238 | struct list_head maydays; /* MD: pwqs requesting rescue */ |
239 | struct worker *rescuer; /* I: rescue worker */ | 239 | struct worker *rescuer; /* I: rescue worker */ |
240 | 240 | ||
241 | int nr_drainers; /* WQ: drain in progress */ | 241 | int nr_drainers; /* WQ: drain in progress */ |
@@ -254,7 +254,7 @@ static struct kmem_cache *pwq_cache; | |||
254 | 254 | ||
255 | static DEFINE_MUTEX(wq_mutex); /* protects workqueues and pools */ | 255 | static DEFINE_MUTEX(wq_mutex); /* protects workqueues and pools */ |
256 | static DEFINE_SPINLOCK(pwq_lock); /* protects pool_workqueues */ | 256 | static DEFINE_SPINLOCK(pwq_lock); /* protects pool_workqueues */ |
257 | static DEFINE_SPINLOCK(workqueue_lock); | 257 | static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ |
258 | 258 | ||
259 | static LIST_HEAD(workqueues); /* WQ: list of all workqueues */ | 259 | static LIST_HEAD(workqueues); /* WQ: list of all workqueues */ |
260 | static bool workqueue_freezing; /* WQ: have wqs started freezing? */ | 260 | static bool workqueue_freezing; /* WQ: have wqs started freezing? */ |
@@ -1894,7 +1894,7 @@ static void send_mayday(struct work_struct *work) | |||
1894 | struct pool_workqueue *pwq = get_work_pwq(work); | 1894 | struct pool_workqueue *pwq = get_work_pwq(work); |
1895 | struct workqueue_struct *wq = pwq->wq; | 1895 | struct workqueue_struct *wq = pwq->wq; |
1896 | 1896 | ||
1897 | lockdep_assert_held(&workqueue_lock); | 1897 | lockdep_assert_held(&wq_mayday_lock); |
1898 | 1898 | ||
1899 | if (!wq->rescuer) | 1899 | if (!wq->rescuer) |
1900 | return; | 1900 | return; |
@@ -1911,7 +1911,7 @@ static void pool_mayday_timeout(unsigned long __pool) | |||
1911 | struct worker_pool *pool = (void *)__pool; | 1911 | struct worker_pool *pool = (void *)__pool; |
1912 | struct work_struct *work; | 1912 | struct work_struct *work; |
1913 | 1913 | ||
1914 | spin_lock_irq(&workqueue_lock); /* for wq->maydays */ | 1914 | spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */ |
1915 | spin_lock(&pool->lock); | 1915 | spin_lock(&pool->lock); |
1916 | 1916 | ||
1917 | if (need_to_create_worker(pool)) { | 1917 | if (need_to_create_worker(pool)) { |
@@ -1926,7 +1926,7 @@ static void pool_mayday_timeout(unsigned long __pool) | |||
1926 | } | 1926 | } |
1927 | 1927 | ||
1928 | spin_unlock(&pool->lock); | 1928 | spin_unlock(&pool->lock); |
1929 | spin_unlock_irq(&workqueue_lock); | 1929 | spin_unlock_irq(&wq_mayday_lock); |
1930 | 1930 | ||
1931 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); | 1931 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); |
1932 | } | 1932 | } |
@@ -2404,7 +2404,7 @@ repeat: | |||
2404 | } | 2404 | } |
2405 | 2405 | ||
2406 | /* see whether any pwq is asking for help */ | 2406 | /* see whether any pwq is asking for help */ |
2407 | spin_lock_irq(&workqueue_lock); | 2407 | spin_lock_irq(&wq_mayday_lock); |
2408 | 2408 | ||
2409 | while (!list_empty(&wq->maydays)) { | 2409 | while (!list_empty(&wq->maydays)) { |
2410 | struct pool_workqueue *pwq = list_first_entry(&wq->maydays, | 2410 | struct pool_workqueue *pwq = list_first_entry(&wq->maydays, |
@@ -2415,7 +2415,7 @@ repeat: | |||
2415 | __set_current_state(TASK_RUNNING); | 2415 | __set_current_state(TASK_RUNNING); |
2416 | list_del_init(&pwq->mayday_node); | 2416 | list_del_init(&pwq->mayday_node); |
2417 | 2417 | ||
2418 | spin_unlock_irq(&workqueue_lock); | 2418 | spin_unlock_irq(&wq_mayday_lock); |
2419 | 2419 | ||
2420 | /* migrate to the target cpu if possible */ | 2420 | /* migrate to the target cpu if possible */ |
2421 | worker_maybe_bind_and_lock(pool); | 2421 | worker_maybe_bind_and_lock(pool); |
@@ -2442,10 +2442,10 @@ repeat: | |||
2442 | 2442 | ||
2443 | rescuer->pool = NULL; | 2443 | rescuer->pool = NULL; |
2444 | spin_unlock(&pool->lock); | 2444 | spin_unlock(&pool->lock); |
2445 | spin_lock(&workqueue_lock); | 2445 | spin_lock(&wq_mayday_lock); |
2446 | } | 2446 | } |
2447 | 2447 | ||
2448 | spin_unlock_irq(&workqueue_lock); | 2448 | spin_unlock_irq(&wq_mayday_lock); |
2449 | 2449 | ||
2450 | /* rescuers should never participate in concurrency management */ | 2450 | /* rescuers should never participate in concurrency management */ |
2451 | WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); | 2451 | WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); |