author		Qu Wenruo <quwenruo@cn.fujitsu.com>	2014-02-27 21:46:19 -0500
committer	Josef Bacik <jbacik@fb.com>		2014-03-10 15:17:16 -0400
commit		d458b0540ebd728b4d6ef47cc5ef0dbfd4dd361a (patch)
tree		cc00182614b878fcfde3218ec0daf5ac4332cab5 /fs/btrfs
parent		a046e9c88b0f46677923864295eac7c92cd962cb (diff)
btrfs: Cleanup the "_struct" suffix in btrfs_workqueue
The "_struct" suffix was mainly used to distinguish the new btrfs_work from the original one. Now that all btrfs_workers have been converted to btrfs_workqueue, the suffix is no longer needed. This patch also fixes code whose style had been distorted by the overly long "_struct" suffix.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
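For readers unfamiliar with the interface being renamed, the sketch below shows how the btrfs_workqueue/btrfs_work API is typically used after this patch. It is only an illustration based on the declarations in fs/btrfs/async-thread.h as changed here; the context struct, callback names, and the queue parameters ("example", flags 0, max_active 8, thresh 0) are hypothetical and not taken from the patch.

#include <linux/slab.h>
#include "async-thread.h"

/* Hypothetical per-request context embedding a btrfs_work, as the btrfs
 * callers touched by this patch do (struct async_cow, struct scrub_bio, ...). */
struct example_ctx {
	struct btrfs_work work;
	/* caller-specific fields would go here */
};

/* Runs in workqueue context; frees the context itself because no
 * ordered_free callback is registered below. */
static void example_work_fn(struct btrfs_work *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	/* ... do the deferred work on ctx ... */
	kfree(ctx);
}

/* Allocate a queue once (e.g. at mount time); the parameter values are arbitrary. */
static struct btrfs_workqueue *example_setup(void)
{
	return btrfs_alloc_workqueue("example", 0, 8, 0);
}

/* Submit one unit of work to the queue. */
static int example_submit(struct btrfs_workqueue *wq)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOFS);

	if (!ctx)
		return -ENOMEM;
	/* func, ordered_func, ordered_free; ordering is not needed here */
	btrfs_init_work(&ctx->work, example_work_fn, NULL, NULL);
	btrfs_queue_work(wq, &ctx->work);
	return 0;
}

/* Tear the queue down at unmount time. */
static void example_teardown(struct btrfs_workqueue *wq)
{
	btrfs_destroy_workqueue(wq);
}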
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/async-thread.c		66
-rw-r--r--	fs/btrfs/async-thread.h		34
-rw-r--r--	fs/btrfs/ctree.h		44
-rw-r--r--	fs/btrfs/delayed-inode.c	4
-rw-r--r--	fs/btrfs/disk-io.c		14
-rw-r--r--	fs/btrfs/extent-tree.c		2
-rw-r--r--	fs/btrfs/inode.c		18
-rw-r--r--	fs/btrfs/ordered-data.c		2
-rw-r--r--	fs/btrfs/ordered-data.h		4
-rw-r--r--	fs/btrfs/qgroup.c		2
-rw-r--r--	fs/btrfs/raid56.c		14
-rw-r--r--	fs/btrfs/reada.c		5
-rw-r--r--	fs/btrfs/scrub.c		23
-rw-r--r--	fs/btrfs/volumes.c		2
-rw-r--r--	fs/btrfs/volumes.h		2
15 files changed, 116 insertions, 120 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 2a5f383c3636..a709585e2c97 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -32,7 +32,7 @@
 #define NO_THRESHOLD (-1)
 #define DFT_THRESHOLD (32)
 
-struct __btrfs_workqueue_struct {
+struct __btrfs_workqueue {
 	struct workqueue_struct *normal_wq;
 	/* List head pointing to ordered work list */
 	struct list_head ordered_list;
@@ -49,15 +49,15 @@ struct __btrfs_workqueue_struct {
 	spinlock_t thres_lock;
 };
 
-struct btrfs_workqueue_struct {
-	struct __btrfs_workqueue_struct *normal;
-	struct __btrfs_workqueue_struct *high;
+struct btrfs_workqueue {
+	struct __btrfs_workqueue *normal;
+	struct __btrfs_workqueue *high;
 };
 
-static inline struct __btrfs_workqueue_struct
+static inline struct __btrfs_workqueue
 *__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
 {
-	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
 	if (unlikely(!ret))
 		return NULL;
@@ -95,14 +95,14 @@ static inline struct __btrfs_workqueue_struct
 }
 
 static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
+__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
 
-struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
+struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
 					      int flags,
 					      int max_active,
 					      int thresh)
 {
-	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
 	if (unlikely(!ret))
 		return NULL;
@@ -131,7 +131,7 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
  * This hook WILL be called in IRQ handler context,
  * so workqueue_set_max_active MUST NOT be called in this hook
  */
-static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
+static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
 {
 	if (wq->thresh == NO_THRESHOLD)
 		return;
@@ -143,7 +143,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
  * This hook is called in kthread content.
  * So workqueue_set_max_active is called here.
  */
-static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
+static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 {
 	int new_max_active;
 	long pending;
@@ -186,10 +186,10 @@ out:
 	}
 }
 
-static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq)
 {
 	struct list_head *list = &wq->ordered_list;
-	struct btrfs_work_struct *work;
+	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
 	unsigned long flags;
 
@@ -197,7 +197,7 @@ static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
-		work = list_entry(list->next, struct btrfs_work_struct,
+		work = list_entry(list->next, struct btrfs_work,
 				  ordered_list);
 		if (!test_bit(WORK_DONE_BIT, &work->flags))
 			break;
@@ -229,11 +229,11 @@ static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
 
 static void normal_work_helper(struct work_struct *arg)
 {
-	struct btrfs_work_struct *work;
-	struct __btrfs_workqueue_struct *wq;
+	struct btrfs_work *work;
+	struct __btrfs_workqueue *wq;
 	int need_order = 0;
 
-	work = container_of(arg, struct btrfs_work_struct, normal_work);
+	work = container_of(arg, struct btrfs_work, normal_work);
 	/*
 	 * We should not touch things inside work in the following cases:
 	 * 1) after work->func() if it has no ordered_free
@@ -254,10 +254,10 @@ static void normal_work_helper(struct work_struct *arg)
 	}
 }
 
-void btrfs_init_work(struct btrfs_work_struct *work,
-		     void (*func)(struct btrfs_work_struct *),
-		     void (*ordered_func)(struct btrfs_work_struct *),
-		     void (*ordered_free)(struct btrfs_work_struct *))
+void btrfs_init_work(struct btrfs_work *work,
+		     void (*func)(struct btrfs_work *),
+		     void (*ordered_func)(struct btrfs_work *),
+		     void (*ordered_free)(struct btrfs_work *))
 {
 	work->func = func;
 	work->ordered_func = ordered_func;
@@ -267,8 +267,8 @@ void btrfs_init_work(struct btrfs_work_struct *work,
 	work->flags = 0;
 }
 
-static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
-				      struct btrfs_work_struct *work)
+static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
+				      struct btrfs_work *work)
 {
 	unsigned long flags;
 
@@ -282,10 +282,10 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
 	queue_work(wq->normal_wq, &work->normal_work);
 }
 
-void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
-		      struct btrfs_work_struct *work)
+void btrfs_queue_work(struct btrfs_workqueue *wq,
+		      struct btrfs_work *work)
 {
-	struct __btrfs_workqueue_struct *dest_wq;
+	struct __btrfs_workqueue *dest_wq;
 
 	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
 		dest_wq = wq->high;
@@ -295,13 +295,13 @@ void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
 }
 
 static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
+__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
 {
 	destroy_workqueue(wq->normal_wq);
 	kfree(wq);
 }
 
-void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
 {
 	if (!wq)
 		return;
@@ -310,14 +310,14 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
 	__btrfs_destroy_workqueue(wq->normal);
 }
 
-void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
 {
 	wq->normal->max_active = max;
 	if (wq->high)
 		wq->high->max_active = max;
 }
 
-void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
+void btrfs_set_work_high_priority(struct btrfs_work *work)
 {
 	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
 }
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index ab05904f791c..08d717476227 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -20,33 +20,33 @@
 #ifndef __BTRFS_ASYNC_THREAD_
 #define __BTRFS_ASYNC_THREAD_
 
-struct btrfs_workqueue_struct;
+struct btrfs_workqueue;
 /* Internal use only */
-struct __btrfs_workqueue_struct;
+struct __btrfs_workqueue;
 
-struct btrfs_work_struct {
-	void (*func)(struct btrfs_work_struct *arg);
-	void (*ordered_func)(struct btrfs_work_struct *arg);
-	void (*ordered_free)(struct btrfs_work_struct *arg);
+struct btrfs_work {
+	void (*func)(struct btrfs_work *arg);
+	void (*ordered_func)(struct btrfs_work *arg);
+	void (*ordered_free)(struct btrfs_work *arg);
 
 	/* Don't touch things below */
 	struct work_struct normal_work;
 	struct list_head ordered_list;
-	struct __btrfs_workqueue_struct *wq;
+	struct __btrfs_workqueue *wq;
 	unsigned long flags;
 };
 
-struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
+struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
 					      int flags,
 					      int max_active,
 					      int thresh);
-void btrfs_init_work(struct btrfs_work_struct *work,
-		     void (*func)(struct btrfs_work_struct *),
-		     void (*ordered_func)(struct btrfs_work_struct *),
-		     void (*ordered_free)(struct btrfs_work_struct *));
-void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
-		      struct btrfs_work_struct *work);
-void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);
-void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
-void btrfs_set_work_high_priority(struct btrfs_work_struct *work);
+void btrfs_init_work(struct btrfs_work *work,
+		     void (*func)(struct btrfs_work *),
+		     void (*ordered_func)(struct btrfs_work *),
+		     void (*ordered_free)(struct btrfs_work *));
+void btrfs_queue_work(struct btrfs_workqueue *wq,
+		      struct btrfs_work *work);
+void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
+void btrfs_set_work_high_priority(struct btrfs_work *work);
 #endif
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5a8c77a441ba..b4d2e957b89f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1221,7 +1221,7 @@ struct btrfs_caching_control {
 	struct list_head list;
 	struct mutex mutex;
 	wait_queue_head_t wait;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 	struct btrfs_block_group_cache *block_group;
 	u64 progress;
 	atomic_t count;
@@ -1504,27 +1504,27 @@ struct btrfs_fs_info {
 	 * A third pool does submit_bio to avoid deadlocking with the other
 	 * two
 	 */
-	struct btrfs_workqueue_struct *workers;
-	struct btrfs_workqueue_struct *delalloc_workers;
-	struct btrfs_workqueue_struct *flush_workers;
-	struct btrfs_workqueue_struct *endio_workers;
-	struct btrfs_workqueue_struct *endio_meta_workers;
-	struct btrfs_workqueue_struct *endio_raid56_workers;
-	struct btrfs_workqueue_struct *rmw_workers;
-	struct btrfs_workqueue_struct *endio_meta_write_workers;
-	struct btrfs_workqueue_struct *endio_write_workers;
-	struct btrfs_workqueue_struct *endio_freespace_worker;
-	struct btrfs_workqueue_struct *submit_workers;
-	struct btrfs_workqueue_struct *caching_workers;
-	struct btrfs_workqueue_struct *readahead_workers;
+	struct btrfs_workqueue *workers;
+	struct btrfs_workqueue *delalloc_workers;
+	struct btrfs_workqueue *flush_workers;
+	struct btrfs_workqueue *endio_workers;
+	struct btrfs_workqueue *endio_meta_workers;
+	struct btrfs_workqueue *endio_raid56_workers;
+	struct btrfs_workqueue *rmw_workers;
+	struct btrfs_workqueue *endio_meta_write_workers;
+	struct btrfs_workqueue *endio_write_workers;
+	struct btrfs_workqueue *endio_freespace_worker;
+	struct btrfs_workqueue *submit_workers;
+	struct btrfs_workqueue *caching_workers;
+	struct btrfs_workqueue *readahead_workers;
 
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write. It happens
 	 * for the sys_munmap function call path
 	 */
-	struct btrfs_workqueue_struct *fixup_workers;
-	struct btrfs_workqueue_struct *delayed_workers;
+	struct btrfs_workqueue *fixup_workers;
+	struct btrfs_workqueue *delayed_workers;
 	struct task_struct *transaction_kthread;
 	struct task_struct *cleaner_kthread;
 	int thread_pool_size;
@@ -1604,9 +1604,9 @@ struct btrfs_fs_info {
 	atomic_t scrub_cancel_req;
 	wait_queue_head_t scrub_pause_wait;
 	int scrub_workers_refcnt;
-	struct btrfs_workqueue_struct *scrub_workers;
-	struct btrfs_workqueue_struct *scrub_wr_completion_workers;
-	struct btrfs_workqueue_struct *scrub_nocow_workers;
+	struct btrfs_workqueue *scrub_workers;
+	struct btrfs_workqueue *scrub_wr_completion_workers;
+	struct btrfs_workqueue *scrub_nocow_workers;
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	u32 check_integrity_print_mask;
@@ -1647,9 +1647,9 @@ struct btrfs_fs_info {
 	/* qgroup rescan items */
 	struct mutex qgroup_rescan_lock; /* protects the progress item */
 	struct btrfs_key qgroup_rescan_progress;
-	struct btrfs_workqueue_struct *qgroup_rescan_workers;
+	struct btrfs_workqueue *qgroup_rescan_workers;
 	struct completion qgroup_rescan_completion;
-	struct btrfs_work_struct qgroup_rescan_work;
+	struct btrfs_work qgroup_rescan_work;
 
 	/* filesystem state */
 	unsigned long fs_state;
@@ -3680,7 +3680,7 @@ struct btrfs_delalloc_work {
 	int delay_iput;
 	struct completion completion;
 	struct list_head list;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 76e85d66801f..33e561a84013 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1318,10 +1318,10 @@ void btrfs_remove_delayed_node(struct inode *inode)
 struct btrfs_async_delayed_work {
 	struct btrfs_delayed_root *delayed_root;
 	int nr;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
-static void btrfs_async_run_delayed_root(struct btrfs_work_struct *work)
+static void btrfs_async_run_delayed_root(struct btrfs_work *work)
 {
 	struct btrfs_async_delayed_work *async_work;
 	struct btrfs_delayed_root *delayed_root;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c80d9507171c..f7d84d955764 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -55,7 +55,7 @@
 #endif
 
 static struct extent_io_ops btree_extent_io_ops;
-static void end_workqueue_fn(struct btrfs_work_struct *work);
+static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 				    int read_only);
@@ -86,7 +86,7 @@ struct end_io_wq {
 	int error;
 	int metadata;
 	struct list_head list;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 /*
@@ -108,7 +108,7 @@ struct async_submit_bio {
 	 * can't tell us where in the file the bio should go
 	 */
 	u64 bio_offset;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 	int error;
 };
 
@@ -742,7 +742,7 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 	return 256 * limit;
 }
 
-static void run_one_async_start(struct btrfs_work_struct *work)
+static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
 	int ret;
@@ -755,7 +755,7 @@ static void run_one_async_start(struct btrfs_work_struct *work)
 		async->error = ret;
 }
 
-static void run_one_async_done(struct btrfs_work_struct *work)
+static void run_one_async_done(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info;
 	struct async_submit_bio *async;
@@ -782,7 +782,7 @@ static void run_one_async_done(struct btrfs_work_struct *work)
 			       async->bio_offset);
 }
 
-static void run_one_async_free(struct btrfs_work_struct *work)
+static void run_one_async_free(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
 
@@ -1668,7 +1668,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
  * called by the kthread helper functions to finally call the bio end_io
  * functions. This is where read checksum verification actually happens
  */
-static void end_workqueue_fn(struct btrfs_work_struct *work)
+static void end_workqueue_fn(struct btrfs_work *work)
 {
 	struct bio *bio;
 	struct end_io_wq *end_io_wq;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index bb58082f6d61..19ea8ad70c67 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -378,7 +378,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 	return total_added;
 }
 
-static noinline void caching_thread(struct btrfs_work_struct *work)
+static noinline void caching_thread(struct btrfs_work *work)
 {
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_fs_info *fs_info;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0885f333574d..53697a80b849 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -324,7 +324,7 @@ struct async_cow {
 	u64 start;
 	u64 end;
 	struct list_head extents;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 static noinline int add_async_extent(struct async_cow *cow,
@@ -1000,7 +1000,7 @@ out_unlock:
 /*
  * work queue call back to started compression on a file and pages
  */
-static noinline void async_cow_start(struct btrfs_work_struct *work)
+static noinline void async_cow_start(struct btrfs_work *work)
 {
 	struct async_cow *async_cow;
 	int num_added = 0;
@@ -1018,7 +1018,7 @@ static noinline void async_cow_start(struct btrfs_work_struct *work)
 /*
  * work queue call back to submit previously compressed pages
  */
-static noinline void async_cow_submit(struct btrfs_work_struct *work)
+static noinline void async_cow_submit(struct btrfs_work *work)
 {
 	struct async_cow *async_cow;
 	struct btrfs_root *root;
@@ -1039,7 +1039,7 @@ static noinline void async_cow_submit(struct btrfs_work_struct *work)
 		submit_compressed_extents(async_cow->inode, async_cow);
 }
 
-static noinline void async_cow_free(struct btrfs_work_struct *work)
+static noinline void async_cow_free(struct btrfs_work *work)
 {
 	struct async_cow *async_cow;
 	async_cow = container_of(work, struct async_cow, work);
@@ -1748,10 +1748,10 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 /* see btrfs_writepage_start_hook for details on why this is required */
 struct btrfs_writepage_fixup {
 	struct page *page;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
-static void btrfs_writepage_fixup_worker(struct btrfs_work_struct *work)
+static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 {
 	struct btrfs_writepage_fixup *fixup;
 	struct btrfs_ordered_extent *ordered;
@@ -2750,7 +2750,7 @@ out:
 	return ret;
 }
 
-static void finish_ordered_fn(struct btrfs_work_struct *work)
+static void finish_ordered_fn(struct btrfs_work *work)
 {
 	struct btrfs_ordered_extent *ordered_extent;
 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
@@ -2763,7 +2763,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 	struct inode *inode = page->mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ordered_extent *ordered_extent = NULL;
-	struct btrfs_workqueue_struct *workers;
+	struct btrfs_workqueue *workers;
 
 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
@@ -8384,7 +8384,7 @@ out_notrans:
 	return ret;
 }
 
-static void btrfs_run_delalloc_work(struct btrfs_work_struct *work)
+static void btrfs_run_delalloc_work(struct btrfs_work *work)
 {
 	struct btrfs_delalloc_work *delalloc_work;
 	struct inode *inode;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6fa8219b5d03..751ee38083a9 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -576,7 +576,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 	wake_up(&entry->wait);
 }
 
-static void btrfs_run_ordered_extent_work(struct btrfs_work_struct *work)
+static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 {
 	struct btrfs_ordered_extent *ordered;
 
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 84bb236119fe..246897058efb 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -130,10 +130,10 @@ struct btrfs_ordered_extent {
 	/* a per root list of all the pending ordered extents */
 	struct list_head root_extent_list;
 
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 
 	struct completion completion;
-	struct btrfs_work_struct flush_work;
+	struct btrfs_work flush_work;
 	struct list_head work_list;
 };
 
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 38617cc2fdd5..2cf905877aaf 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1984,7 +1984,7 @@ out:
 	return ret;
 }
 
-static void btrfs_qgroup_rescan_worker(struct btrfs_work_struct *work)
+static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
 						     qgroup_rescan_work);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 5afa564201a2..1269fc30b15c 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -87,7 +87,7 @@ struct btrfs_raid_bio {
 	/*
 	 * for scheduling work in the helper threads
 	 */
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 
 	/*
 	 * bio list and bio_list_lock are used
@@ -166,8 +166,8 @@ struct btrfs_raid_bio {
 
 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
-static void rmw_work(struct btrfs_work_struct *work);
-static void read_rebuild_work(struct btrfs_work_struct *work);
+static void rmw_work(struct btrfs_work *work);
+static void read_rebuild_work(struct btrfs_work *work);
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
@@ -1588,7 +1588,7 @@ struct btrfs_plug_cb {
 	struct blk_plug_cb cb;
 	struct btrfs_fs_info *info;
 	struct list_head rbio_list;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 /*
@@ -1652,7 +1652,7 @@ static void run_plug(struct btrfs_plug_cb *plug)
  * if the unplug comes from schedule, we have to push the
  * work off to a helper thread
  */
-static void unplug_work(struct btrfs_work_struct *work)
+static void unplug_work(struct btrfs_work *work)
 {
 	struct btrfs_plug_cb *plug;
 	plug = container_of(work, struct btrfs_plug_cb, work);
@@ -2079,7 +2079,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 
 }
 
-static void rmw_work(struct btrfs_work_struct *work)
+static void rmw_work(struct btrfs_work *work)
 {
 	struct btrfs_raid_bio *rbio;
 
@@ -2087,7 +2087,7 @@ static void rmw_work(struct btrfs_work_struct *work)
 	raid56_rmw_stripe(rbio);
 }
 
-static void read_rebuild_work(struct btrfs_work_struct *work)
+static void read_rebuild_work(struct btrfs_work *work)
 {
 	struct btrfs_raid_bio *rbio;
 
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 9e01d3677355..30947f923620 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -91,8 +91,7 @@ struct reada_zone {
 };
 
 struct reada_machine_work {
-	struct btrfs_work_struct
-			work;
+	struct btrfs_work work;
 	struct btrfs_fs_info *fs_info;
 };
 
@@ -734,7 +733,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
 
 }
 
-static void reada_start_machine_worker(struct btrfs_work_struct *work)
+static void reada_start_machine_worker(struct btrfs_work *work)
 {
 	struct reada_machine_work *rmw;
 	struct btrfs_fs_info *fs_info;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 5a240f5e6ceb..db21a1360e13 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -96,8 +96,7 @@ struct scrub_bio {
 #endif
 	int page_count;
 	int next_free;
-	struct btrfs_work_struct
-			work;
+	struct btrfs_work work;
 };
 
 struct scrub_block {
@@ -155,8 +154,7 @@ struct scrub_fixup_nodatasum {
 	struct btrfs_device *dev;
 	u64 logical;
 	struct btrfs_root *root;
-	struct btrfs_work_struct
-			work;
+	struct btrfs_work work;
 	int mirror_num;
 };
 
@@ -174,8 +172,7 @@ struct scrub_copy_nocow_ctx {
 	int mirror_num;
 	u64 physical_for_dev_replace;
 	struct list_head inodes;
-	struct btrfs_work_struct
-			work;
+	struct btrfs_work work;
 };
 
 struct scrub_warning {
@@ -234,7 +231,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		       u64 gen, int mirror_num, u8 *csum, int force,
 		       u64 physical_for_dev_replace);
 static void scrub_bio_end_io(struct bio *bio, int err);
-static void scrub_bio_end_io_worker(struct btrfs_work_struct *work);
+static void scrub_bio_end_io_worker(struct btrfs_work *work);
 static void scrub_block_complete(struct scrub_block *sblock);
 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 			       u64 extent_logical, u64 extent_len,
@@ -251,14 +248,14 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_page *spage);
 static void scrub_wr_submit(struct scrub_ctx *sctx);
 static void scrub_wr_bio_end_io(struct bio *bio, int err);
-static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work);
+static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
 static int write_page_nocow(struct scrub_ctx *sctx,
 			    u64 physical_for_dev_replace, struct page *page);
 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 				      struct scrub_copy_nocow_ctx *ctx);
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace);
-static void copy_nocow_pages_worker(struct btrfs_work_struct *work);
+static void copy_nocow_pages_worker(struct btrfs_work *work);
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
@@ -737,7 +734,7 @@ out:
 	return -EIO;
 }
 
-static void scrub_fixup_nodatasum(struct btrfs_work_struct *work)
+static void scrub_fixup_nodatasum(struct btrfs_work *work)
 {
 	int ret;
 	struct scrub_fixup_nodatasum *fixup;
@@ -1622,7 +1619,7 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
 	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
 }
 
-static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work)
+static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 {
 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
 	struct scrub_ctx *sctx = sbio->sctx;
@@ -2090,7 +2087,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
 }
 
-static void scrub_bio_end_io_worker(struct btrfs_work_struct *work)
+static void scrub_bio_end_io_worker(struct btrfs_work *work)
 {
 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
 	struct scrub_ctx *sctx = sbio->sctx;
@@ -3161,7 +3158,7 @@ static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
 
 #define COPY_COMPLETE 1
 
-static void copy_nocow_pages_worker(struct btrfs_work_struct *work)
+static void copy_nocow_pages_worker(struct btrfs_work *work)
 {
 	struct scrub_copy_nocow_ctx *nocow_ctx =
 		container_of(work, struct scrub_copy_nocow_ctx, work);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0066cff077ce..b4660c413c73 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -440,7 +440,7 @@ done:
 	blk_finish_plug(&plug);
 }
 
-static void pending_bios_fn(struct btrfs_work_struct *work)
+static void pending_bios_fn(struct btrfs_work *work)
 {
 	struct btrfs_device *device;
 
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5d9a03773ca6..80754f9dd3df 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -95,7 +95,7 @@ struct btrfs_device {
 	/* per-device scrub information */
 	struct scrub_ctx *scrub_device;
 
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 	struct rcu_head rcu;
 	struct work_struct rcu_work;
 