author		Linus Torvalds <torvalds@linux-foundation.org>	2009-03-26 19:03:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-03-26 19:03:04 -0400
commit		86d9c070175de65890794fa227b68297da6206d8
tree		1aa4f1d1ecf397bd0d745a67b9d828420a99a8b4 /fs
parent		413e3376485e6cf81f4cf6a4dbc0de0326535093
parent		a2a9537ac0b37a5da6fbe7e1e9cb06c524d2a9c4
Merge branch 'for-2.6.30' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.30' of git://git.kernel.dk/linux-2.6-block:
Get rid of pdflush_operation() in emergency sync and remount
btrfs: get rid of current_is_pdflush() in btrfs_btree_balance_dirty
Move the default_backing_dev_info out of readahead.c and into backing-dev.c
block: Repeated lines in switching-sched.txt
bsg: Remove bogus check against request_queue->max_sectors
block: WARN in __blk_put_request() for potential bio leak
loop: fix circular locking in loop_clr_fd()
loop: support barrier writes
bsg: add support for tail queuing
cpqarray: enable bus mastering
block: genhd.h cleanup patch
block: add private bio_set for bio integrity allocations
block: genhd.h comment needs updating
block: get rid of unused blkdev_free_rq() define
block: remove various blk_queue_*() setting functions in blk_init_queue_node()
cciss: add BUILD_BUG_ON() for catching bad CommandList_struct alignment
block: don't create bio_vec slabs of less than the inline number
block: cleanup bio_alloc_bioset()
Diffstat (limited to 'fs')
-rw-r--r--	fs/bio-integrity.c	85
-rw-r--r--	fs/bio.c		87
-rw-r--r--	fs/btrfs/disk-io.c	2
-rw-r--r--	fs/super.c		11
-rw-r--r--	fs/sync.c		14
5 files changed, 95 insertions, 104 deletions
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fe2b1aa2464e..31c46a241bac 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -26,23 +26,23 @@
 #include <linux/workqueue.h>
 
 static struct kmem_cache *bio_integrity_slab __read_mostly;
+static mempool_t *bio_integrity_pool;
+static struct bio_set *integrity_bio_set;
 static struct workqueue_struct *kintegrityd_wq;
 
 /**
- * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio: bio to attach integrity metadata to
  * @gfp_mask: Memory allocation mask
  * @nr_vecs: Number of integrity metadata scatter-gather elements
- * @bs: bio_set to allocate from
  *
  * Description: This function prepares a bio for attaching integrity
  * metadata. nr_vecs specifies the maximum number of pages containing
  * integrity metadata that can be attached.
  */
-struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
-							 gfp_t gfp_mask,
-							 unsigned int nr_vecs,
-							 struct bio_set *bs)
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+						  gfp_t gfp_mask,
+						  unsigned int nr_vecs)
 {
 	struct bio_integrity_payload *bip;
 	struct bio_vec *iv;
@@ -50,7 +50,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
 
 	BUG_ON(bio == NULL);
 
-	bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
+	bip = mempool_alloc(bio_integrity_pool, gfp_mask);
 	if (unlikely(bip == NULL)) {
 		printk(KERN_ERR "%s: could not alloc bip\n", __func__);
 		return NULL;
@@ -58,10 +58,10 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
 
 	memset(bip, 0, sizeof(*bip));
 
-	iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, bs);
+	iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
 	if (unlikely(iv == NULL)) {
 		printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
-		mempool_free(bip, bs->bio_integrity_pool);
+		mempool_free(bip, bio_integrity_pool);
 		return NULL;
 	}
 
@@ -72,35 +72,16 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
 
 	return bip;
 }
-EXPORT_SYMBOL(bio_integrity_alloc_bioset);
-
-/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
- * @bio: bio to attach integrity metadata to
- * @gfp_mask: Memory allocation mask
- * @nr_vecs: Number of integrity metadata scatter-gather elements
- *
- * Description: This function prepares a bio for attaching integrity
- * metadata. nr_vecs specifies the maximum number of pages containing
- * integrity metadata that can be attached.
- */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
-						  gfp_t gfp_mask,
-						  unsigned int nr_vecs)
-{
-	return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
-}
 EXPORT_SYMBOL(bio_integrity_alloc);
 
 /**
  * bio_integrity_free - Free bio integrity payload
  * @bio: bio containing bip to be freed
- * @bs: bio_set this bio was allocated from
  *
  * Description: Used to free the integrity portion of a bio. Usually
  * called from bio_free().
  */
-void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+void bio_integrity_free(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 
@@ -111,8 +92,8 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
 	    && bip->bip_buf != NULL)
 		kfree(bip->bip_buf);
 
-	bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
-	mempool_free(bip, bs->bio_integrity_pool);
+	bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
+	mempool_free(bip, bio_integrity_pool);
 
 	bio->bi_integrity = NULL;
 }
@@ -686,19 +667,17 @@ EXPORT_SYMBOL(bio_integrity_split);
  * @bio:	New bio
  * @bio_src:	Original bio
  * @gfp_mask:	Memory allocation mask
- * @bs:		bio_set to allocate bip from
  *
  * Description:	Called to allocate a bip when cloning a bio
  */
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
-			gfp_t gfp_mask, struct bio_set *bs)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
 {
 	struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
 	struct bio_integrity_payload *bip;
 
 	BUG_ON(bip_src == NULL);
 
-	bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
+	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
 
 	if (bip == NULL)
 		return -EIO;
@@ -714,37 +693,25 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 }
 EXPORT_SYMBOL(bio_integrity_clone);
 
-int bioset_integrity_create(struct bio_set *bs, int pool_size)
+static int __init bio_integrity_init(void)
 {
-	bs->bio_integrity_pool = mempool_create_slab_pool(pool_size,
-							  bio_integrity_slab);
-	if (!bs->bio_integrity_pool)
-		return -1;
-
-	return 0;
-}
-EXPORT_SYMBOL(bioset_integrity_create);
+	kintegrityd_wq = create_workqueue("kintegrityd");
 
-void bioset_integrity_free(struct bio_set *bs)
-{
-	if (bs->bio_integrity_pool)
-		mempool_destroy(bs->bio_integrity_pool);
-}
-EXPORT_SYMBOL(bioset_integrity_free);
+	if (!kintegrityd_wq)
+		panic("Failed to create kintegrityd\n");
 
-void __init bio_integrity_init_slab(void)
-{
 	bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
 					SLAB_HWCACHE_ALIGN|SLAB_PANIC);
-}
 
-static int __init integrity_init(void)
-{
-	kintegrityd_wq = create_workqueue("kintegrityd");
+	bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
+						      bio_integrity_slab);
+	if (!bio_integrity_pool)
+		panic("bio_integrity: can't allocate bip pool\n");
 
-	if (!kintegrityd_wq)
-		panic("Failed to create kintegrityd\n");
+	integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
+	if (!integrity_bio_set)
+		panic("bio_integrity: can't allocate bio_set\n");
 
 	return 0;
 }
-subsys_initcall(integrity_init);
+subsys_initcall(bio_integrity_init);
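The fs/bio-integrity.c hunks above drop the bio_set argument from the integrity helpers; a bip and its bio_vecs now come from the file-private bio_integrity_pool and integrity_bio_set created in bio_integrity_init(). A caller-side sketch of the post-merge interface follows; it is illustrative only (example_attach_integrity and its parameters are made up, not part of this merge), using the real bio_integrity_alloc() and bio_integrity_add_page() calls.

#include <linux/bio.h>

/* Sketch: attach one page of integrity metadata with the new API. */
static int example_attach_integrity(struct bio *bio, struct page *page,
				    unsigned int len)
{
	struct bio_integrity_payload *bip;

	/* No bio_set argument anymore -- allocation comes from the private pool. */
	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (!bip)
		return -ENOMEM;

	if (!bio_integrity_add_page(bio, page, len, 0))
		return -EIO;

	/* bio_integrity_free(bio) is now invoked for us from bio_free(). */
	return 0;
}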
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -248,7 +248,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 	if (bio_integrity(bio))
-		bio_integrity_free(bio, bs);
+		bio_integrity_free(bio);
 
 	/*
 	 * If we have front padding, adjust the bio pointer before freeing
@@ -301,48 +301,51 @@ void bio_init(struct bio *bio)
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	struct bio_vec *bvl = NULL;
 	struct bio *bio = NULL;
-	void *uninitialized_var(p);
+	unsigned long idx = 0;
+	void *p = NULL;
 
 	if (bs) {
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
-
-		if (p)
-			bio = p + bs->front_pad;
-	} else
+		if (!p)
+			goto err;
+		bio = p + bs->front_pad;
+	} else {
 		bio = kmalloc(sizeof(*bio), gfp_mask);
+		if (!bio)
+			goto err;
+	}
 
-	if (likely(bio)) {
-		struct bio_vec *bvl = NULL;
-
-		bio_init(bio);
-		if (likely(nr_iovecs)) {
-			unsigned long uninitialized_var(idx);
-
-			if (nr_iovecs <= BIO_INLINE_VECS) {
-				idx = 0;
-				bvl = bio->bi_inline_vecs;
-				nr_iovecs = BIO_INLINE_VECS;
-			} else {
-				bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
-							bs);
-				nr_iovecs = bvec_nr_vecs(idx);
-			}
-			if (unlikely(!bvl)) {
-				if (bs)
-					mempool_free(p, bs->bio_pool);
-				else
-					kfree(bio);
-				bio = NULL;
-				goto out;
-			}
-			bio->bi_flags |= idx << BIO_POOL_OFFSET;
-			bio->bi_max_vecs = nr_iovecs;
-		}
-		bio->bi_io_vec = bvl;
+	bio_init(bio);
+
+	if (unlikely(!nr_iovecs))
+		goto out_set;
+
+	if (nr_iovecs <= BIO_INLINE_VECS) {
+		bvl = bio->bi_inline_vecs;
+		nr_iovecs = BIO_INLINE_VECS;
+	} else {
+		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+		if (unlikely(!bvl))
+			goto err_free;
+
+		nr_iovecs = bvec_nr_vecs(idx);
 	}
-out:
+	bio->bi_flags |= idx << BIO_POOL_OFFSET;
+	bio->bi_max_vecs = nr_iovecs;
+out_set:
+	bio->bi_io_vec = bvl;
+
 	return bio;
+
+err_free:
+	if (bs)
+		mempool_free(p, bs->bio_pool);
+	else
+		kfree(bio);
+err:
+	return NULL;
 }
 
 struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
@@ -463,7 +466,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 	if (bio_integrity(bio)) {
 		int ret;
 
-		ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
+		ret = bio_integrity_clone(b, bio, gfp_mask);
 
 		if (ret < 0) {
 			bio_put(b);
@@ -1526,7 +1529,6 @@ void bioset_free(struct bio_set *bs)
 	if (bs->bio_pool)
 		mempool_destroy(bs->bio_pool);
 
-	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
 	bio_put_slab(bs);
 
@@ -1567,9 +1569,6 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (bioset_integrity_create(bs, pool_size))
-		goto bad;
-
 	if (!biovec_create_pools(bs, pool_size))
 		return bs;
 
@@ -1586,6 +1585,13 @@ static void __init biovec_init_slabs(void)
 		int size;
 		struct biovec_slab *bvs = bvec_slabs + i;
 
+#ifndef CONFIG_BLK_DEV_INTEGRITY
+		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
+			bvs->slab = NULL;
+			continue;
+		}
+#endif
+
 		size = bvs->nr_vecs * sizeof(struct bio_vec);
 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
@@ -1600,7 +1606,6 @@ static int __init init_bio(void)
 	if (!bio_slabs)
 		panic("bio: can't allocate bios\n");
 
-	bio_integrity_init_slab();
 	biovec_init_slabs();
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
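The bio_alloc_bioset() cleanup above keeps its behaviour but switches to goto-based error paths and serves requests of BIO_INLINE_VECS or fewer vectors from bio->bi_inline_vecs instead of a separate biovec slab. A caller-side sketch follows; it is illustrative only (the helper name and the bdev/sector parameters are made up), using the real bio_alloc() wrapper around fs_bio_set.

#include <linux/bio.h>

/* Sketch: a small bio whose single vector fits in the inline vecs. */
static struct bio *example_alloc_small_bio(struct block_device *bdev,
					   sector_t sector)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);	/* 1 <= BIO_INLINE_VECS: uses bi_inline_vecs */
	if (!bio)
		return NULL;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	return bio;
}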
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3e18175248e0..6ec80c0fc869 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2385,7 +2385,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 	unsigned long thresh = 32 * 1024 * 1024;
 	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 
-	if (current_is_pdflush() || current->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		return;
 
 	num_dirty = count_range_bits(tree, &start, (u64)-1,
diff --git a/fs/super.c b/fs/super.c
index 6ce501447ada..dd4acb158b5e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -674,7 +674,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 	return 0;
 }
 
-static void do_emergency_remount(unsigned long foo)
+static void do_emergency_remount(struct work_struct *work)
 {
 	struct super_block *sb;
 
@@ -697,12 +697,19 @@ static void do_emergency_remount(unsigned long foo)
 		spin_lock(&sb_lock);
 	}
 	spin_unlock(&sb_lock);
+	kfree(work);
 	printk("Emergency Remount complete\n");
 }
 
 void emergency_remount(void)
 {
-	pdflush_operation(do_emergency_remount, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_emergency_remount);
+		schedule_work(work);
+	}
 }
 
 /*
diff --git a/fs/sync.c b/fs/sync.c
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -42,9 +42,21 @@ SYSCALL_DEFINE0(sync)
 	return 0;
 }
 
+static void do_sync_work(struct work_struct *work)
+{
+	do_sync(0);
+	kfree(work);
+}
+
 void emergency_sync(void)
 {
-	pdflush_operation(do_sync, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_sync_work);
+		schedule_work(work);
+	}
 }
 
 /*
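The fs/super.c and fs/sync.c hunks above replace pdflush_operation() with the same deferred-work idiom: allocate a work_struct with GFP_ATOMIC, queue it on the shared kernel workqueue, and let the handler kfree() it when finished. A generic sketch of that pattern follows; it is illustrative only (example_handler and example_trigger are made-up names, not part of this merge).

#include <linux/slab.h>
#include <linux/workqueue.h>

static void example_handler(struct work_struct *work)
{
	/* ... perform the deferred operation here ... */
	kfree(work);			/* the handler owns and frees the work item */
}

static void example_trigger(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);	/* may be called from atomic context */
	if (work) {
		INIT_WORK(work, example_handler);
		schedule_work(work);	/* queued on the shared kernel workqueue */
	}
	/* If the allocation fails, the operation is silently skipped,
	 * mirroring the best-effort behaviour of the emergency paths above. */
}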