Diffstat (limited to 'fs')
54 files changed, 886 insertions, 386 deletions
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 15b5ca2a2606..9c949348510b 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
         char *value = NULL;
         struct posix_acl *acl;

+        if (!IS_POSIXACL(inode))
+                return NULL;
+
         acl = get_cached_acl(inode, type);
         if (acl != ACL_NOT_CACHED)
                 return acl;
@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
         struct posix_acl *acl;
         int ret = 0;

+        if (!IS_POSIXACL(dentry->d_inode))
+                return -EOPNOTSUPP;
+
         acl = btrfs_get_acl(dentry->d_inode, type);

         if (IS_ERR(acl))
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index f745287fbf2e..4d2110eafe29 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         u64 em_len;
         u64 em_start;
         struct extent_map *em;
-        int ret;
+        int ret = -ENOMEM;
         u32 *sums;

         tree = &BTRFS_I(inode)->io_tree;
@@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,

         compressed_len = em->block_len;
         cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+        if (!cb)
+                goto out;
+
         atomic_set(&cb->pending_bios, 0);
         cb->errors = 0;
         cb->inode = inode;
@@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,

         nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
                 PAGE_CACHE_SIZE;
-        cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+        cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
                                        GFP_NOFS);
+        if (!cb->compressed_pages)
+                goto fail1;
+
         bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

         for (page_index = 0; page_index < nr_pages; page_index++) {
                 cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
                                                               __GFP_HIGHMEM);
+                if (!cb->compressed_pages[page_index])
+                        goto fail2;
         }
         cb->nr_pages = nr_pages;

@@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         cb->len = uncompressed_len;

         comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+        if (!comp_bio)
+                goto fail2;
         comp_bio->bi_private = cb;
         comp_bio->bi_end_io = end_compressed_bio_read;
         atomic_inc(&cb->pending_bios);
@@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,

         bio_put(comp_bio);
         return 0;
+
+fail2:
+        for (page_index = 0; page_index < nr_pages; page_index++)
+                free_page((unsigned long)cb->compressed_pages[page_index]);
+
+        kfree(cb->compressed_pages);
+fail1:
+        kfree(cb);
+out:
+        free_extent_map(em);
+        return ret;
 }

 static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
@@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
         return ret;
 }

-void __exit btrfs_exit_compress(void)
+void btrfs_exit_compress(void)
 {
         free_workspaces();
 }
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b531c36455d8..fdce8799b98d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg)
                 spin_unlock(&root->fs_info->new_trans_lock);

                 trans = btrfs_join_transaction(root, 1);
+                BUG_ON(IS_ERR(trans));
                 if (transid == trans->transid) {
                         ret = btrfs_commit_transaction(trans, root);
                         BUG_ON(ret);
@@ -2453,10 +2454,14 @@ int btrfs_commit_super(struct btrfs_root *root)
         up_write(&root->fs_info->cleanup_work_sem);

         trans = btrfs_join_transaction(root, 1);
+        if (IS_ERR(trans))
+                return PTR_ERR(trans);
         ret = btrfs_commit_transaction(trans, root);
         BUG_ON(ret);
         /* run commit again to drop the original snapshot */
         trans = btrfs_join_transaction(root, 1);
+        if (IS_ERR(trans))
+                return PTR_ERR(trans);
         btrfs_commit_transaction(trans, root);
         ret = btrfs_write_and_wait_transaction(NULL, root);
         BUG_ON(ret);
@@ -2554,6 +2559,8 @@ int close_ctree(struct btrfs_root *root)
         kfree(fs_info->chunk_root);
         kfree(fs_info->dev_root);
         kfree(fs_info->csum_root);
+        kfree(fs_info);
+
         return 0;
 }

diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 9786963b07e5..ff27d7a477b2 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
         int ret;

         path = btrfs_alloc_path();
+        if (!path)
+                return ERR_PTR(-ENOMEM);

         if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
                 key.objectid = root->root_key.objectid;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b55269340cec..4e7e012ad667 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -320,11 +320,6 @@ static int caching_kthread(void *data)
         if (!path)
                 return -ENOMEM;

-        exclude_super_stripes(extent_root, block_group);
-        spin_lock(&block_group->space_info->lock);
-        block_group->space_info->bytes_readonly += block_group->bytes_super;
-        spin_unlock(&block_group->space_info->lock);
-
         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

         /*
@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                         cache->cached = BTRFS_CACHE_NO;
                 }
                 spin_unlock(&cache->lock);
-                if (ret == 1)
+                if (ret == 1) {
+                        free_excluded_extents(fs_info->extent_root, cache);
                         return 0;
+                }
         }

         if (load_cache_only)
@@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
         u64 reserved;
         u64 max_reclaim;
         u64 reclaimed = 0;
+        long time_left;
         int pause = 1;
         int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+        int loops = 0;

         block_rsv = &root->fs_info->delalloc_block_rsv;
         space_info = block_rsv->space_info;
@@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,

         max_reclaim = min(reserved, to_reclaim);

-        while (1) {
+        while (loops < 1024) {
                 /* have the flusher threads jump in and do some IO */
                 smp_mb();
                 nr_pages = min_t(unsigned long, nr_pages,
@@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);

                 spin_lock(&space_info->lock);
-                if (reserved > space_info->bytes_reserved)
+                if (reserved > space_info->bytes_reserved) {
+                        loops = 0;
                         reclaimed += reserved - space_info->bytes_reserved;
+                } else {
+                        loops++;
+                }
                 reserved = space_info->bytes_reserved;
                 spin_unlock(&space_info->lock);

@@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                         return -EAGAIN;

                 __set_current_state(TASK_INTERRUPTIBLE);
-                schedule_timeout(pause);
+                time_left = schedule_timeout(pause);
+
+                /* We were interrupted, exit */
+                if (time_left)
+                        break;
+
                 pause <<= 1;
                 if (pause > HZ / 10)
                         pause = HZ / 10;
@@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,

         if (num_bytes > 0) {
                 if (dest) {
-                        block_rsv_add_bytes(dest, num_bytes, 0);
-                } else {
+                        spin_lock(&dest->lock);
+                        if (!dest->full) {
+                                u64 bytes_to_add;
+
+                                bytes_to_add = dest->size - dest->reserved;
+                                bytes_to_add = min(num_bytes, bytes_to_add);
+                                dest->reserved += bytes_to_add;
+                                if (dest->reserved >= dest->size)
+                                        dest->full = 1;
+                                num_bytes -= bytes_to_add;
+                        }
+                        spin_unlock(&dest->lock);
+                }
+                if (num_bytes) {
                         spin_lock(&space_info->lock);
                         space_info->bytes_reserved -= num_bytes;
                         spin_unlock(&space_info->lock);
@@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)

         num_bytes = ALIGN(num_bytes, root->sectorsize);
         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+        WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);

         spin_lock(&BTRFS_I(inode)->accounting_lock);
         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
               struct btrfs_root *root, u32 blocksize)
 {
         struct btrfs_block_rsv *block_rsv;
+        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
         int ret;

         block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
         if (block_rsv->size == 0) {
                 ret = reserve_metadata_bytes(trans, root, block_rsv,
                                              blocksize, 0);
-                if (ret)
+                /*
+                 * If we couldn't reserve metadata bytes try and use some from
+                 * the global reserve.
+                 */
+                if (ret && block_rsv != global_rsv) {
+                        ret = block_rsv_use_bytes(global_rsv, blocksize);
+                        if (!ret)
+                                return global_rsv;
+                        return ERR_PTR(ret);
+                } else if (ret) {
                         return ERR_PTR(ret);
+                }
                 return block_rsv;
         }

         ret = block_rsv_use_bytes(block_rsv, blocksize);
         if (!ret)
                 return block_rsv;
+        if (ret) {
+                WARN_ON(1);
+                ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+                                             0);
+                if (!ret) {
+                        spin_lock(&block_rsv->lock);
+                        block_rsv->size += blocksize;
+                        spin_unlock(&block_rsv->lock);
+                        return block_rsv;
+                } else if (ret && block_rsv != global_rsv) {
+                        ret = block_rsv_use_bytes(global_rsv, blocksize);
+                        if (!ret)
+                                return global_rsv;
+                }
+        }

         return ERR_PTR(-ENOSPC);
 }
@@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
         BUG_ON(!wc);

         trans = btrfs_start_transaction(tree_root, 0);
+        BUG_ON(IS_ERR(trans));
+
         if (block_rsv)
                 trans->block_rsv = block_rsv;

@@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,

                         btrfs_end_transaction_throttle(trans, tree_root);
                         trans = btrfs_start_transaction(tree_root, 0);
+                        BUG_ON(IS_ERR(trans));
                         if (block_rsv)
                                 trans->block_rsv = block_rsv;
                 }
@@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
         int ret = 0;

         ra = kzalloc(sizeof(*ra), GFP_NOFS);
+        if (!ra)
+                return -ENOMEM;

         mutex_lock(&inode->i_mutex);
         first_index = start >> PAGE_CACHE_SHIFT;
@@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
                 BUG_ON(reloc_root->commit_root != NULL);
                 while (1) {
                         trans = btrfs_join_transaction(root, 1);
-                        BUG_ON(!trans);
+                        BUG_ON(IS_ERR(trans));

                         mutex_lock(&root->fs_info->drop_mutex);
                         ret = btrfs_drop_snapshot(trans, reloc_root);
@@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)

         if (found) {
                 trans = btrfs_start_transaction(root, 1);
-                BUG_ON(!trans);
+                BUG_ON(IS_ERR(trans));
                 ret = btrfs_commit_transaction(trans, root);
                 BUG_ON(ret);
         }
@@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,


         trans = btrfs_start_transaction(extent_root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));

         if (extent_key->objectid == 0) {
                 ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -8270,6 +8322,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                 if (block_group->cached == BTRFS_CACHE_STARTED)
                         wait_block_group_cache_done(block_group);

+                /*
+                 * We haven't cached this block group, which means we could
+                 * possibly have excluded extents on this block group.
+                 */
+                if (block_group->cached == BTRFS_CACHE_NO)
+                        free_excluded_extents(info->extent_root, block_group);
+
                 btrfs_remove_free_space_cache(block_group);
                 btrfs_put_block_group(block_group);

@@ -8385,6 +8444,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                 cache->sectorsize = root->sectorsize;

                 /*
+                 * We need to exclude the super stripes now so that the space
+                 * info has super bytes accounted for, otherwise we'll think
+                 * we have more space than we actually do.
+                 */
+                exclude_super_stripes(root, cache);
+
+                /*
                  * check for two cases, either we are full, and therefore
                  * don't need to bother with the caching work since we won't
                  * find any space, or we are empty, and we can just add all
@@ -8392,12 +8458,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                  * time, particularly in the full case.
                  */
                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-                        exclude_super_stripes(root, cache);
                         cache->last_byte_to_unpin = (u64)-1;
                         cache->cached = BTRFS_CACHE_FINISHED;
                         free_excluded_extents(root, cache);
                 } else if (btrfs_block_group_used(&cache->item) == 0) {
-                        exclude_super_stripes(root, cache);
                         cache->last_byte_to_unpin = (u64)-1;
                         cache->cached = BTRFS_CACHE_FINISHED;
                         add_new_free_space(cache, root->fs_info,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2e993cf1766e..5e76a474cb7e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
         bio_get(bio);

         if (tree->ops && tree->ops->submit_bio_hook)
-                tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+                ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
                                            mirror_num, bio_flags, start);
         else
                 submit_bio(rw, bio);
@@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                 nr = bio_get_nr_vecs(bdev);

         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+        if (!bio)
+                return -ENOMEM;

         bio_add_page(bio, page, page_size, offset);
         bio->bi_end_io = end_io_func;
@@ -2126,7 +2128,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
                                       &bio_flags);
         if (bio)
-                submit_one_bio(READ, bio, 0, bio_flags);
+                ret = submit_one_bio(READ, bio, 0, bio_flags);
         return ret;
 }

diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a562a250ae77..4f19a3e1bf32 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
         root = root->fs_info->csum_root;

         path = btrfs_alloc_path();
+        if (!path)
+                return -ENOMEM;

         while (1) {
                 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                         if (path->slots[0] == 0)
                                 goto out;
                         path->slots[0]--;
+                } else if (ret < 0) {
+                        goto out;
                 }
+
                 leaf = path->nodes[0];
                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c800d58f3013..c1d3a818731a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -793,8 +793,12 @@ again:
         for (i = 0; i < num_pages; i++) {
                 pages[i] = grab_cache_page(inode->i_mapping, index + i);
                 if (!pages[i]) {
-                        err = -ENOMEM;
-                        BUG_ON(1);
+                        int c;
+                        for (c = i - 1; c >= 0; c--) {
+                                unlock_page(pages[c]);
+                                page_cache_release(pages[c]);
+                        }
+                        return -ENOMEM;
                 }
                 wait_on_page_writeback(pages[i]);
         }
@@ -946,6 +950,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                      PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                      (sizeof(struct page *)));
         pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+        if (!pages) {
+                ret = -ENOMEM;
+                goto out;
+        }

         /* generic_write_checks can change our pos */
         start_pos = pos;
@@ -984,8 +992,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                 size_t write_bytes = min(iov_iter_count(&i),
                                          nrptrs * (size_t)PAGE_CACHE_SIZE -
                                          offset);
-                size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
-                                        PAGE_CACHE_SHIFT;
+                size_t num_pages = (write_bytes + offset +
+                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

                 WARN_ON(num_pages > nrptrs);
                 memset(pages, 0, sizeof(struct page *) * nrptrs);
@@ -1015,8 +1023,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,

                 copied = btrfs_copy_from_user(pos, num_pages,
                                            write_bytes, pages, &i);
-                dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
+                dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
                                 PAGE_CACHE_SHIFT;

                 if (num_pages > dirty_pages) {
                         if (copied > 0)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 60d684266959..a0390657451b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
         return entry;
 }

-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-                              struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+                    struct btrfs_free_space *info)
 {
         rb_erase(&info->offset_index, &block_group->free_space_offset);
         block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+                              struct btrfs_free_space *info)
+{
+        __unlink_free_space(block_group, info);
         block_group->free_space -= info->bytes;
 }

@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
         u64 max_bytes;
         u64 bitmap_bytes;
         u64 extent_bytes;
+        u64 size = block_group->key.offset;

         /*
          * The goal is to keep the total amount of memory used per 1gb of space
          * at or below 32k, so we need to adjust how much memory we allow to be
          * used by extent based free space tracking
          */
-        max_bytes = MAX_CACHE_BYTES_PER_GIG *
-                (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+        if (size < 1024 * 1024 * 1024)
+                max_bytes = MAX_CACHE_BYTES_PER_GIG;
+        else
+                max_bytes = MAX_CACHE_BYTES_PER_GIG *
+                        div64_u64(size, 1024 * 1024 * 1024);

         /*
          * we want to account for 1 more bitmap than what we have so we can make
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
         recalculate_thresholds(block_group);
 }

+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+                        struct btrfs_free_space *bitmap_info)
+{
+        unlink_free_space(block_group, bitmap_info);
+        kfree(bitmap_info->bitmap);
+        kfree(bitmap_info);
+        block_group->total_bitmaps--;
+        recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_space *bitmap_info,
                               u64 *offset, u64 *bytes)
@@ -1195,6 +1216,7 @@ again:
          */
         search_start = *offset;
         search_bytes = *bytes;
+        search_bytes = min(search_bytes, end - search_start + 1);
         ret = search_bitmap(block_group, bitmap_info, &search_start,
                             &search_bytes);
         BUG_ON(ret < 0 || search_start != *offset);
@@ -1211,13 +1233,8 @@ again:

         if (*bytes) {
                 struct rb_node *next = rb_next(&bitmap_info->offset_index);
-                if (!bitmap_info->bytes) {
-                        unlink_free_space(block_group, bitmap_info);
-                        kfree(bitmap_info->bitmap);
-                        kfree(bitmap_info);
-                        block_group->total_bitmaps--;
-                        recalculate_thresholds(block_group);
-                }
+                if (!bitmap_info->bytes)
+                        free_bitmap(block_group, bitmap_info);

                 /*
                  * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@ again:
                         return -EAGAIN;

                 goto again;
-        } else if (!bitmap_info->bytes) {
-                unlink_free_space(block_group, bitmap_info);
-                kfree(bitmap_info->bitmap);
-                kfree(bitmap_info);
-                block_group->total_bitmaps--;
-                recalculate_thresholds(block_group);
-        }
+        } else if (!bitmap_info->bytes)
+                free_bitmap(block_group, bitmap_info);

         return 0;
 }
@@ -1359,22 +1371,14 @@ out:
         return ret;
 }

-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                         u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+                          struct btrfs_free_space *info, bool update_stat)
 {
-        struct btrfs_free_space *right_info = NULL;
-        struct btrfs_free_space *left_info = NULL;
-        struct btrfs_free_space *info = NULL;
-        int ret = 0;
-
-        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-        if (!info)
-                return -ENOMEM;
-
-        info->offset = offset;
-        info->bytes = bytes;
-
-        spin_lock(&block_group->tree_lock);
+        struct btrfs_free_space *left_info;
+        struct btrfs_free_space *right_info;
+        bool merged = false;
+        u64 offset = info->offset;
+        u64 bytes = info->bytes;

         /*
          * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         else
                 left_info = tree_search_offset(block_group, offset - 1, 0, 0);

-        /*
-         * If there was no extent directly to the left or right of this new
-         * extent then we know we're going to have to allocate a new extent, so
-         * before we do that see if we need to drop this into a bitmap
-         */
-        if ((!left_info || left_info->bitmap) &&
-            (!right_info || right_info->bitmap)) {
-                ret = insert_into_bitmap(block_group, info);
-
-                if (ret < 0) {
-                        goto out;
-                } else if (ret) {
-                        ret = 0;
-                        goto out;
-                }
-        }
-
         if (right_info && !right_info->bitmap) {
-                unlink_free_space(block_group, right_info);
+                if (update_stat)
+                        unlink_free_space(block_group, right_info);
+                else
+                        __unlink_free_space(block_group, right_info);
                 info->bytes += right_info->bytes;
                 kfree(right_info);
+                merged = true;
         }

         if (left_info && !left_info->bitmap &&
             left_info->offset + left_info->bytes == offset) {
-                unlink_free_space(block_group, left_info);
+                if (update_stat)
+                        unlink_free_space(block_group, left_info);
+                else
+                        __unlink_free_space(block_group, left_info);
                 info->offset = left_info->offset;
                 info->bytes += left_info->bytes;
                 kfree(left_info);
+                merged = true;
         }

+        return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                         u64 offset, u64 bytes)
+{
+        struct btrfs_free_space *info;
+        int ret = 0;
+
+        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+        if (!info)
+                return -ENOMEM;
+
+        info->offset = offset;
+        info->bytes = bytes;
+
+        spin_lock(&block_group->tree_lock);
+
+        if (try_merge_free_space(block_group, info, true))
+                goto link;
+
+        /*
+         * There was no extent directly to the left or right of this new
+         * extent then we know we're going to have to allocate a new extent, so
+         * before we do that see if we need to drop this into a bitmap
+         */
+        ret = insert_into_bitmap(block_group, info);
+        if (ret < 0) {
+                goto out;
+        } else if (ret) {
+                ret = 0;
+                goto out;
+        }
+link:
         ret = link_free_space(block_group, info);
         if (ret)
                 kfree(info);
@@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space(
                 node = rb_next(&entry->offset_index);
                 rb_erase(&entry->offset_index, &cluster->root);
                 BUG_ON(entry->bitmap);
+                try_merge_free_space(block_group, entry, false);
                 tree_insert_offset(&block_group->free_space_offset,
                                    entry->offset, &entry->offset_index, 0);
         }
@@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
         ret = offset;
         if (entry->bitmap) {
                 bitmap_clear_bits(block_group, entry, offset, bytes);
-                if (!entry->bytes) {
-                        unlink_free_space(block_group, entry);
-                        kfree(entry->bitmap);
-                        kfree(entry);
-                        block_group->total_bitmaps--;
-                        recalculate_thresholds(block_group);
-                }
+                if (!entry->bytes)
+                        free_bitmap(block_group, entry);
         } else {
                 unlink_free_space(block_group, entry);
                 entry->offset += bytes;
@@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,

         ret = search_start;
         bitmap_clear_bits(block_group, entry, ret, bytes);
+        if (entry->bytes == 0)
+                free_bitmap(block_group, entry);
 out:
         spin_unlock(&cluster->lock);
         spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                 entry->offset += bytes;
                 entry->bytes -= bytes;

-                if (entry->bytes == 0) {
+                if (entry->bytes == 0)
                         rb_erase(&entry->offset_index, &cluster->root);
-                        kfree(entry);
-                }
                 break;
         }
 out:
         spin_unlock(&cluster->lock);

+        if (!ret)
+                return 0;
+
+        spin_lock(&block_group->tree_lock);
+
+        block_group->free_space -= bytes;
+        if (entry->bytes == 0) {
+                block_group->free_extents--;
+                kfree(entry);
+        }
+
+        spin_unlock(&block_group->tree_lock);
+
         return ret;
 }

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 160b55b3e132..bcc461a9695f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -416,7 +416,7 @@ again:
         }
         if (start == 0) {
                 trans = btrfs_join_transaction(root, 1);
-                BUG_ON(!trans);
+                BUG_ON(IS_ERR(trans));
                 btrfs_set_trans_block_group(trans, inode);
                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;

@@ -612,6 +612,7 @@ retry:
                                      GFP_NOFS);

                 trans = btrfs_join_transaction(root, 1);
+                BUG_ON(IS_ERR(trans));
                 ret = btrfs_reserve_extent(trans, root,
                                            async_extent->compressed_size,
                                            async_extent->compressed_size,
@@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode,

         BUG_ON(root == root->fs_info->tree_root);
         trans = btrfs_join_transaction(root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
         btrfs_set_trans_block_group(trans, inode);
         trans->block_rsv = &root->fs_info->delalloc_block_rsv;

@@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
         } else {
                 trans = btrfs_join_transaction(root, 1);
         }
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));

         cow_start = (u64)-1;
         cur_offset = start;
@@ -1557,6 +1558,7 @@ out:
 out_page:
         unlock_page(page);
         page_cache_release(page);
+        kfree(fixup);
 }

 /*
@@ -1703,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                         trans = btrfs_join_transaction_nolock(root, 1);
                 else
                         trans = btrfs_join_transaction(root, 1);
-                BUG_ON(!trans);
+                BUG_ON(IS_ERR(trans));
                 btrfs_set_trans_block_group(trans, inode);
                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                 ret = btrfs_update_inode(trans, root, inode);
@@ -1720,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                 trans = btrfs_join_transaction_nolock(root, 1);
         else
                 trans = btrfs_join_transaction(root, 1);
+        BUG_ON(IS_ERR(trans));
         btrfs_set_trans_block_group(trans, inode);
         trans->block_rsv = &root->fs_info->delalloc_block_rsv;

@@ -2354,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
                  */
                 if (is_bad_inode(inode)) {
                         trans = btrfs_start_transaction(root, 0);
+                        BUG_ON(IS_ERR(trans));
                         btrfs_orphan_del(trans, inode);
                         btrfs_end_transaction(trans, root);
                         iput(inode);
@@ -2381,6 +2385,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)

         if (root->orphan_block_rsv || root->orphan_item_inserted) {
                 trans = btrfs_join_transaction(root, 1);
+                BUG_ON(IS_ERR(trans));
                 btrfs_end_transaction(trans, root);
         }

@@ -2641,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
         path = btrfs_alloc_path();
         if (!path) {
                 ret = -ENOMEM;
-                goto err;
+                goto out;
         }

         path->leave_spinning = 1;
@@ -2714,9 +2719,10 @@ static int check_path_shared(struct btrfs_root *root,
         struct extent_buffer *eb;
         int level;
         u64 refs = 1;
-        int uninitialized_var(ret);

         for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+                int ret;
+
                 if (!path->nodes[level])
                         break;
                 eb = path->nodes[level];
@@ -2727,7 +2733,7 @@ static int check_path_shared(struct btrfs_root *root,
                 if (refs > 1)
                         return 1;
         }
-        return ret; /* XXX callers? */
+        return 0;
 }

 /*
@@ -4134,7 +4140,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
         }
         srcu_read_unlock(&root->fs_info->subvol_srcu, index);

-        if (root != sub_root) {
+        if (!IS_ERR(inode) && root != sub_root) {
                 down_read(&root->fs_info->cleanup_work_sem);
                 if (!(inode->i_sb->s_flags & MS_RDONLY))
                         btrfs_orphan_cleanup(sub_root);
@@ -4347,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                         trans = btrfs_join_transaction_nolock(root, 1);
                 else
                         trans = btrfs_join_transaction(root, 1);
+                if (IS_ERR(trans))
+                        return PTR_ERR(trans);
                 btrfs_set_trans_block_group(trans, inode);
                 if (nolock)
                         ret = btrfs_end_transaction_nolock(trans, root);
@@ -4372,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode)
                 return;

         trans = btrfs_join_transaction(root, 1);
+        BUG_ON(IS_ERR(trans));
         btrfs_set_trans_block_group(trans, inode);

         ret = btrfs_update_inode(trans, root, inode);
@@ -5176,6 +5185,8 @@ again:
                                 em = NULL;
                                 btrfs_release_path(root, path);
                                 trans = btrfs_join_transaction(root, 1);
+                                if (IS_ERR(trans))
+                                        return ERR_CAST(trans);
                                 goto again;
                         }
                         map = kmap(page);
@@ -5280,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
         btrfs_drop_extent_cache(inode, start, start + len - 1, 0);

         trans = btrfs_join_transaction(root, 0);
-        if (!trans)
-                return ERR_PTR(-ENOMEM);
+        if (IS_ERR(trans))
+                return ERR_CAST(trans);

         trans->block_rsv = &root->fs_info->delalloc_block_rsv;

@@ -5505,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                  * while we look for nocow cross refs
                  */
                 trans = btrfs_join_transaction(root, 0);
-                if (!trans)
+                if (IS_ERR(trans))
                         goto must_cow;

                 if (can_nocow_odirect(trans, inode, start, len) == 1) {
@@ -5640,7 +5651,7 @@ again:
                 BUG_ON(!ordered);

                 trans = btrfs_join_transaction(root, 1);
-                if (!trans) {
+                if (IS_ERR(trans)) {
                         err = -ENOMEM;
                         goto out;
                 }
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a506a22b522a..02d224e8c83f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)


         trans = btrfs_join_transaction(root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));

         ret = btrfs_update_inode(trans, root, inode);
         BUG_ON(ret);
@@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,

         if (new_size > old_size) {
                 trans = btrfs_start_transaction(root, 0);
+                if (IS_ERR(trans)) {
+                        ret = PTR_ERR(trans);
+                        goto out_unlock;
+                }
                 ret = btrfs_grow_device(trans, device, new_size);
                 btrfs_commit_transaction(trans, root);
         } else {
@@ -1898,7 +1902,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,

                 memcpy(&new_key, &key, sizeof(new_key));
                 new_key.objectid = inode->i_ino;
-                new_key.offset = key.offset + destoff - off;
+                if (off <= key.offset)
+                        new_key.offset = key.offset + destoff - off;
+                else
+                        new_key.offset = destoff;

                 trans = btrfs_start_transaction(root, 1);
                 if (IS_ERR(trans)) {
@@ -2082,7 +2089,7 @@ static long btrfs_ioctl_trans_start(struct file *file)

         ret = -ENOMEM;
         trans = btrfs_start_ioctl_transaction(root, 0);
-        if (!trans)
+        if (IS_ERR(trans))
                 goto out_drop;

         file->private_data = trans;
@@ -2138,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
         path->leave_spinning = 1;

         trans = btrfs_start_transaction(root, 1);
-        if (!trans) {
+        if (IS_ERR(trans)) {
                 btrfs_free_path(path);
-                return -ENOMEM;
+                return PTR_ERR(trans);
         }

         dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@@ -2334,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
         u64 transid;

         trans = btrfs_start_transaction(root, 0);
+        if (IS_ERR(trans))
+                return PTR_ERR(trans);
         transid = trans->transid;
         btrfs_commit_transaction_async(trans, root, 0);

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2b61e1ddcd99..083a55477375 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                           u64 file_offset)
 {
         struct rb_root *root = &tree->tree;
-        struct rb_node *prev;
+        struct rb_node *prev = NULL;
         struct rb_node *ret;
         struct btrfs_ordered_extent *entry;

diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 0d126be22b63..fb2605d998e9 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 #else
                         BUG();
 #endif
+                        break;
                 case BTRFS_BLOCK_GROUP_ITEM_KEY:
                         bi = btrfs_item_ptr(l, i,
                                             struct btrfs_block_group_item);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 045c9c2b2d7e..1f5556acb530 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,

         while (1) {
                 trans = btrfs_start_transaction(root, 0);
+                BUG_ON(IS_ERR(trans));
                 trans->block_rsv = rc->block_rsv;

                 ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2147,6 +2148,12 @@ again:
         }

         trans = btrfs_join_transaction(rc->extent_root, 1);
+        if (IS_ERR(trans)) {
+                if (!err)
+                        btrfs_block_rsv_release(rc->extent_root,
+                                                rc->block_rsv, num_bytes);
+                return PTR_ERR(trans);
+        }

         if (!err) {
                 if (num_bytes != rc->merging_rsv_size) {
@@ -3222,6 +3229,7 @@ truncate:
         trans = btrfs_join_transaction(root, 0);
         if (IS_ERR(trans)) {
                 btrfs_free_path(path);
+                ret = PTR_ERR(trans);
                 goto out;
         }

@@ -3628,6 +3636,7 @@ int prepare_to_relocate(struct reloc_control *rc)
         set_reloc_control(rc);

         trans = btrfs_join_transaction(rc->extent_root, 1);
+        BUG_ON(IS_ERR(trans));
         btrfs_commit_transaction(trans, rc->extent_root);
         return 0;
 }
@@ -3657,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)

         while (1) {
                 trans = btrfs_start_transaction(rc->extent_root, 0);
+                BUG_ON(IS_ERR(trans));

                 if (update_backref_cache(trans, &rc->backref_cache)) {
                         btrfs_end_transaction(trans, rc->extent_root);
@@ -3804,7 +3814,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)

         /* get rid of pinned extents */
         trans = btrfs_join_transaction(rc->extent_root, 1);
-        btrfs_commit_transaction(trans, rc->extent_root);
+        if (IS_ERR(trans))
+                err = PTR_ERR(trans);
+        else
+                btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
         btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
         btrfs_free_path(path);
@@ -4022,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
         int ret;

         trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+        BUG_ON(IS_ERR(trans));

         memset(&root->root_item.drop_progress, 0,
                sizeof(root->root_item.drop_progress));
@@ -4125,6 +4139,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
         set_reloc_control(rc);

         trans = btrfs_join_transaction(rc->extent_root, 1);
+        if (IS_ERR(trans)) {
+                unset_reloc_control(rc);
+                err = PTR_ERR(trans);
+                goto out_free;
+        }

         rc->merge_reloc_tree = 1;

@@ -4154,9 +4173,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
         unset_reloc_control(rc);

         trans = btrfs_join_transaction(rc->extent_root, 1);
-        btrfs_commit_transaction(trans, rc->extent_root);
-out:
+        if (IS_ERR(trans))
+                err = PTR_ERR(trans);
+        else
+                btrfs_commit_transaction(trans, rc->extent_root);
4180 | out_free: | ||
4159 | kfree(rc); | 4181 | kfree(rc); |
4182 | out: | ||
4160 | while (!list_empty(&reloc_roots)) { | 4183 | while (!list_empty(&reloc_roots)) { |
4161 | reloc_root = list_entry(reloc_roots.next, | 4184 | reloc_root = list_entry(reloc_roots.next, |
4162 | struct btrfs_root, root_list); | 4185 | struct btrfs_root, root_list); |
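In btrfs_recover_relocation() the labels are reordered: the new out_free frees the reloc_control even when the final transaction join fails, and control then falls into the common out path that drains reloc_roots. A generic sketch of that goto-ladder shape, with placeholder names rather than the btrfs functions:

#include <stdlib.h>

struct ctl { int dummy; };

static void *join_transaction(int fail) { return fail ? NULL : malloc(1); }

static int recover(int fail_join)
{
        int err = 0;
        struct ctl *rc = malloc(sizeof(*rc));
        void *trans;

        if (!rc)
                return -12;             /* -ENOMEM */

        trans = join_transaction(fail_join);
        if (!trans) {
                err = -5;               /* -EIO: fall through to out_free */
                goto out_free;
        }
        free(trans);                    /* stands in for the commit */
out_free:
        free(rc);                       /* released on success and failure alike */
        /* out: the common tail (draining lists, etc.) would follow here */
        return err;
}

int main(void)
{
        return recover(1) ? 1 : 0;
}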
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b2130c46fdb5..a004008f7d28 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
383 | struct btrfs_fs_devices **fs_devices) | 383 | struct btrfs_fs_devices **fs_devices) |
384 | { | 384 | { |
385 | substring_t args[MAX_OPT_ARGS]; | 385 | substring_t args[MAX_OPT_ARGS]; |
386 | char *opts, *p; | 386 | char *opts, *orig, *p; |
387 | int error = 0; | 387 | int error = 0; |
388 | int intarg; | 388 | int intarg; |
389 | 389 | ||
@@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
397 | opts = kstrdup(options, GFP_KERNEL); | 397 | opts = kstrdup(options, GFP_KERNEL); |
398 | if (!opts) | 398 | if (!opts) |
399 | return -ENOMEM; | 399 | return -ENOMEM; |
400 | orig = opts; | ||
400 | 401 | ||
401 | while ((p = strsep(&opts, ",")) != NULL) { | 402 | while ((p = strsep(&opts, ",")) != NULL) { |
402 | int token; | 403 | int token; |
@@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
432 | } | 433 | } |
433 | 434 | ||
434 | out_free_opts: | 435 | out_free_opts: |
435 | kfree(opts); | 436 | kfree(orig); |
436 | out: | 437 | out: |
437 | /* | 438 | /* |
438 | * If no subvolume name is specified we use the default one. Allocate | 439 | * If no subvolume name is specified we use the default one. Allocate |
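The super.c change saves the kstrdup()'d pointer in orig before the strsep() loop: strsep() advances opts as it consumes tokens, so freeing opts afterwards would either free NULL (leaking the buffer) or, on the early-error gotos, free a pointer into the middle of the allocation. The same bug and fix can be shown with plain libc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *opts = strdup("subvol=root,degraded,device=/dev/sda");
        char *orig = opts;              /* keep the original allocation */
        char *p;

        if (!opts)
                return 1;

        while ((p = strsep(&opts, ",")) != NULL)
                printf("option: %s\n", p);

        /* opts is now NULL; free the saved original instead */
        free(orig);
        return 0;
}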
@@ -623,6 +624,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
623 | btrfs_wait_ordered_extents(root, 0, 0); | 624 | btrfs_wait_ordered_extents(root, 0, 0); |
624 | 625 | ||
625 | trans = btrfs_start_transaction(root, 0); | 626 | trans = btrfs_start_transaction(root, 0); |
627 | if (IS_ERR(trans)) | ||
628 | return PTR_ERR(trans); | ||
626 | ret = btrfs_commit_transaction(trans, root); | 629 | ret = btrfs_commit_transaction(trans, root); |
627 | return ret; | 630 | return ret; |
628 | } | 631 | } |
@@ -761,6 +764,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
761 | } | 764 | } |
762 | 765 | ||
763 | btrfs_close_devices(fs_devices); | 766 | btrfs_close_devices(fs_devices); |
767 | kfree(fs_info); | ||
768 | kfree(tree_root); | ||
764 | } else { | 769 | } else { |
765 | char b[BDEVNAME_SIZE]; | 770 | char b[BDEVNAME_SIZE]; |
766 | 771 | ||
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index bae5c7b8bbe2..3d73c8d93bbb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
1161 | INIT_DELAYED_WORK(&ac->work, do_async_commit); | 1161 | INIT_DELAYED_WORK(&ac->work, do_async_commit); |
1162 | ac->root = root; | 1162 | ac->root = root; |
1163 | ac->newtrans = btrfs_join_transaction(root, 0); | 1163 | ac->newtrans = btrfs_join_transaction(root, 0); |
1164 | if (IS_ERR(ac->newtrans)) { | ||
1165 | int err = PTR_ERR(ac->newtrans); | ||
1166 | kfree(ac); | ||
1167 | return err; | ||
1168 | } | ||
1164 | 1169 | ||
1165 | /* take transaction reference */ | 1170 | /* take transaction reference */ |
1166 | mutex_lock(&root->fs_info->trans_mutex); | 1171 | mutex_lock(&root->fs_info->trans_mutex); |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 054744ac5719..a4bbb854dfd2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
338 | } | 338 | } |
339 | dst_copy = kmalloc(item_size, GFP_NOFS); | 339 | dst_copy = kmalloc(item_size, GFP_NOFS); |
340 | src_copy = kmalloc(item_size, GFP_NOFS); | 340 | src_copy = kmalloc(item_size, GFP_NOFS); |
341 | if (!dst_copy || !src_copy) { | ||
342 | btrfs_release_path(root, path); | ||
343 | kfree(dst_copy); | ||
344 | kfree(src_copy); | ||
345 | return -ENOMEM; | ||
346 | } | ||
341 | 347 | ||
342 | read_extent_buffer(eb, src_copy, src_ptr, item_size); | 348 | read_extent_buffer(eb, src_copy, src_ptr, item_size); |
343 | 349 | ||
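The overwrite_item() hunk checks both kmalloc() results in one place and frees both, relying on kfree(NULL) being a no-op, so a single error path covers either allocation failing. The libc equivalent (free(NULL) is likewise defined to do nothing):

#include <stdlib.h>
#include <string.h>

/* copy an item into two scratch buffers; -1 on allocation failure */
static int copy_item(const char *item, size_t item_size,
                     char **dst_out, char **src_out)
{
        char *dst_copy = malloc(item_size);
        char *src_copy = malloc(item_size);

        if (!dst_copy || !src_copy) {
                free(dst_copy);         /* safe even if NULL */
                free(src_copy);
                return -1;
        }

        memcpy(src_copy, item, item_size);
        memcpy(dst_copy, item, item_size);
        *dst_out = dst_copy;
        *src_out = src_copy;
        return 0;
}

int main(void)
{
        char *d, *s;

        if (copy_item("hello", 6, &d, &s))
                return 1;
        free(d);
        free(s);
        return 0;
}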
@@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, | |||
665 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | 671 | btrfs_dir_item_key_to_cpu(leaf, di, &location); |
666 | name_len = btrfs_dir_name_len(leaf, di); | 672 | name_len = btrfs_dir_name_len(leaf, di); |
667 | name = kmalloc(name_len, GFP_NOFS); | 673 | name = kmalloc(name_len, GFP_NOFS); |
674 | if (!name) | ||
675 | return -ENOMEM; | ||
676 | |||
668 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); | 677 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); |
669 | btrfs_release_path(root, path); | 678 | btrfs_release_path(root, path); |
670 | 679 | ||
@@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log, | |||
744 | int match = 0; | 753 | int match = 0; |
745 | 754 | ||
746 | path = btrfs_alloc_path(); | 755 | path = btrfs_alloc_path(); |
756 | if (!path) | ||
757 | return -ENOMEM; | ||
758 | |||
747 | ret = btrfs_search_slot(NULL, log, key, path, 0, 0); | 759 | ret = btrfs_search_slot(NULL, log, key, path, 0, 0); |
748 | if (ret != 0) | 760 | if (ret != 0) |
749 | goto out; | 761 | goto out; |
@@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
967 | key.offset = (u64)-1; | 979 | key.offset = (u64)-1; |
968 | 980 | ||
969 | path = btrfs_alloc_path(); | 981 | path = btrfs_alloc_path(); |
982 | if (!path) | ||
983 | return -ENOMEM; | ||
970 | 984 | ||
971 | while (1) { | 985 | while (1) { |
972 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 986 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
@@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |||
1178 | 1192 | ||
1179 | name_len = btrfs_dir_name_len(eb, di); | 1193 | name_len = btrfs_dir_name_len(eb, di); |
1180 | name = kmalloc(name_len, GFP_NOFS); | 1194 | name = kmalloc(name_len, GFP_NOFS); |
1195 | if (!name) | ||
1196 | return -ENOMEM; | ||
1197 | |||
1181 | log_type = btrfs_dir_type(eb, di); | 1198 | log_type = btrfs_dir_type(eb, di); |
1182 | read_extent_buffer(eb, name, (unsigned long)(di + 1), | 1199 | read_extent_buffer(eb, name, (unsigned long)(di + 1), |
1183 | name_len); | 1200 | name_len); |
@@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, | |||
1692 | root_owner = btrfs_header_owner(parent); | 1709 | root_owner = btrfs_header_owner(parent); |
1693 | 1710 | ||
1694 | next = btrfs_find_create_tree_block(root, bytenr, blocksize); | 1711 | next = btrfs_find_create_tree_block(root, bytenr, blocksize); |
1712 | if (!next) | ||
1713 | return -ENOMEM; | ||
1695 | 1714 | ||
1696 | if (*level == 1) { | 1715 | if (*level == 1) { |
1697 | wc->process_func(root, next, wc, ptr_gen); | 1716 | wc->process_func(root, next, wc, ptr_gen); |
@@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2032 | wait_log_commit(trans, log_root_tree, | 2051 | wait_log_commit(trans, log_root_tree, |
2033 | log_root_tree->log_transid); | 2052 | log_root_tree->log_transid); |
2034 | mutex_unlock(&log_root_tree->log_mutex); | 2053 | mutex_unlock(&log_root_tree->log_mutex); |
2054 | ret = 0; | ||
2035 | goto out; | 2055 | goto out; |
2036 | } | 2056 | } |
2037 | atomic_set(&log_root_tree->log_commit[index2], 1); | 2057 | atomic_set(&log_root_tree->log_commit[index2], 1); |
@@ -2096,7 +2116,7 @@ out: | |||
2096 | smp_mb(); | 2116 | smp_mb(); |
2097 | if (waitqueue_active(&root->log_commit_wait[index1])) | 2117 | if (waitqueue_active(&root->log_commit_wait[index1])) |
2098 | wake_up(&root->log_commit_wait[index1]); | 2118 | wake_up(&root->log_commit_wait[index1]); |
2099 | return 0; | 2119 | return ret; |
2100 | } | 2120 | } |
2101 | 2121 | ||
2102 | static void free_log_tree(struct btrfs_trans_handle *trans, | 2122 | static void free_log_tree(struct btrfs_trans_handle *trans, |
@@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2194 | 2214 | ||
2195 | log = root->log_root; | 2215 | log = root->log_root; |
2196 | path = btrfs_alloc_path(); | 2216 | path = btrfs_alloc_path(); |
2217 | if (!path) | ||
2218 | return -ENOMEM; | ||
2219 | |||
2197 | di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, | 2220 | di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, |
2198 | name, name_len, -1); | 2221 | name, name_len, -1); |
2199 | if (IS_ERR(di)) { | 2222 | if (IS_ERR(di)) { |
@@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
2594 | 2617 | ||
2595 | ins_data = kmalloc(nr * sizeof(struct btrfs_key) + | 2618 | ins_data = kmalloc(nr * sizeof(struct btrfs_key) + |
2596 | nr * sizeof(u32), GFP_NOFS); | 2619 | nr * sizeof(u32), GFP_NOFS); |
2620 | if (!ins_data) | ||
2621 | return -ENOMEM; | ||
2622 | |||
2597 | ins_sizes = (u32 *)ins_data; | 2623 | ins_sizes = (u32 *)ins_data; |
2598 | ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); | 2624 | ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); |
2599 | 2625 | ||
@@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2725 | log = root->log_root; | 2751 | log = root->log_root; |
2726 | 2752 | ||
2727 | path = btrfs_alloc_path(); | 2753 | path = btrfs_alloc_path(); |
2754 | if (!path) | ||
2755 | return -ENOMEM; | ||
2728 | dst_path = btrfs_alloc_path(); | 2756 | dst_path = btrfs_alloc_path(); |
2757 | if (!dst_path) { | ||
2758 | btrfs_free_path(path); | ||
2759 | return -ENOMEM; | ||
2760 | } | ||
2729 | 2761 | ||
2730 | min_key.objectid = inode->i_ino; | 2762 | min_key.objectid = inode->i_ino; |
2731 | min_key.type = BTRFS_INODE_ITEM_KEY; | 2763 | min_key.type = BTRFS_INODE_ITEM_KEY; |
@@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) | |||
3080 | BUG_ON(!path); | 3112 | BUG_ON(!path); |
3081 | 3113 | ||
3082 | trans = btrfs_start_transaction(fs_info->tree_root, 0); | 3114 | trans = btrfs_start_transaction(fs_info->tree_root, 0); |
3115 | BUG_ON(IS_ERR(trans)); | ||
3083 | 3116 | ||
3084 | wc.trans = trans; | 3117 | wc.trans = trans; |
3085 | wc.pin = 1; | 3118 | wc.pin = 1; |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d158530233b7..2636a051e4b2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, | |||
1213 | return -ENOMEM; | 1213 | return -ENOMEM; |
1214 | 1214 | ||
1215 | trans = btrfs_start_transaction(root, 0); | 1215 | trans = btrfs_start_transaction(root, 0); |
1216 | if (IS_ERR(trans)) { | ||
1217 | btrfs_free_path(path); | ||
1218 | return PTR_ERR(trans); | ||
1219 | } | ||
1216 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; | 1220 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; |
1217 | key.type = BTRFS_DEV_ITEM_KEY; | 1221 | key.type = BTRFS_DEV_ITEM_KEY; |
1218 | key.offset = device->devid; | 1222 | key.offset = device->devid; |
@@ -1606,6 +1610,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1606 | } | 1610 | } |
1607 | 1611 | ||
1608 | trans = btrfs_start_transaction(root, 0); | 1612 | trans = btrfs_start_transaction(root, 0); |
1613 | if (IS_ERR(trans)) { | ||
1614 | kfree(device); | ||
1615 | ret = PTR_ERR(trans); | ||
1616 | goto error; | ||
1617 | } | ||
1618 | |||
1609 | lock_chunks(root); | 1619 | lock_chunks(root); |
1610 | 1620 | ||
1611 | device->writeable = 1; | 1621 | device->writeable = 1; |
@@ -1873,7 +1883,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, | |||
1873 | return ret; | 1883 | return ret; |
1874 | 1884 | ||
1875 | trans = btrfs_start_transaction(root, 0); | 1885 | trans = btrfs_start_transaction(root, 0); |
1876 | BUG_ON(!trans); | 1886 | BUG_ON(IS_ERR(trans)); |
1877 | 1887 | ||
1878 | lock_chunks(root); | 1888 | lock_chunks(root); |
1879 | 1889 | ||
@@ -2047,7 +2057,7 @@ int btrfs_balance(struct btrfs_root *dev_root) | |||
2047 | BUG_ON(ret); | 2057 | BUG_ON(ret); |
2048 | 2058 | ||
2049 | trans = btrfs_start_transaction(dev_root, 0); | 2059 | trans = btrfs_start_transaction(dev_root, 0); |
2050 | BUG_ON(!trans); | 2060 | BUG_ON(IS_ERR(trans)); |
2051 | 2061 | ||
2052 | ret = btrfs_grow_device(trans, device, old_size); | 2062 | ret = btrfs_grow_device(trans, device, old_size); |
2053 | BUG_ON(ret); | 2063 | BUG_ON(ret); |
@@ -2213,6 +2223,11 @@ again: | |||
2213 | 2223 | ||
2214 | /* Shrinking succeeded, else we would be at "done". */ | 2224 | /* Shrinking succeeded, else we would be at "done". */ |
2215 | trans = btrfs_start_transaction(root, 0); | 2225 | trans = btrfs_start_transaction(root, 0); |
2226 | if (IS_ERR(trans)) { | ||
2227 | ret = PTR_ERR(trans); | ||
2228 | goto done; | ||
2229 | } | ||
2230 | |||
2216 | lock_chunks(root); | 2231 | lock_chunks(root); |
2217 | 2232 | ||
2218 | device->disk_total_bytes = new_size; | 2233 | device->disk_total_bytes = new_size; |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index ee45648b0d1a..7cb0f7f847e4 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -3,6 +3,7 @@ config CIFS | |||
3 | depends on INET | 3 | depends on INET |
4 | select NLS | 4 | select NLS |
5 | select CRYPTO | 5 | select CRYPTO |
6 | select CRYPTO_MD4 | ||
6 | select CRYPTO_MD5 | 7 | select CRYPTO_MD5 |
7 | select CRYPTO_HMAC | 8 | select CRYPTO_HMAC |
8 | select CRYPTO_ARC4 | 9 | select CRYPTO_ARC4 |
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index f1c68629f277..0a265ad9e426 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -282,8 +282,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) | |||
282 | cFYI(1, "in %s", __func__); | 282 | cFYI(1, "in %s", __func__); |
283 | BUG_ON(IS_ROOT(mntpt)); | 283 | BUG_ON(IS_ROOT(mntpt)); |
284 | 284 | ||
285 | xid = GetXid(); | ||
286 | |||
287 | /* | 285 | /* |
288 | * The MSDFS spec states that paths in DFS referral requests and | 286 | * The MSDFS spec states that paths in DFS referral requests and |
289 | * responses must be prefixed by a single '\' character instead of | 287 | * responses must be prefixed by a single '\' character instead of |
@@ -293,7 +291,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) | |||
293 | mnt = ERR_PTR(-ENOMEM); | 291 | mnt = ERR_PTR(-ENOMEM); |
294 | full_path = build_path_from_dentry(mntpt); | 292 | full_path = build_path_from_dentry(mntpt); |
295 | if (full_path == NULL) | 293 | if (full_path == NULL) |
296 | goto free_xid; | 294 | goto cdda_exit; |
297 | 295 | ||
298 | cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); | 296 | cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); |
299 | tlink = cifs_sb_tlink(cifs_sb); | 297 | tlink = cifs_sb_tlink(cifs_sb); |
@@ -303,9 +301,11 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) | |||
303 | } | 301 | } |
304 | ses = tlink_tcon(tlink)->ses; | 302 | ses = tlink_tcon(tlink)->ses; |
305 | 303 | ||
304 | xid = GetXid(); | ||
306 | rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, | 305 | rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, |
307 | &num_referrals, &referrals, | 306 | &num_referrals, &referrals, |
308 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 307 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
308 | FreeXid(xid); | ||
309 | 309 | ||
310 | cifs_put_tlink(tlink); | 310 | cifs_put_tlink(tlink); |
311 | 311 | ||
@@ -338,8 +338,7 @@ success: | |||
338 | free_dfs_info_array(referrals, num_referrals); | 338 | free_dfs_info_array(referrals, num_referrals); |
339 | free_full_path: | 339 | free_full_path: |
340 | kfree(full_path); | 340 | kfree(full_path); |
341 | free_xid: | 341 | cdda_exit: |
342 | FreeXid(xid); | ||
343 | cFYI(1, "leaving %s" , __func__); | 342 | cFYI(1, "leaving %s" , __func__); |
344 | return mnt; | 343 | return mnt; |
345 | } | 344 | } |
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1e7636b145a8..beeebf194234 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, | |||
372 | 372 | ||
373 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), | 373 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), |
374 | GFP_KERNEL); | 374 | GFP_KERNEL); |
375 | if (!ppace) { | ||
376 | cERROR(1, "DACL memory allocation error"); | ||
377 | return; | ||
378 | } | ||
375 | 379 | ||
376 | for (i = 0; i < num_aces; ++i) { | 380 | for (i = 0; i < num_aces; ++i) { |
377 | ppace[i] = (struct cifs_ace *) (acl_base + acl_size); | 381 | ppace[i] = (struct cifs_ace *) (acl_base + acl_size); |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 0db5f1de0227..a51585f9852b 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -657,9 +657,10 @@ calc_seckey(struct cifsSesInfo *ses) | |||
657 | get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); | 657 | get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); |
658 | 658 | ||
659 | tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); | 659 | tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); |
660 | if (!tfm_arc4 || IS_ERR(tfm_arc4)) { | 660 | if (IS_ERR(tfm_arc4)) { |
661 | rc = PTR_ERR(tfm_arc4); | ||
661 | cERROR(1, "could not allocate crypto API arc4\n"); | 662 | cERROR(1, "could not allocate crypto API arc4\n"); |
662 | return PTR_ERR(tfm_arc4); | 663 | return rc; |
663 | } | 664 | } |
664 | 665 | ||
665 | desc.tfm = tfm_arc4; | 666 | desc.tfm = tfm_arc4; |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 14789a97304e..4a3330235d55 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
127 | extern const struct export_operations cifs_export_ops; | 127 | extern const struct export_operations cifs_export_ops; |
128 | #endif /* EXPERIMENTAL */ | 128 | #endif /* EXPERIMENTAL */ |
129 | 129 | ||
130 | #define CIFS_VERSION "1.69" | 130 | #define CIFS_VERSION "1.70" |
131 | #endif /* _CIFSFS_H */ | 131 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index edd5b29b53c9..17afb0fbcaed 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -188,6 +188,8 @@ struct TCP_Server_Info { | |||
188 | /* multiplexed reads or writes */ | 188 | /* multiplexed reads or writes */ |
189 | unsigned int maxBuf; /* maxBuf specifies the maximum */ | 189 | unsigned int maxBuf; /* maxBuf specifies the maximum */ |
190 | /* message size the server can send or receive for non-raw SMBs */ | 190 | /* message size the server can send or receive for non-raw SMBs */ |
191 | /* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */ | ||
192 | /* when socket is setup (and during reconnect) before NegProt sent */ | ||
191 | unsigned int max_rw; /* maxRw specifies the maximum */ | 193 | unsigned int max_rw; /* maxRw specifies the maximum */ |
192 | /* message size the server can send or receive for */ | 194 | /* message size the server can send or receive for */ |
193 | /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ | 195 | /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ |
@@ -652,7 +654,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, | |||
652 | #define MID_REQUEST_SUBMITTED 2 | 654 | #define MID_REQUEST_SUBMITTED 2 |
653 | #define MID_RESPONSE_RECEIVED 4 | 655 | #define MID_RESPONSE_RECEIVED 4 |
654 | #define MID_RETRY_NEEDED 8 /* session closed while this request out */ | 656 | #define MID_RETRY_NEEDED 8 /* session closed while this request out */ |
655 | #define MID_NO_RESP_NEEDED 0x10 | 657 | #define MID_RESPONSE_MALFORMED 0x10 |
656 | 658 | ||
657 | /* Types of response buffer returned from SendReceive2 */ | 659 | /* Types of response buffer returned from SendReceive2 */ |
658 | #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ | 660 | #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 3106f5e5c633..904aa47e3515 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
139 | if (ses->status == CifsExiting) | ||
140 | return -EIO; | ||
141 | |||
142 | /* | 139 | /* |
143 | * Give demultiplex thread up to 10 seconds to reconnect, should be | 140 | * Give demultiplex thread up to 10 seconds to reconnect, should be |
144 | * greater than cifs socket timeout which is 7 seconds | 141 | * greater than cifs socket timeout which is 7 seconds |
@@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
156 | * retrying until process is killed or server comes | 153 | * retrying until process is killed or server comes |
157 | * back on-line | 154 | * back on-line |
158 | */ | 155 | */ |
159 | if (!tcon->retry || ses->status == CifsExiting) { | 156 | if (!tcon->retry) { |
160 | cFYI(1, "gave up waiting on reconnect in smb_init"); | 157 | cFYI(1, "gave up waiting on reconnect in smb_init"); |
161 | return -EHOSTDOWN; | 158 | return -EHOSTDOWN; |
162 | } | 159 | } |
@@ -4914,7 +4911,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, | |||
4914 | __u16 fid, __u32 pid_of_opener, bool SetAllocation) | 4911 | __u16 fid, __u32 pid_of_opener, bool SetAllocation) |
4915 | { | 4912 | { |
4916 | struct smb_com_transaction2_sfi_req *pSMB = NULL; | 4913 | struct smb_com_transaction2_sfi_req *pSMB = NULL; |
4917 | char *data_offset; | ||
4918 | struct file_end_of_file_info *parm_data; | 4914 | struct file_end_of_file_info *parm_data; |
4919 | int rc = 0; | 4915 | int rc = 0; |
4920 | __u16 params, param_offset, offset, byte_count, count; | 4916 | __u16 params, param_offset, offset, byte_count, count; |
@@ -4938,8 +4934,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, | |||
4938 | param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; | 4934 | param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; |
4939 | offset = param_offset + params; | 4935 | offset = param_offset + params; |
4940 | 4936 | ||
4941 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; | ||
4942 | |||
4943 | count = sizeof(struct file_end_of_file_info); | 4937 | count = sizeof(struct file_end_of_file_info); |
4944 | pSMB->MaxParameterCount = cpu_to_le16(2); | 4938 | pSMB->MaxParameterCount = cpu_to_le16(2); |
4945 | /* BB find exact max SMB PDU from sess structure BB */ | 4939 | /* BB find exact max SMB PDU from sess structure BB */ |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 47d8ff623683..8d6c17ab593d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -337,8 +337,13 @@ cifs_echo_request(struct work_struct *work) | |||
337 | struct TCP_Server_Info *server = container_of(work, | 337 | struct TCP_Server_Info *server = container_of(work, |
338 | struct TCP_Server_Info, echo.work); | 338 | struct TCP_Server_Info, echo.work); |
339 | 339 | ||
340 | /* no need to ping if we got a response recently */ | 340 | /* |
341 | if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) | 341 | * We cannot send an echo until the NEGOTIATE_PROTOCOL request is |
342 | * done, which is indicated by maxBuf != 0. Also, no need to ping if | ||
343 | * we got a response recently | ||
344 | */ | ||
345 | if (server->maxBuf == 0 || | ||
346 | time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) | ||
342 | goto requeue_echo; | 347 | goto requeue_echo; |
343 | 348 | ||
344 | rc = CIFSSMBEcho(server); | 349 | rc = CIFSSMBEcho(server); |
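The echo worker now bails out while maxBuf is still zero (before NEGOTIATE_PROTOCOL completes) and keeps the existing "responded recently" test, which relies on the wrap-safe time_before() comparison. A user-space sketch in the spirit of that jiffies comparison (the kernel's version lives in <linux/jiffies.h>):

#include <stdio.h>

/* wrap-safe "a is before b" for an unsigned tick counter */
static int tick_before(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

int main(void)
{
        unsigned long now = (unsigned long)-10;   /* close to wrap-around */
        unsigned long deadline = now + 100;       /* wraps past zero */

        printf("before deadline? %d\n", tick_before(now, deadline));   /* 1 */
        now += 200;
        printf("before deadline? %d\n", tick_before(now, deadline));   /* 0 */
        return 0;
}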
@@ -578,14 +583,23 @@ incomplete_rcv: | |||
578 | else if (reconnect == 1) | 583 | else if (reconnect == 1) |
579 | continue; | 584 | continue; |
580 | 585 | ||
581 | length += 4; /* account for rfc1002 hdr */ | 586 | total_read += 4; /* account for rfc1002 hdr */ |
582 | 587 | ||
588 | dump_smb(smb_buffer, total_read); | ||
583 | 589 | ||
584 | dump_smb(smb_buffer, length); | 590 | /* |
585 | if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) { | 591 | * We know that we received enough to get to the MID as we |
586 | cifs_dump_mem("Bad SMB: ", smb_buffer, 48); | 592 | * checked the pdu_length earlier. Now check to see |
587 | continue; | 593 | * if the rest of the header is OK. We borrow the length |
588 | } | 594 | * var for the rest of the loop to avoid a new stack var. |
595 | * | ||
596 | * 48 bytes is enough to display the header and a little bit | ||
597 | * into the payload for debugging purposes. | ||
598 | */ | ||
599 | length = checkSMB(smb_buffer, smb_buffer->Mid, total_read); | ||
600 | if (length != 0) | ||
601 | cifs_dump_mem("Bad SMB: ", smb_buffer, | ||
602 | min_t(unsigned int, total_read, 48)); | ||
589 | 603 | ||
590 | mid_entry = NULL; | 604 | mid_entry = NULL; |
591 | server->lstrp = jiffies; | 605 | server->lstrp = jiffies; |
@@ -597,7 +611,8 @@ incomplete_rcv: | |||
597 | if ((mid_entry->mid == smb_buffer->Mid) && | 611 | if ((mid_entry->mid == smb_buffer->Mid) && |
598 | (mid_entry->midState == MID_REQUEST_SUBMITTED) && | 612 | (mid_entry->midState == MID_REQUEST_SUBMITTED) && |
599 | (mid_entry->command == smb_buffer->Command)) { | 613 | (mid_entry->command == smb_buffer->Command)) { |
600 | if (check2ndT2(smb_buffer,server->maxBuf) > 0) { | 614 | if (length == 0 && |
615 | check2ndT2(smb_buffer, server->maxBuf) > 0) { | ||
601 | /* We have a multipart transact2 resp */ | 616 | /* We have a multipart transact2 resp */ |
602 | isMultiRsp = true; | 617 | isMultiRsp = true; |
603 | if (mid_entry->resp_buf) { | 618 | if (mid_entry->resp_buf) { |
@@ -632,12 +647,17 @@ incomplete_rcv: | |||
632 | mid_entry->resp_buf = smb_buffer; | 647 | mid_entry->resp_buf = smb_buffer; |
633 | mid_entry->largeBuf = isLargeBuf; | 648 | mid_entry->largeBuf = isLargeBuf; |
634 | multi_t2_fnd: | 649 | multi_t2_fnd: |
635 | mid_entry->midState = MID_RESPONSE_RECEIVED; | 650 | if (length == 0) |
636 | list_del_init(&mid_entry->qhead); | 651 | mid_entry->midState = |
637 | mid_entry->callback(mid_entry); | 652 | MID_RESPONSE_RECEIVED; |
653 | else | ||
654 | mid_entry->midState = | ||
655 | MID_RESPONSE_MALFORMED; | ||
638 | #ifdef CONFIG_CIFS_STATS2 | 656 | #ifdef CONFIG_CIFS_STATS2 |
639 | mid_entry->when_received = jiffies; | 657 | mid_entry->when_received = jiffies; |
640 | #endif | 658 | #endif |
659 | list_del_init(&mid_entry->qhead); | ||
660 | mid_entry->callback(mid_entry); | ||
641 | break; | 661 | break; |
642 | } | 662 | } |
643 | mid_entry = NULL; | 663 | mid_entry = NULL; |
@@ -653,6 +673,9 @@ multi_t2_fnd: | |||
653 | else | 673 | else |
654 | smallbuf = NULL; | 674 | smallbuf = NULL; |
655 | } | 675 | } |
676 | } else if (length != 0) { | ||
677 | /* response sanity checks failed */ | ||
678 | continue; | ||
656 | } else if (!is_valid_oplock_break(smb_buffer, server) && | 679 | } else if (!is_valid_oplock_break(smb_buffer, server) && |
657 | !isMultiRsp) { | 680 | !isMultiRsp) { |
658 | cERROR(1, "No task to wake, unknown frame received! " | 681 | cERROR(1, "No task to wake, unknown frame received! " |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0de17c1db608..e964b1cd5dd0 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -346,7 +346,6 @@ int cifs_open(struct inode *inode, struct file *file) | |||
346 | struct cifsTconInfo *tcon; | 346 | struct cifsTconInfo *tcon; |
347 | struct tcon_link *tlink; | 347 | struct tcon_link *tlink; |
348 | struct cifsFileInfo *pCifsFile = NULL; | 348 | struct cifsFileInfo *pCifsFile = NULL; |
349 | struct cifsInodeInfo *pCifsInode; | ||
350 | char *full_path = NULL; | 349 | char *full_path = NULL; |
351 | bool posix_open_ok = false; | 350 | bool posix_open_ok = false; |
352 | __u16 netfid; | 351 | __u16 netfid; |
@@ -361,8 +360,6 @@ int cifs_open(struct inode *inode, struct file *file) | |||
361 | } | 360 | } |
362 | tcon = tlink_tcon(tlink); | 361 | tcon = tlink_tcon(tlink); |
363 | 362 | ||
364 | pCifsInode = CIFS_I(file->f_path.dentry->d_inode); | ||
365 | |||
366 | full_path = build_path_from_dentry(file->f_path.dentry); | 363 | full_path = build_path_from_dentry(file->f_path.dentry); |
367 | if (full_path == NULL) { | 364 | if (full_path == NULL) { |
368 | rc = -ENOMEM; | 365 | rc = -ENOMEM; |
@@ -1146,7 +1143,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1146 | char *write_data; | 1143 | char *write_data; |
1147 | int rc = -EFAULT; | 1144 | int rc = -EFAULT; |
1148 | int bytes_written = 0; | 1145 | int bytes_written = 0; |
1149 | struct cifs_sb_info *cifs_sb; | ||
1150 | struct inode *inode; | 1146 | struct inode *inode; |
1151 | struct cifsFileInfo *open_file; | 1147 | struct cifsFileInfo *open_file; |
1152 | 1148 | ||
@@ -1154,7 +1150,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1154 | return -EFAULT; | 1150 | return -EFAULT; |
1155 | 1151 | ||
1156 | inode = page->mapping->host; | 1152 | inode = page->mapping->host; |
1157 | cifs_sb = CIFS_SB(inode->i_sb); | ||
1158 | 1153 | ||
1159 | offset += (loff_t)from; | 1154 | offset += (loff_t)from; |
1160 | write_data = kmap(page); | 1155 | write_data = kmap(page); |
@@ -1667,9 +1662,10 @@ static ssize_t | |||
1667 | cifs_iovec_write(struct file *file, const struct iovec *iov, | 1662 | cifs_iovec_write(struct file *file, const struct iovec *iov, |
1668 | unsigned long nr_segs, loff_t *poffset) | 1663 | unsigned long nr_segs, loff_t *poffset) |
1669 | { | 1664 | { |
1670 | size_t total_written = 0, written = 0; | 1665 | unsigned int written; |
1671 | unsigned long num_pages, npages; | 1666 | unsigned long num_pages, npages, i; |
1672 | size_t copied, len, cur_len, i; | 1667 | size_t copied, len, cur_len; |
1668 | ssize_t total_written = 0; | ||
1673 | struct kvec *to_send; | 1669 | struct kvec *to_send; |
1674 | struct page **pages; | 1670 | struct page **pages; |
1675 | struct iov_iter it; | 1671 | struct iov_iter it; |
@@ -1825,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, | |||
1825 | { | 1821 | { |
1826 | int rc; | 1822 | int rc; |
1827 | int xid; | 1823 | int xid; |
1828 | unsigned int total_read, bytes_read = 0; | 1824 | ssize_t total_read; |
1825 | unsigned int bytes_read = 0; | ||
1829 | size_t len, cur_len; | 1826 | size_t len, cur_len; |
1830 | int iov_offset = 0; | 1827 | int iov_offset = 0; |
1831 | struct cifs_sb_info *cifs_sb; | 1828 | struct cifs_sb_info *cifs_sb; |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 02cd60aefbff..e8804d373404 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -55,8 +55,9 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash) | |||
55 | 55 | ||
56 | md5 = crypto_alloc_shash("md5", 0, 0); | 56 | md5 = crypto_alloc_shash("md5", 0, 0); |
57 | if (IS_ERR(md5)) { | 57 | if (IS_ERR(md5)) { |
58 | rc = PTR_ERR(md5); | ||
58 | cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc); | 59 | cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc); |
59 | return PTR_ERR(md5); | 60 | return rc; |
60 | } | 61 | } |
61 | size = sizeof(struct shash_desc) + crypto_shash_descsize(md5); | 62 | size = sizeof(struct shash_desc) + crypto_shash_descsize(md5); |
62 | sdescmd5 = kmalloc(size, GFP_KERNEL); | 63 | sdescmd5 = kmalloc(size, GFP_KERNEL); |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index a09e077ba925..2a930a752a78 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -236,10 +236,7 @@ __u16 GetNextMid(struct TCP_Server_Info *server) | |||
236 | { | 236 | { |
237 | __u16 mid = 0; | 237 | __u16 mid = 0; |
238 | __u16 last_mid; | 238 | __u16 last_mid; |
239 | int collision; | 239 | bool collision; |
240 | |||
241 | if (server == NULL) | ||
242 | return mid; | ||
243 | 240 | ||
244 | spin_lock(&GlobalMid_Lock); | 241 | spin_lock(&GlobalMid_Lock); |
245 | last_mid = server->CurrentMid; /* we do not want to loop forever */ | 242 | last_mid = server->CurrentMid; /* we do not want to loop forever */ |
@@ -252,24 +249,38 @@ __u16 GetNextMid(struct TCP_Server_Info *server) | |||
252 | (and it would also have to have been a request that | 249 | (and it would also have to have been a request that |
253 | did not time out) */ | 250 | did not time out) */ |
254 | while (server->CurrentMid != last_mid) { | 251 | while (server->CurrentMid != last_mid) { |
255 | struct list_head *tmp; | ||
256 | struct mid_q_entry *mid_entry; | 252 | struct mid_q_entry *mid_entry; |
253 | unsigned int num_mids; | ||
257 | 254 | ||
258 | collision = 0; | 255 | collision = false; |
259 | if (server->CurrentMid == 0) | 256 | if (server->CurrentMid == 0) |
260 | server->CurrentMid++; | 257 | server->CurrentMid++; |
261 | 258 | ||
262 | list_for_each(tmp, &server->pending_mid_q) { | 259 | num_mids = 0; |
263 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 260 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { |
264 | 261 | ++num_mids; | |
265 | if ((mid_entry->mid == server->CurrentMid) && | 262 | if (mid_entry->mid == server->CurrentMid && |
266 | (mid_entry->midState == MID_REQUEST_SUBMITTED)) { | 263 | mid_entry->midState == MID_REQUEST_SUBMITTED) { |
267 | /* This mid is in use, try a different one */ | 264 | /* This mid is in use, try a different one */ |
268 | collision = 1; | 265 | collision = true; |
269 | break; | 266 | break; |
270 | } | 267 | } |
271 | } | 268 | } |
272 | if (collision == 0) { | 269 | |
270 | /* | ||
271 | * if we have more than 32k mids in the list, then something | ||
272 | * is very wrong. Possibly a local user is trying to DoS the | ||
273 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
274 | * we get to 2^16 mids then we're in big trouble as this | ||
275 | * function could loop forever. | ||
276 | * | ||
277 | * Go ahead and assign out the mid in this situation, but force | ||
278 | * an eventual reconnect to clean out the pending_mid_q. | ||
279 | */ | ||
280 | if (num_mids > 32768) | ||
281 | server->tcpStatus = CifsNeedReconnect; | ||
282 | |||
283 | if (!collision) { | ||
273 | mid = server->CurrentMid; | 284 | mid = server->CurrentMid; |
274 | break; | 285 | break; |
275 | } | 286 | } |
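The reworked GetNextMid() walks the pending queue with list_for_each_entry(), counts the outstanding entries, and forces a reconnect once more than 32768 mids are in flight, since a full 16-bit mid space would make the collision loop spin forever. A simplified stand-alone sketch of the allocation loop over a plain array (the real code scans a kernel list under GlobalMid_Lock):

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING 8

struct pending { uint16_t mid; int in_use; };

/* pick the next mid that does not collide with an in-flight request;
 * returns 0 if every candidate collides (0 is treated as reserved here) */
static uint16_t next_mid(uint16_t *cur, const struct pending *q, int n)
{
        uint16_t last = *cur;

        do {
                int collision = 0, i;

                (*cur)++;
                if (*cur == 0)          /* skip the reserved value */
                        (*cur)++;

                for (i = 0; i < n; i++) {
                        if (q[i].in_use && q[i].mid == *cur) {
                                collision = 1;
                                break;
                        }
                }
                if (!collision)
                        return *cur;
        } while (*cur != last);

        return 0;                       /* mid space exhausted */
}

int main(void)
{
        struct pending q[MAX_PENDING] = { { 2, 1 }, { 3, 1 } };
        uint16_t cur = 1;

        printf("next mid: %u\n", next_mid(&cur, q, MAX_PENDING));   /* 4 */
        return 0;
}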
@@ -381,29 +392,31 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
381 | } | 392 | } |
382 | 393 | ||
383 | static int | 394 | static int |
384 | checkSMBhdr(struct smb_hdr *smb, __u16 mid) | 395 | check_smb_hdr(struct smb_hdr *smb, __u16 mid) |
385 | { | 396 | { |
386 | /* Make sure that this really is an SMB, that it is a response, | 397 | /* does it have the right SMB "signature" ? */ |
387 | and that the message ids match */ | 398 | if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) { |
388 | if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) && | 399 | cERROR(1, "Bad protocol string signature header 0x%x", |
389 | (mid == smb->Mid)) { | 400 | *(unsigned int *)smb->Protocol); |
390 | if (smb->Flags & SMBFLG_RESPONSE) | 401 | return 1; |
391 | return 0; | 402 | } |
392 | else { | 403 | |
393 | /* only one valid case where server sends us request */ | 404 | /* Make sure that message ids match */ |
394 | if (smb->Command == SMB_COM_LOCKING_ANDX) | 405 | if (mid != smb->Mid) { |
395 | return 0; | 406 | cERROR(1, "Mids do not match. received=%u expected=%u", |
396 | else | 407 | smb->Mid, mid); |
397 | cERROR(1, "Received Request not response"); | 408 | return 1; |
398 | } | ||
399 | } else { /* bad signature or mid */ | ||
400 | if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) | ||
401 | cERROR(1, "Bad protocol string signature header %x", | ||
402 | *(unsigned int *) smb->Protocol); | ||
403 | if (mid != smb->Mid) | ||
404 | cERROR(1, "Mids do not match"); | ||
405 | } | 409 | } |
406 | cERROR(1, "bad smb detected. The Mid=%d", smb->Mid); | 410 | |
411 | /* if it's a response then accept */ | ||
412 | if (smb->Flags & SMBFLG_RESPONSE) | ||
413 | return 0; | ||
414 | |||
415 | /* only one valid case where server sends us request */ | ||
416 | if (smb->Command == SMB_COM_LOCKING_ANDX) | ||
417 | return 0; | ||
418 | |||
419 | cERROR(1, "Server sent request, not response. mid=%u", smb->Mid); | ||
407 | return 1; | 420 | return 1; |
408 | } | 421 | } |
409 | 422 | ||
@@ -448,7 +461,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
448 | return 1; | 461 | return 1; |
449 | } | 462 | } |
450 | 463 | ||
451 | if (checkSMBhdr(smb, mid)) | 464 | if (check_smb_hdr(smb, mid)) |
452 | return 1; | 465 | return 1; |
453 | clc_len = smbCalcSize_LE(smb); | 466 | clc_len = smbCalcSize_LE(smb); |
454 | 467 | ||
@@ -465,25 +478,26 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
465 | if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) | 478 | if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) |
466 | return 0; /* bcc wrapped */ | 479 | return 0; /* bcc wrapped */ |
467 | } | 480 | } |
468 | cFYI(1, "Calculated size %d vs length %d mismatch for mid %d", | 481 | cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u", |
469 | clc_len, 4 + len, smb->Mid); | 482 | clc_len, 4 + len, smb->Mid); |
470 | /* Windows XP can return a few bytes too much, presumably | 483 | |
471 | an illegal pad, at the end of byte range lock responses | 484 | if (4 + len < clc_len) { |
472 | so we allow for that three byte pad, as long as actual | 485 | cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u", |
473 | received length is as long or longer than calculated length */ | ||
474 | /* We have now had to extend this more, since there is a | ||
475 | case in which it needs to be bigger still to handle a | ||
476 | malformed response to transact2 findfirst from WinXP when | ||
477 | access denied is returned and thus bcc and wct are zero | ||
478 | but server says length is 0x21 bytes too long as if the server | ||
479 | forget to reset the smb rfc1001 length when it reset the | ||
480 | wct and bcc to minimum size and drop the t2 parms and data */ | ||
481 | if ((4+len > clc_len) && (len <= clc_len + 512)) | ||
482 | return 0; | ||
483 | else { | ||
484 | cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d", | ||
485 | len, smb->Mid); | 486 | len, smb->Mid); |
486 | return 1; | 487 | return 1; |
488 | } else if (len > clc_len + 512) { | ||
489 | /* | ||
490 | * Some servers (Windows XP in particular) send more | ||
491 | * data than the lengths in the SMB packet would | ||
492 | * indicate on certain calls (byte range locks and | ||
493 | * trans2 find first calls in particular). While the | ||
494 | * client can handle such a frame by ignoring the | ||
495 | * trailing data, we choose limit the amount of extra | ||
496 | * data to 512 bytes. | ||
497 | */ | ||
498 | cERROR(1, "RFC1001 size %u more than 512 bytes larger " | ||
499 | "than SMB for mid=%u", len, smb->Mid); | ||
500 | return 1; | ||
487 | } | 501 | } |
488 | } | 502 | } |
489 | return 0; | 503 | return 0; |
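The rewritten checkSMB() tail compares the RFC1001 length against the size calculated from the SMB fields: a frame shorter than the calculated size is always rejected, while up to 512 bytes of trailing data are tolerated because some servers (notably Windows XP) pad certain responses. A stand-alone version of just that comparison:

#include <stdio.h>

/* 0 = frame acceptable, 1 = reject */
static int check_lengths(unsigned int rfc1001_len, unsigned int calc_len)
{
        if (4 + rfc1001_len < calc_len) {
                fprintf(stderr, "frame smaller than SMB claims\n");
                return 1;
        }
        if (rfc1001_len > calc_len + 512) {
                fprintf(stderr, "more than 512 bytes of trailing data\n");
                return 1;
        }
        return 0;                       /* exact match or tolerable padding */
}

int main(void)
{
        printf("%d\n", check_lengths(100, 104));   /* 0: exact (4-byte header) */
        printf("%d\n", check_lengths(100, 200));   /* 1: truncated */
        printf("%d\n", check_lengths(700, 104));   /* 1: far too much padding */
        return 0;
}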
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 7f25cc3d2256..f8e4cd2a7912 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -764,7 +764,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
764 | { | 764 | { |
765 | int rc = 0; | 765 | int rc = 0; |
766 | int xid, i; | 766 | int xid, i; |
767 | struct cifs_sb_info *cifs_sb; | ||
768 | struct cifsTconInfo *pTcon; | 767 | struct cifsTconInfo *pTcon; |
769 | struct cifsFileInfo *cifsFile = NULL; | 768 | struct cifsFileInfo *cifsFile = NULL; |
770 | char *current_entry; | 769 | char *current_entry; |
@@ -775,8 +774,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
775 | 774 | ||
776 | xid = GetXid(); | 775 | xid = GetXid(); |
777 | 776 | ||
778 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | ||
779 | |||
780 | /* | 777 | /* |
781 | * Ensure FindFirst doesn't fail before doing filldir() for '.' and | 778 | * Ensure FindFirst doesn't fail before doing filldir() for '.' and |
782 | * '..'. Otherwise we won't be able to notify VFS in case of failure. | 779 | * '..'. Otherwise we won't be able to notify VFS in case of failure. |
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index b5450e9f40c0..b5041c849981 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c | |||
@@ -58,8 +58,9 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) | |||
58 | 58 | ||
59 | md4 = crypto_alloc_shash("md4", 0, 0); | 59 | md4 = crypto_alloc_shash("md4", 0, 0); |
60 | if (IS_ERR(md4)) { | 60 | if (IS_ERR(md4)) { |
61 | rc = PTR_ERR(md4); | ||
61 | cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); | 62 | cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); |
62 | return PTR_ERR(md4); | 63 | return rc; |
63 | } | 64 | } |
64 | size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); | 65 | size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); |
65 | sdescmd4 = kmalloc(size, GFP_KERNEL); | 66 | sdescmd4 = kmalloc(size, GFP_KERNEL); |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c1ccca1a933f..46d8756f2b24 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -236,9 +236,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | |||
236 | server->tcpStatus = CifsNeedReconnect; | 236 | server->tcpStatus = CifsNeedReconnect; |
237 | } | 237 | } |
238 | 238 | ||
239 | if (rc < 0) { | 239 | if (rc < 0 && rc != -EINTR) |
240 | cERROR(1, "Error %d sending data on socket to server", rc); | 240 | cERROR(1, "Error %d sending data on socket to server", rc); |
241 | } else | 241 | else |
242 | rc = 0; | 242 | rc = 0; |
243 | 243 | ||
244 | /* Don't want to modify the buffer as a | 244 | /* Don't want to modify the buffer as a |
@@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, | |||
359 | if (rc) | 359 | if (rc) |
360 | return rc; | 360 | return rc; |
361 | 361 | ||
362 | /* enable signing if server requires it */ | ||
363 | if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | ||
364 | in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | ||
365 | |||
362 | mutex_lock(&server->srv_mutex); | 366 | mutex_lock(&server->srv_mutex); |
363 | mid = AllocMidQEntry(in_buf, server); | 367 | mid = AllocMidQEntry(in_buf, server); |
364 | if (mid == NULL) { | 368 | if (mid == NULL) { |
@@ -453,6 +457,9 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | |||
453 | case MID_RETRY_NEEDED: | 457 | case MID_RETRY_NEEDED: |
454 | rc = -EAGAIN; | 458 | rc = -EAGAIN; |
455 | break; | 459 | break; |
460 | case MID_RESPONSE_MALFORMED: | ||
461 | rc = -EIO; | ||
462 | break; | ||
456 | default: | 463 | default: |
457 | cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, | 464 | cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, |
458 | mid->mid, mid->midState); | 465 | mid->mid, mid->midState); |
@@ -570,17 +577,33 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
570 | #endif | 577 | #endif |
571 | 578 | ||
572 | mutex_unlock(&ses->server->srv_mutex); | 579 | mutex_unlock(&ses->server->srv_mutex); |
573 | cifs_small_buf_release(in_buf); | ||
574 | 580 | ||
575 | if (rc < 0) | 581 | if (rc < 0) { |
582 | cifs_small_buf_release(in_buf); | ||
576 | goto out; | 583 | goto out; |
584 | } | ||
577 | 585 | ||
578 | if (long_op == CIFS_ASYNC_OP) | 586 | if (long_op == CIFS_ASYNC_OP) { |
587 | cifs_small_buf_release(in_buf); | ||
579 | goto out; | 588 | goto out; |
589 | } | ||
580 | 590 | ||
581 | rc = wait_for_response(ses->server, midQ); | 591 | rc = wait_for_response(ses->server, midQ); |
582 | if (rc != 0) | 592 | if (rc != 0) { |
583 | goto out; | 593 | send_nt_cancel(ses->server, in_buf, midQ); |
594 | spin_lock(&GlobalMid_Lock); | ||
595 | if (midQ->midState == MID_REQUEST_SUBMITTED) { | ||
596 | midQ->callback = DeleteMidQEntry; | ||
597 | spin_unlock(&GlobalMid_Lock); | ||
598 | cifs_small_buf_release(in_buf); | ||
599 | atomic_dec(&ses->server->inFlight); | ||
600 | wake_up(&ses->server->request_q); | ||
601 | return rc; | ||
602 | } | ||
603 | spin_unlock(&GlobalMid_Lock); | ||
604 | } | ||
605 | |||
606 | cifs_small_buf_release(in_buf); | ||
584 | 607 | ||
585 | rc = sync_mid_result(midQ, ses->server); | 608 | rc = sync_mid_result(midQ, ses->server); |
586 | if (rc != 0) { | 609 | if (rc != 0) { |
@@ -724,8 +747,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
724 | goto out; | 747 | goto out; |
725 | 748 | ||
726 | rc = wait_for_response(ses->server, midQ); | 749 | rc = wait_for_response(ses->server, midQ); |
727 | if (rc != 0) | 750 | if (rc != 0) { |
728 | goto out; | 751 | send_nt_cancel(ses->server, in_buf, midQ); |
752 | spin_lock(&GlobalMid_Lock); | ||
753 | if (midQ->midState == MID_REQUEST_SUBMITTED) { | ||
754 | /* no longer considered to be "in-flight" */ | ||
755 | midQ->callback = DeleteMidQEntry; | ||
756 | spin_unlock(&GlobalMid_Lock); | ||
757 | atomic_dec(&ses->server->inFlight); | ||
758 | wake_up(&ses->server->request_q); | ||
759 | return rc; | ||
760 | } | ||
761 | spin_unlock(&GlobalMid_Lock); | ||
762 | } | ||
729 | 763 | ||
730 | rc = sync_mid_result(midQ, ses->server); | 764 | rc = sync_mid_result(midQ, ses->server); |
731 | if (rc != 0) { | 765 | if (rc != 0) { |
@@ -922,10 +956,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
922 | } | 956 | } |
923 | } | 957 | } |
924 | 958 | ||
925 | if (wait_for_response(ses->server, midQ) == 0) { | 959 | rc = wait_for_response(ses->server, midQ); |
926 | /* We got the response - restart system call. */ | 960 | if (rc) { |
927 | rstart = 1; | 961 | send_nt_cancel(ses->server, in_buf, midQ); |
962 | spin_lock(&GlobalMid_Lock); | ||
963 | if (midQ->midState == MID_REQUEST_SUBMITTED) { | ||
964 | /* no longer considered to be "in-flight" */ | ||
965 | midQ->callback = DeleteMidQEntry; | ||
966 | spin_unlock(&GlobalMid_Lock); | ||
967 | return rc; | ||
968 | } | ||
969 | spin_unlock(&GlobalMid_Lock); | ||
928 | } | 970 | } |
971 | |||
972 | /* We got the response - restart system call. */ | ||
973 | rstart = 1; | ||
929 | } | 974 | } |
930 | 975 | ||
931 | rc = sync_mid_result(midQ, ses->server); | 976 | rc = sync_mid_result(midQ, ses->server); |
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 9c64ae9e4c1a..2d8c87b951c2 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c | |||
@@ -1468,15 +1468,13 @@ static void work_stop(void) | |||
1468 | 1468 | ||
1469 | static int work_start(void) | 1469 | static int work_start(void) |
1470 | { | 1470 | { |
1471 | recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM | | 1471 | recv_workqueue = create_singlethread_workqueue("dlm_recv"); |
1472 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | ||
1473 | if (!recv_workqueue) { | 1472 | if (!recv_workqueue) { |
1474 | log_print("can't start dlm_recv"); | 1473 | log_print("can't start dlm_recv"); |
1475 | return -ENOMEM; | 1474 | return -ENOMEM; |
1476 | } | 1475 | } |
1477 | 1476 | ||
1478 | send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM | | 1477 | send_workqueue = create_singlethread_workqueue("dlm_send"); |
1479 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | ||
1480 | if (!send_workqueue) { | 1478 | if (!send_workqueue) { |
1481 | log_print("can't start dlm_send"); | 1479 | log_print("can't start dlm_send"); |
1482 | destroy_workqueue(recv_workqueue); | 1480 | destroy_workqueue(recv_workqueue); |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index cc8a9b7d6064..267d0ada4541 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -1114,6 +1114,17 @@ static int ep_send_events(struct eventpoll *ep, | |||
1114 | return ep_scan_ready_list(ep, ep_send_events_proc, &esed); | 1114 | return ep_scan_ready_list(ep, ep_send_events_proc, &esed); |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | static inline struct timespec ep_set_mstimeout(long ms) | ||
1118 | { | ||
1119 | struct timespec now, ts = { | ||
1120 | .tv_sec = ms / MSEC_PER_SEC, | ||
1121 | .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), | ||
1122 | }; | ||
1123 | |||
1124 | ktime_get_ts(&now); | ||
1125 | return timespec_add_safe(now, ts); | ||
1126 | } | ||
1127 | |||
1117 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | 1128 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, |
1118 | int maxevents, long timeout) | 1129 | int maxevents, long timeout) |
1119 | { | 1130 | { |
@@ -1121,12 +1132,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | |||
1121 | unsigned long flags; | 1132 | unsigned long flags; |
1122 | long slack; | 1133 | long slack; |
1123 | wait_queue_t wait; | 1134 | wait_queue_t wait; |
1124 | struct timespec end_time; | ||
1125 | ktime_t expires, *to = NULL; | 1135 | ktime_t expires, *to = NULL; |
1126 | 1136 | ||
1127 | if (timeout > 0) { | 1137 | if (timeout > 0) { |
1128 | ktime_get_ts(&end_time); | 1138 | struct timespec end_time = ep_set_mstimeout(timeout); |
1129 | timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC); | 1139 | |
1130 | slack = select_estimate_accuracy(&end_time); | 1140 | slack = select_estimate_accuracy(&end_time); |
1131 | to = &expires; | 1141 | to = &expires; |
1132 | *to = timespec_to_ktime(end_time); | 1142 | *to = timespec_to_ktime(end_time); |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -120,7 +120,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) | |||
120 | goto out; | 120 | goto out; |
121 | 121 | ||
122 | file = do_filp_open(AT_FDCWD, tmp, | 122 | file = do_filp_open(AT_FDCWD, tmp, |
123 | O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, | 123 | O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0, |
124 | MAY_READ | MAY_EXEC | MAY_OPEN); | 124 | MAY_READ | MAY_EXEC | MAY_OPEN); |
125 | putname(tmp); | 125 | putname(tmp); |
126 | error = PTR_ERR(file); | 126 | error = PTR_ERR(file); |
@@ -723,7 +723,7 @@ struct file *open_exec(const char *name) | |||
723 | int err; | 723 | int err; |
724 | 724 | ||
725 | file = do_filp_open(AT_FDCWD, name, | 725 | file = do_filp_open(AT_FDCWD, name, |
726 | O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, | 726 | O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0, |
727 | MAY_EXEC | MAY_OPEN); | 727 | MAY_EXEC | MAY_OPEN); |
728 | if (IS_ERR(file)) | 728 | if (IS_ERR(file)) |
729 | goto out; | 729 | goto out; |
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 42685424817b..a7555238c41a 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
@@ -1030,7 +1030,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) | |||
1030 | memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); | 1030 | memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | inode->i_mapping->backing_dev_info = sb->s_bdi; | ||
1034 | if (S_ISREG(inode->i_mode)) { | 1033 | if (S_ISREG(inode->i_mode)) { |
1035 | inode->i_op = &exofs_file_inode_operations; | 1034 | inode->i_op = &exofs_file_inode_operations; |
1036 | inode->i_fop = &exofs_file_operations; | 1035 | inode->i_fop = &exofs_file_operations; |
@@ -1131,7 +1130,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode) | |||
1131 | 1130 | ||
1132 | sbi = sb->s_fs_info; | 1131 | sbi = sb->s_fs_info; |
1133 | 1132 | ||
1134 | inode->i_mapping->backing_dev_info = sb->s_bdi; | ||
1135 | sb->s_dirt = 1; | 1133 | sb->s_dirt = 1; |
1136 | inode_init_owner(inode, dir, mode); | 1134 | inode_init_owner(inode, dir, mode); |
1137 | inode->i_ino = sbi->s_nextid++; | 1135 | inode->i_ino = sbi->s_nextid++; |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0c8d97b56f34..3aa0b72b3b94 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -848,6 +848,7 @@ struct ext4_inode_info { | |||
848 | atomic_t i_ioend_count; /* Number of outstanding io_end structs */ | 848 | atomic_t i_ioend_count; /* Number of outstanding io_end structs */ |
849 | /* current io_end structure for async DIO write*/ | 849 | /* current io_end structure for async DIO write*/ |
850 | ext4_io_end_t *cur_aio_dio; | 850 | ext4_io_end_t *cur_aio_dio; |
851 | atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */ | ||
851 | 852 | ||
852 | spinlock_t i_block_reservation_lock; | 853 | spinlock_t i_block_reservation_lock; |
853 | 854 | ||
@@ -2119,6 +2120,15 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) | |||
2119 | 2120 | ||
2120 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | 2121 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) |
2121 | 2122 | ||
2123 | /* For ioend & aio unwritten conversion wait queues */ | ||
2124 | #define EXT4_WQ_HASH_SZ 37 | ||
2125 | #define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\ | ||
2126 | EXT4_WQ_HASH_SZ]) | ||
2127 | #define ext4_aio_mutex(v) (&ext4__aio_mutex[((unsigned long)(v)) %\ | ||
2128 | EXT4_WQ_HASH_SZ]) | ||
2129 | extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; | ||
2130 | extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; | ||
2131 | |||
2122 | #endif /* __KERNEL__ */ | 2132 | #endif /* __KERNEL__ */ |
2123 | 2133 | ||
2124 | #endif /* _EXT4_H */ | 2134 | #endif /* _EXT4_H */ |
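
The ext4.h hunk above keys a small fixed array of wait queues and mutexes by hashing an object pointer modulo EXT4_WQ_HASH_SZ. A hedged userspace sketch of the same lock-striping idea follows (pthreads, not part of this patch); the table size 37 mirrors EXT4_WQ_HASH_SZ, everything else is illustrative.

/*
 * Hedged sketch: hash an object's address into a small fixed table of
 * locks, so unrelated objects rarely contend while no per-object lock
 * has to be allocated.
 */
#include <pthread.h>
#include <stdio.h>

#define WQ_HASH_SZ 37

static pthread_mutex_t stripe_mutex[WQ_HASH_SZ];

static pthread_mutex_t *stripe_for(const void *obj)
{
	return &stripe_mutex[(unsigned long)obj % WQ_HASH_SZ];
}

int main(void)
{
	int dummy_inode;	/* any object address works as a key */
	pthread_mutex_t *m;
	int i;

	for (i = 0; i < WQ_HASH_SZ; i++)
		pthread_mutex_init(&stripe_mutex[i], NULL);

	m = stripe_for(&dummy_inode);
	pthread_mutex_lock(m);
	printf("serialized on stripe %ld\n", (long)(m - stripe_mutex));
	pthread_mutex_unlock(m);
	return 0;
}
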
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 63a75810b7c3..ccce8a7e94ed 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -3174,9 +3174,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3174 | * that this IO needs to convertion to written when IO is | 3174 | * that this IO needs to convertion to written when IO is |
3175 | * completed | 3175 | * completed |
3176 | */ | 3176 | */ |
3177 | if (io) | 3177 | if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
3178 | io->flag = EXT4_IO_END_UNWRITTEN; | 3178 | io->flag = EXT4_IO_END_UNWRITTEN; |
3179 | else | 3179 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); |
3180 | } else | ||
3180 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); | 3181 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
3181 | if (ext4_should_dioread_nolock(inode)) | 3182 | if (ext4_should_dioread_nolock(inode)) |
3182 | map->m_flags |= EXT4_MAP_UNINIT; | 3183 | map->m_flags |= EXT4_MAP_UNINIT; |
@@ -3463,9 +3464,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
3463 | * that we need to perform convertion when IO is done. | 3464 | * that we need to perform convertion when IO is done. |
3464 | */ | 3465 | */ |
3465 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { | 3466 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
3466 | if (io) | 3467 | if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
3467 | io->flag = EXT4_IO_END_UNWRITTEN; | 3468 | io->flag = EXT4_IO_END_UNWRITTEN; |
3468 | else | 3469 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); |
3470 | } else | ||
3469 | ext4_set_inode_state(inode, | 3471 | ext4_set_inode_state(inode, |
3470 | EXT4_STATE_DIO_UNWRITTEN); | 3472 | EXT4_STATE_DIO_UNWRITTEN); |
3471 | } | 3473 | } |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 2e8322c8aa88..7b80d543b89e 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -55,11 +55,47 @@ static int ext4_release_file(struct inode *inode, struct file *filp) | |||
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static void ext4_aiodio_wait(struct inode *inode) | ||
59 | { | ||
60 | wait_queue_head_t *wq = ext4_ioend_wq(inode); | ||
61 | |||
62 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0)); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * This tests whether the IO in question is block-aligned or not. | ||
67 | * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they | ||
68 | * are converted to written only after the IO is complete. Until they are | ||
69 | * mapped, these blocks appear as holes, so dio_zero_block() will assume that | ||
70 | * it needs to zero out portions of the start and/or end block. If 2 AIO | ||
71 | * threads are at work on the same unwritten block, they must be synchronized | ||
72 | * or one thread will zero the other's data, causing corruption. | ||
73 | */ | ||
74 | static int | ||
75 | ext4_unaligned_aio(struct inode *inode, const struct iovec *iov, | ||
76 | unsigned long nr_segs, loff_t pos) | ||
77 | { | ||
78 | struct super_block *sb = inode->i_sb; | ||
79 | int blockmask = sb->s_blocksize - 1; | ||
80 | size_t count = iov_length(iov, nr_segs); | ||
81 | loff_t final_size = pos + count; | ||
82 | |||
83 | if (pos >= inode->i_size) | ||
84 | return 0; | ||
85 | |||
86 | if ((pos & blockmask) || (final_size & blockmask)) | ||
87 | return 1; | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
58 | static ssize_t | 92 | static ssize_t |
59 | ext4_file_write(struct kiocb *iocb, const struct iovec *iov, | 93 | ext4_file_write(struct kiocb *iocb, const struct iovec *iov, |
60 | unsigned long nr_segs, loff_t pos) | 94 | unsigned long nr_segs, loff_t pos) |
61 | { | 95 | { |
62 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; | 96 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; |
97 | int unaligned_aio = 0; | ||
98 | int ret; | ||
63 | 99 | ||
64 | /* | 100 | /* |
65 | * If we have encountered a bitmap-format file, the size limit | 101 | * If we have encountered a bitmap-format file, the size limit |
@@ -78,9 +114,31 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, | |||
78 | nr_segs = iov_shorten((struct iovec *)iov, nr_segs, | 114 | nr_segs = iov_shorten((struct iovec *)iov, nr_segs, |
79 | sbi->s_bitmap_maxbytes - pos); | 115 | sbi->s_bitmap_maxbytes - pos); |
80 | } | 116 | } |
117 | } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) && | ||
118 | !is_sync_kiocb(iocb))) { | ||
119 | unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos); | ||
81 | } | 120 | } |
82 | 121 | ||
83 | return generic_file_aio_write(iocb, iov, nr_segs, pos); | 122 | /* Unaligned direct AIO must be serialized; see comment above */ |
123 | if (unaligned_aio) { | ||
124 | static unsigned long unaligned_warn_time; | ||
125 | |||
126 | /* Warn about this once per day */ | ||
127 | if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ)) | ||
128 | ext4_msg(inode->i_sb, KERN_WARNING, | ||
129 | "Unaligned AIO/DIO on inode %ld by %s; " | ||
130 | "performance will be poor.", | ||
131 | inode->i_ino, current->comm); | ||
132 | mutex_lock(ext4_aio_mutex(inode)); | ||
133 | ext4_aiodio_wait(inode); | ||
134 | } | ||
135 | |||
136 | ret = generic_file_aio_write(iocb, iov, nr_segs, pos); | ||
137 | |||
138 | if (unaligned_aio) | ||
139 | mutex_unlock(ext4_aio_mutex(inode)); | ||
140 | |||
141 | return ret; | ||
84 | } | 142 | } |
85 | 143 | ||
86 | static const struct vm_operations_struct ext4_file_vm_ops = { | 144 | static const struct vm_operations_struct ext4_file_vm_ops = { |
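
The comment block added to fs/ext4/file.c explains why unaligned direct AIO must be serialized: two writers sharing an unwritten block can zero each other's data. The hedged userspace sketch below (not part of this patch) shows a block-aligned O_DIRECT write, which never takes that serialization path; "testfile" and the 4096-byte block size are assumptions for illustration, and a real program would query the block size via statvfs() instead of hard-coding it.

/* Hedged sketch: an aligned O_DIRECT write (offset and length are block multiples). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t blksz = 4096;	/* assumed filesystem block size */
	void *buf;
	int fd;

	fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0 || posix_memalign(&buf, blksz, blksz)) {
		perror("setup");
		return 1;
	}
	memset(buf, 'x', blksz);

	/* Both the file offset (0) and the length are multiples of blksz. */
	if (pwrite(fd, buf, blksz, 0) != (ssize_t)blksz)
		perror("pwrite");

	free(buf);
	close(fd);
	return 0;
}
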
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 851f49b2f9d2..d1fe09aea73d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep; | |||
342 | /* We create slab caches for groupinfo data structures based on the | 342 | /* We create slab caches for groupinfo data structures based on the |
343 | * superblock block size. There will be one per mounted filesystem for | 343 | * superblock block size. There will be one per mounted filesystem for |
344 | * each unique s_blocksize_bits */ | 344 | * each unique s_blocksize_bits */ |
345 | #define NR_GRPINFO_CACHES \ | 345 | #define NR_GRPINFO_CACHES 8 |
346 | (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1) | ||
347 | static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; | 346 | static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; |
348 | 347 | ||
348 | static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { | ||
349 | "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", | ||
350 | "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", | ||
351 | "ext4_groupinfo_64k", "ext4_groupinfo_128k" | ||
352 | }; | ||
353 | |||
349 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, | 354 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, |
350 | ext4_group_t group); | 355 | ext4_group_t group); |
351 | static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, | 356 | static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, |
@@ -2414,6 +2419,55 @@ err_freesgi: | |||
2414 | return -ENOMEM; | 2419 | return -ENOMEM; |
2415 | } | 2420 | } |
2416 | 2421 | ||
2422 | static void ext4_groupinfo_destroy_slabs(void) | ||
2423 | { | ||
2424 | int i; | ||
2425 | |||
2426 | for (i = 0; i < NR_GRPINFO_CACHES; i++) { | ||
2427 | if (ext4_groupinfo_caches[i]) | ||
2428 | kmem_cache_destroy(ext4_groupinfo_caches[i]); | ||
2429 | ext4_groupinfo_caches[i] = NULL; | ||
2430 | } | ||
2431 | } | ||
2432 | |||
2433 | static int ext4_groupinfo_create_slab(size_t size) | ||
2434 | { | ||
2435 | static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); | ||
2436 | int slab_size; | ||
2437 | int blocksize_bits = order_base_2(size); | ||
2438 | int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; | ||
2439 | struct kmem_cache *cachep; | ||
2440 | |||
2441 | if (cache_index >= NR_GRPINFO_CACHES) | ||
2442 | return -EINVAL; | ||
2443 | |||
2444 | if (unlikely(cache_index < 0)) | ||
2445 | cache_index = 0; | ||
2446 | |||
2447 | mutex_lock(&ext4_grpinfo_slab_create_mutex); | ||
2448 | if (ext4_groupinfo_caches[cache_index]) { | ||
2449 | mutex_unlock(&ext4_grpinfo_slab_create_mutex); | ||
2450 | return 0; /* Already created */ | ||
2451 | } | ||
2452 | |||
2453 | slab_size = offsetof(struct ext4_group_info, | ||
2454 | bb_counters[blocksize_bits + 2]); | ||
2455 | |||
2456 | cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], | ||
2457 | slab_size, 0, SLAB_RECLAIM_ACCOUNT, | ||
2458 | NULL); | ||
2459 | |||
2460 | mutex_unlock(&ext4_grpinfo_slab_create_mutex); | ||
2461 | if (!cachep) { | ||
2462 | printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n"); | ||
2463 | return -ENOMEM; | ||
2464 | } | ||
2465 | |||
2466 | ext4_groupinfo_caches[cache_index] = cachep; | ||
2467 | |||
2468 | return 0; | ||
2469 | } | ||
2470 | |||
2417 | int ext4_mb_init(struct super_block *sb, int needs_recovery) | 2471 | int ext4_mb_init(struct super_block *sb, int needs_recovery) |
2418 | { | 2472 | { |
2419 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 2473 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
@@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) | |||
2421 | unsigned offset; | 2475 | unsigned offset; |
2422 | unsigned max; | 2476 | unsigned max; |
2423 | int ret; | 2477 | int ret; |
2424 | int cache_index; | ||
2425 | struct kmem_cache *cachep; | ||
2426 | char *namep = NULL; | ||
2427 | 2478 | ||
2428 | i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); | 2479 | i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); |
2429 | 2480 | ||
@@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) | |||
2440 | goto out; | 2491 | goto out; |
2441 | } | 2492 | } |
2442 | 2493 | ||
2443 | cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; | 2494 | ret = ext4_groupinfo_create_slab(sb->s_blocksize); |
2444 | cachep = ext4_groupinfo_caches[cache_index]; | 2495 | if (ret < 0) |
2445 | if (!cachep) { | 2496 | goto out; |
2446 | char name[32]; | ||
2447 | int len = offsetof(struct ext4_group_info, | ||
2448 | bb_counters[sb->s_blocksize_bits + 2]); | ||
2449 | |||
2450 | sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits); | ||
2451 | namep = kstrdup(name, GFP_KERNEL); | ||
2452 | if (!namep) { | ||
2453 | ret = -ENOMEM; | ||
2454 | goto out; | ||
2455 | } | ||
2456 | |||
2457 | /* Need to free the kmem_cache_name() when we | ||
2458 | * destroy the slab */ | ||
2459 | cachep = kmem_cache_create(namep, len, 0, | ||
2460 | SLAB_RECLAIM_ACCOUNT, NULL); | ||
2461 | if (!cachep) { | ||
2462 | ret = -ENOMEM; | ||
2463 | goto out; | ||
2464 | } | ||
2465 | ext4_groupinfo_caches[cache_index] = cachep; | ||
2466 | } | ||
2467 | 2497 | ||
2468 | /* order 0 is regular bitmap */ | 2498 | /* order 0 is regular bitmap */ |
2469 | sbi->s_mb_maxs[0] = sb->s_blocksize << 3; | 2499 | sbi->s_mb_maxs[0] = sb->s_blocksize << 3; |
@@ -2520,7 +2550,6 @@ out: | |||
2520 | if (ret) { | 2550 | if (ret) { |
2521 | kfree(sbi->s_mb_offsets); | 2551 | kfree(sbi->s_mb_offsets); |
2522 | kfree(sbi->s_mb_maxs); | 2552 | kfree(sbi->s_mb_maxs); |
2523 | kfree(namep); | ||
2524 | } | 2553 | } |
2525 | return ret; | 2554 | return ret; |
2526 | } | 2555 | } |
@@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void) | |||
2734 | 2763 | ||
2735 | void ext4_exit_mballoc(void) | 2764 | void ext4_exit_mballoc(void) |
2736 | { | 2765 | { |
2737 | int i; | ||
2738 | /* | 2766 | /* |
2739 | * Wait for completion of call_rcu()'s on ext4_pspace_cachep | 2767 | * Wait for completion of call_rcu()'s on ext4_pspace_cachep |
2740 | * before destroying the slab cache. | 2768 | * before destroying the slab cache. |
@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void) | |||
2743 | kmem_cache_destroy(ext4_pspace_cachep); | 2771 | kmem_cache_destroy(ext4_pspace_cachep); |
2744 | kmem_cache_destroy(ext4_ac_cachep); | 2772 | kmem_cache_destroy(ext4_ac_cachep); |
2745 | kmem_cache_destroy(ext4_free_ext_cachep); | 2773 | kmem_cache_destroy(ext4_free_ext_cachep); |
2746 | 2774 | ext4_groupinfo_destroy_slabs(); | |
2747 | for (i = 0; i < NR_GRPINFO_CACHES; i++) { | ||
2748 | struct kmem_cache *cachep = ext4_groupinfo_caches[i]; | ||
2749 | if (cachep) { | ||
2750 | char *name = (char *)kmem_cache_name(cachep); | ||
2751 | kmem_cache_destroy(cachep); | ||
2752 | kfree(name); | ||
2753 | } | ||
2754 | } | ||
2755 | ext4_remove_debugfs_entry(); | 2775 | ext4_remove_debugfs_entry(); |
2756 | } | 2776 | } |
2757 | 2777 | ||
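
ext4_groupinfo_create_slab() above sizes each slab with offsetof(struct ext4_group_info, bb_counters[blocksize_bits + 2]), i.e. a structure with a trailing variable-length counter array. A hedged userspace sketch of that sizing idiom follows (not part of this patch); the struct and field names are illustrative.

/* Hedged sketch: size a trailing flexible array with offsetof(). */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_group_info {
	unsigned long	free_blocks;
	unsigned short	counters[];	/* one slot per buddy order */
};

int main(void)
{
	int blocksize_bits = 12;	/* 4k blocks */
	size_t sz = offsetof(struct demo_group_info,
			     counters[blocksize_bits + 2]);
	struct demo_group_info *gi = calloc(1, sz);

	if (!gi)
		return 1;
	printf("allocating %zu bytes per group for %d-bit blocks\n",
	       sz, blocksize_bits);
	free(gi);
	return 0;
}
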
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 7270dcfca92a..955cc309142f 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -32,14 +32,8 @@ | |||
32 | 32 | ||
33 | static struct kmem_cache *io_page_cachep, *io_end_cachep; | 33 | static struct kmem_cache *io_page_cachep, *io_end_cachep; |
34 | 34 | ||
35 | #define WQ_HASH_SZ 37 | ||
36 | #define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ]) | ||
37 | static wait_queue_head_t ioend_wq[WQ_HASH_SZ]; | ||
38 | |||
39 | int __init ext4_init_pageio(void) | 35 | int __init ext4_init_pageio(void) |
40 | { | 36 | { |
41 | int i; | ||
42 | |||
43 | io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); | 37 | io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); |
44 | if (io_page_cachep == NULL) | 38 | if (io_page_cachep == NULL) |
45 | return -ENOMEM; | 39 | return -ENOMEM; |
@@ -48,9 +42,6 @@ int __init ext4_init_pageio(void) | |||
48 | kmem_cache_destroy(io_page_cachep); | 42 | kmem_cache_destroy(io_page_cachep); |
49 | return -ENOMEM; | 43 | return -ENOMEM; |
50 | } | 44 | } |
51 | for (i = 0; i < WQ_HASH_SZ; i++) | ||
52 | init_waitqueue_head(&ioend_wq[i]); | ||
53 | |||
54 | return 0; | 45 | return 0; |
55 | } | 46 | } |
56 | 47 | ||
@@ -62,7 +53,7 @@ void ext4_exit_pageio(void) | |||
62 | 53 | ||
63 | void ext4_ioend_wait(struct inode *inode) | 54 | void ext4_ioend_wait(struct inode *inode) |
64 | { | 55 | { |
65 | wait_queue_head_t *wq = to_ioend_wq(inode); | 56 | wait_queue_head_t *wq = ext4_ioend_wq(inode); |
66 | 57 | ||
67 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); | 58 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); |
68 | } | 59 | } |
@@ -87,7 +78,7 @@ void ext4_free_io_end(ext4_io_end_t *io) | |||
87 | for (i = 0; i < io->num_io_pages; i++) | 78 | for (i = 0; i < io->num_io_pages; i++) |
88 | put_io_page(io->pages[i]); | 79 | put_io_page(io->pages[i]); |
89 | io->num_io_pages = 0; | 80 | io->num_io_pages = 0; |
90 | wq = to_ioend_wq(io->inode); | 81 | wq = ext4_ioend_wq(io->inode); |
91 | if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) && | 82 | if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) && |
92 | waitqueue_active(wq)) | 83 | waitqueue_active(wq)) |
93 | wake_up_all(wq); | 84 | wake_up_all(wq); |
@@ -102,6 +93,7 @@ int ext4_end_io_nolock(ext4_io_end_t *io) | |||
102 | struct inode *inode = io->inode; | 93 | struct inode *inode = io->inode; |
103 | loff_t offset = io->offset; | 94 | loff_t offset = io->offset; |
104 | ssize_t size = io->size; | 95 | ssize_t size = io->size; |
96 | wait_queue_head_t *wq; | ||
105 | int ret = 0; | 97 | int ret = 0; |
106 | 98 | ||
107 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," | 99 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," |
@@ -126,7 +118,16 @@ int ext4_end_io_nolock(ext4_io_end_t *io) | |||
126 | if (io->iocb) | 118 | if (io->iocb) |
127 | aio_complete(io->iocb, io->result, 0); | 119 | aio_complete(io->iocb, io->result, 0); |
128 | /* clear the DIO AIO unwritten flag */ | 120 | /* clear the DIO AIO unwritten flag */ |
129 | io->flag &= ~EXT4_IO_END_UNWRITTEN; | 121 | if (io->flag & EXT4_IO_END_UNWRITTEN) { |
122 | io->flag &= ~EXT4_IO_END_UNWRITTEN; | ||
123 | /* Wake up anyone waiting on unwritten extent conversion */ | ||
124 | wq = ext4_ioend_wq(io->inode); | ||
125 | if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) && | ||
126 | waitqueue_active(wq)) { | ||
127 | wake_up_all(wq); | ||
128 | } | ||
129 | } | ||
130 | |||
130 | return ret; | 131 | return ret; |
131 | } | 132 | } |
132 | 133 | ||
@@ -190,6 +191,7 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
190 | struct inode *inode; | 191 | struct inode *inode; |
191 | unsigned long flags; | 192 | unsigned long flags; |
192 | int i; | 193 | int i; |
194 | sector_t bi_sector = bio->bi_sector; | ||
193 | 195 | ||
194 | BUG_ON(!io_end); | 196 | BUG_ON(!io_end); |
195 | bio->bi_private = NULL; | 197 | bio->bi_private = NULL; |
@@ -207,9 +209,7 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
207 | if (error) | 209 | if (error) |
208 | SetPageError(page); | 210 | SetPageError(page); |
209 | BUG_ON(!head); | 211 | BUG_ON(!head); |
210 | if (head->b_size == PAGE_CACHE_SIZE) | 212 | if (head->b_size != PAGE_CACHE_SIZE) { |
211 | clear_buffer_dirty(head); | ||
212 | else { | ||
213 | loff_t offset; | 213 | loff_t offset; |
214 | loff_t io_end_offset = io_end->offset + io_end->size; | 214 | loff_t io_end_offset = io_end->offset + io_end->size; |
215 | 215 | ||
@@ -221,7 +221,6 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
221 | if (error) | 221 | if (error) |
222 | buffer_io_error(bh); | 222 | buffer_io_error(bh); |
223 | 223 | ||
224 | clear_buffer_dirty(bh); | ||
225 | } | 224 | } |
226 | if (buffer_delay(bh)) | 225 | if (buffer_delay(bh)) |
227 | partial_write = 1; | 226 | partial_write = 1; |
@@ -257,7 +256,7 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
257 | (unsigned long long) io_end->offset, | 256 | (unsigned long long) io_end->offset, |
258 | (long) io_end->size, | 257 | (long) io_end->size, |
259 | (unsigned long long) | 258 | (unsigned long long) |
260 | bio->bi_sector >> (inode->i_blkbits - 9)); | 259 | bi_sector >> (inode->i_blkbits - 9)); |
261 | } | 260 | } |
262 | 261 | ||
263 | /* Add the io_end to per-inode completed io list*/ | 262 | /* Add the io_end to per-inode completed io list*/ |
@@ -380,6 +379,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, | |||
380 | 379 | ||
381 | blocksize = 1 << inode->i_blkbits; | 380 | blocksize = 1 << inode->i_blkbits; |
382 | 381 | ||
382 | BUG_ON(!PageLocked(page)); | ||
383 | BUG_ON(PageWriteback(page)); | 383 | BUG_ON(PageWriteback(page)); |
384 | set_page_writeback(page); | 384 | set_page_writeback(page); |
385 | ClearPageError(page); | 385 | ClearPageError(page); |
@@ -397,12 +397,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io, | |||
397 | for (bh = head = page_buffers(page), block_start = 0; | 397 | for (bh = head = page_buffers(page), block_start = 0; |
398 | bh != head || !block_start; | 398 | bh != head || !block_start; |
399 | block_start = block_end, bh = bh->b_this_page) { | 399 | block_start = block_end, bh = bh->b_this_page) { |
400 | |||
400 | block_end = block_start + blocksize; | 401 | block_end = block_start + blocksize; |
401 | if (block_start >= len) { | 402 | if (block_start >= len) { |
402 | clear_buffer_dirty(bh); | 403 | clear_buffer_dirty(bh); |
403 | set_buffer_uptodate(bh); | 404 | set_buffer_uptodate(bh); |
404 | continue; | 405 | continue; |
405 | } | 406 | } |
407 | clear_buffer_dirty(bh); | ||
406 | ret = io_submit_add_bh(io, io_page, inode, wbc, bh); | 408 | ret = io_submit_add_bh(io, io_page, inode, wbc, bh); |
407 | if (ret) { | 409 | if (ret) { |
408 | /* | 410 | /* |
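
The page-io.c changes above pair a per-inode pending counter with a hashed wait queue: completion paths do atomic_dec_and_test() and wake_up_all(), while ext4_ioend_wait()/ext4_aiodio_wait() sleep until the counter reaches zero. A hedged pthreads sketch of that drain protocol follows (not part of this patch); all names are illustrative.

/* Hedged sketch: decrement a pending counter, wake waiters when it hits zero. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_done = PTHREAD_COND_INITIALIZER;
static int pending = 4;			/* e.g. in-flight conversions */

static void complete_one(void)
{
	pthread_mutex_lock(&lock);
	if (--pending == 0)			/* analogue of atomic_dec_and_test() */
		pthread_cond_broadcast(&all_done);	/* analogue of wake_up_all() */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	complete_one();
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	pthread_mutex_lock(&lock);		/* analogue of ext4_ioend_wait() */
	while (pending != 0)
		pthread_cond_wait(&all_done, &lock);
	pthread_mutex_unlock(&lock);
	printf("all pending work drained\n");

	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
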
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 48ce561fafac..f6a318f836b2 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -77,6 +77,7 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, | |||
77 | const char *dev_name, void *data); | 77 | const char *dev_name, void *data); |
78 | static void ext4_destroy_lazyinit_thread(void); | 78 | static void ext4_destroy_lazyinit_thread(void); |
79 | static void ext4_unregister_li_request(struct super_block *sb); | 79 | static void ext4_unregister_li_request(struct super_block *sb); |
80 | static void ext4_clear_request_list(void); | ||
80 | 81 | ||
81 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 82 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
82 | static struct file_system_type ext3_fs_type = { | 83 | static struct file_system_type ext3_fs_type = { |
@@ -832,6 +833,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
832 | ei->i_sync_tid = 0; | 833 | ei->i_sync_tid = 0; |
833 | ei->i_datasync_tid = 0; | 834 | ei->i_datasync_tid = 0; |
834 | atomic_set(&ei->i_ioend_count, 0); | 835 | atomic_set(&ei->i_ioend_count, 0); |
836 | atomic_set(&ei->i_aiodio_unwritten, 0); | ||
835 | 837 | ||
836 | return &ei->vfs_inode; | 838 | return &ei->vfs_inode; |
837 | } | 839 | } |
@@ -2716,6 +2718,8 @@ static void ext4_unregister_li_request(struct super_block *sb) | |||
2716 | mutex_unlock(&ext4_li_info->li_list_mtx); | 2718 | mutex_unlock(&ext4_li_info->li_list_mtx); |
2717 | } | 2719 | } |
2718 | 2720 | ||
2721 | static struct task_struct *ext4_lazyinit_task; | ||
2722 | |||
2719 | /* | 2723 | /* |
2720 | * This is the function where ext4lazyinit thread lives. It walks | 2724 | * This is the function where ext4lazyinit thread lives. It walks |
2721 | * through the request list searching for next scheduled filesystem. | 2725 | * through the request list searching for next scheduled filesystem. |
@@ -2784,6 +2788,10 @@ cont_thread: | |||
2784 | if (time_before(jiffies, next_wakeup)) | 2788 | if (time_before(jiffies, next_wakeup)) |
2785 | schedule(); | 2789 | schedule(); |
2786 | finish_wait(&eli->li_wait_daemon, &wait); | 2790 | finish_wait(&eli->li_wait_daemon, &wait); |
2791 | if (kthread_should_stop()) { | ||
2792 | ext4_clear_request_list(); | ||
2793 | goto exit_thread; | ||
2794 | } | ||
2787 | } | 2795 | } |
2788 | 2796 | ||
2789 | exit_thread: | 2797 | exit_thread: |
@@ -2808,6 +2816,7 @@ exit_thread: | |||
2808 | wake_up(&eli->li_wait_task); | 2816 | wake_up(&eli->li_wait_task); |
2809 | 2817 | ||
2810 | kfree(ext4_li_info); | 2818 | kfree(ext4_li_info); |
2819 | ext4_lazyinit_task = NULL; | ||
2811 | ext4_li_info = NULL; | 2820 | ext4_li_info = NULL; |
2812 | mutex_unlock(&ext4_li_mtx); | 2821 | mutex_unlock(&ext4_li_mtx); |
2813 | 2822 | ||
@@ -2830,11 +2839,10 @@ static void ext4_clear_request_list(void) | |||
2830 | 2839 | ||
2831 | static int ext4_run_lazyinit_thread(void) | 2840 | static int ext4_run_lazyinit_thread(void) |
2832 | { | 2841 | { |
2833 | struct task_struct *t; | 2842 | ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, |
2834 | 2843 | ext4_li_info, "ext4lazyinit"); | |
2835 | t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); | 2844 | if (IS_ERR(ext4_lazyinit_task)) { |
2836 | if (IS_ERR(t)) { | 2845 | int err = PTR_ERR(ext4_lazyinit_task); |
2837 | int err = PTR_ERR(t); | ||
2838 | ext4_clear_request_list(); | 2846 | ext4_clear_request_list(); |
2839 | del_timer_sync(&ext4_li_info->li_timer); | 2847 | del_timer_sync(&ext4_li_info->li_timer); |
2840 | kfree(ext4_li_info); | 2848 | kfree(ext4_li_info); |
@@ -2985,16 +2993,10 @@ static void ext4_destroy_lazyinit_thread(void) | |||
2985 | * If thread exited earlier | 2993 | * If thread exited earlier |
2986 | * there's nothing to be done. | 2994 | * there's nothing to be done. |
2987 | */ | 2995 | */ |
2988 | if (!ext4_li_info) | 2996 | if (!ext4_li_info || !ext4_lazyinit_task) |
2989 | return; | 2997 | return; |
2990 | 2998 | ||
2991 | ext4_clear_request_list(); | 2999 | kthread_stop(ext4_lazyinit_task); |
2992 | |||
2993 | while (ext4_li_info->li_task) { | ||
2994 | wake_up(&ext4_li_info->li_wait_daemon); | ||
2995 | wait_event(ext4_li_info->li_wait_task, | ||
2996 | ext4_li_info->li_task == NULL); | ||
2997 | } | ||
2998 | } | 3000 | } |
2999 | 3001 | ||
3000 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) | 3002 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
@@ -4768,7 +4770,7 @@ static struct file_system_type ext4_fs_type = { | |||
4768 | .fs_flags = FS_REQUIRES_DEV, | 4770 | .fs_flags = FS_REQUIRES_DEV, |
4769 | }; | 4771 | }; |
4770 | 4772 | ||
4771 | int __init ext4_init_feat_adverts(void) | 4773 | static int __init ext4_init_feat_adverts(void) |
4772 | { | 4774 | { |
4773 | struct ext4_features *ef; | 4775 | struct ext4_features *ef; |
4774 | int ret = -ENOMEM; | 4776 | int ret = -ENOMEM; |
@@ -4792,23 +4794,44 @@ out: | |||
4792 | return ret; | 4794 | return ret; |
4793 | } | 4795 | } |
4794 | 4796 | ||
4797 | static void ext4_exit_feat_adverts(void) | ||
4798 | { | ||
4799 | kobject_put(&ext4_feat->f_kobj); | ||
4800 | wait_for_completion(&ext4_feat->f_kobj_unregister); | ||
4801 | kfree(ext4_feat); | ||
4802 | } | ||
4803 | |||
4804 | /* Shared across all ext4 file systems */ | ||
4805 | wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; | ||
4806 | struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; | ||
4807 | |||
4795 | static int __init ext4_init_fs(void) | 4808 | static int __init ext4_init_fs(void) |
4796 | { | 4809 | { |
4797 | int err; | 4810 | int i, err; |
4798 | 4811 | ||
4799 | ext4_check_flag_values(); | 4812 | ext4_check_flag_values(); |
4813 | |||
4814 | for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { | ||
4815 | mutex_init(&ext4__aio_mutex[i]); | ||
4816 | init_waitqueue_head(&ext4__ioend_wq[i]); | ||
4817 | } | ||
4818 | |||
4800 | err = ext4_init_pageio(); | 4819 | err = ext4_init_pageio(); |
4801 | if (err) | 4820 | if (err) |
4802 | return err; | 4821 | return err; |
4803 | err = ext4_init_system_zone(); | 4822 | err = ext4_init_system_zone(); |
4804 | if (err) | 4823 | if (err) |
4805 | goto out5; | 4824 | goto out7; |
4806 | ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); | 4825 | ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); |
4807 | if (!ext4_kset) | 4826 | if (!ext4_kset) |
4808 | goto out4; | 4827 | goto out6; |
4809 | ext4_proc_root = proc_mkdir("fs/ext4", NULL); | 4828 | ext4_proc_root = proc_mkdir("fs/ext4", NULL); |
4829 | if (!ext4_proc_root) | ||
4830 | goto out5; | ||
4810 | 4831 | ||
4811 | err = ext4_init_feat_adverts(); | 4832 | err = ext4_init_feat_adverts(); |
4833 | if (err) | ||
4834 | goto out4; | ||
4812 | 4835 | ||
4813 | err = ext4_init_mballoc(); | 4836 | err = ext4_init_mballoc(); |
4814 | if (err) | 4837 | if (err) |
@@ -4838,12 +4861,14 @@ out1: | |||
4838 | out2: | 4861 | out2: |
4839 | ext4_exit_mballoc(); | 4862 | ext4_exit_mballoc(); |
4840 | out3: | 4863 | out3: |
4841 | kfree(ext4_feat); | 4864 | ext4_exit_feat_adverts(); |
4865 | out4: | ||
4842 | remove_proc_entry("fs/ext4", NULL); | 4866 | remove_proc_entry("fs/ext4", NULL); |
4867 | out5: | ||
4843 | kset_unregister(ext4_kset); | 4868 | kset_unregister(ext4_kset); |
4844 | out4: | 4869 | out6: |
4845 | ext4_exit_system_zone(); | 4870 | ext4_exit_system_zone(); |
4846 | out5: | 4871 | out7: |
4847 | ext4_exit_pageio(); | 4872 | ext4_exit_pageio(); |
4848 | return err; | 4873 | return err; |
4849 | } | 4874 | } |
@@ -4857,6 +4882,7 @@ static void __exit ext4_exit_fs(void) | |||
4857 | destroy_inodecache(); | 4882 | destroy_inodecache(); |
4858 | ext4_exit_xattr(); | 4883 | ext4_exit_xattr(); |
4859 | ext4_exit_mballoc(); | 4884 | ext4_exit_mballoc(); |
4885 | ext4_exit_feat_adverts(); | ||
4860 | remove_proc_entry("fs/ext4", NULL); | 4886 | remove_proc_entry("fs/ext4", NULL); |
4861 | kset_unregister(ext4_kset); | 4887 | kset_unregister(ext4_kset); |
4862 | ext4_exit_system_zone(); | 4888 | ext4_exit_system_zone(); |
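
The relabelled out4..out7 targets in ext4_init_fs() above follow the usual reverse-order goto unwinding: each failure jumps to the label that tears down exactly the steps already completed. A hedged standalone sketch of that pattern follows (not part of this patch); the setup_*/teardown_* helpers are placeholders.

/* Hedged sketch: ordered goto-based error unwinding for multi-step init. */
#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return -1; }	/* pretend this step fails */
static void teardown_a(void) { puts("undo a"); }
static void teardown_b(void) { puts("undo b"); }

static int init_everything(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;
	err = setup_b();
	if (err)
		goto out_a;
	err = setup_c();
	if (err)
		goto out_b;
	return 0;

out_b:
	teardown_b();
out_a:
	teardown_a();
out:
	return err;
}

int main(void)
{
	printf("init_everything() = %d\n", init_everything());
	return 0;
}
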
diff --git a/fs/fcntl.c b/fs/fcntl.c index ecc8b3954ed6..cb1026181bdc 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -815,7 +815,7 @@ static int __init fcntl_init(void) | |||
815 | __O_SYNC | O_DSYNC | FASYNC | | 815 | __O_SYNC | O_DSYNC | FASYNC | |
816 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | | 816 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | |
817 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | | 817 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | |
818 | FMODE_EXEC | 818 | __FMODE_EXEC |
819 | )); | 819 | )); |
820 | 820 | ||
821 | fasync_cache = kmem_cache_create("fasync_cache", | 821 | fasync_cache = kmem_cache_create("fasync_cache", |
diff --git a/fs/file_table.c b/fs/file_table.c index c3e89adf53c0..eb36b6b17e26 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -125,13 +125,13 @@ struct file *get_empty_filp(void) | |||
125 | goto fail; | 125 | goto fail; |
126 | 126 | ||
127 | percpu_counter_inc(&nr_files); | 127 | percpu_counter_inc(&nr_files); |
128 | f->f_cred = get_cred(cred); | ||
128 | if (security_file_alloc(f)) | 129 | if (security_file_alloc(f)) |
129 | goto fail_sec; | 130 | goto fail_sec; |
130 | 131 | ||
131 | INIT_LIST_HEAD(&f->f_u.fu_list); | 132 | INIT_LIST_HEAD(&f->f_u.fu_list); |
132 | atomic_long_set(&f->f_count, 1); | 133 | atomic_long_set(&f->f_count, 1); |
133 | rwlock_init(&f->f_owner.lock); | 134 | rwlock_init(&f->f_owner.lock); |
134 | f->f_cred = get_cred(cred); | ||
135 | spin_lock_init(&f->f_lock); | 135 | spin_lock_init(&f->f_lock); |
136 | eventpoll_init_file(f); | 136 | eventpoll_init_file(f); |
137 | /* f->f_version: 0 */ | 137 | /* f->f_version: 0 */ |
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index 52a0bcaa7b6d..b1991a2a08e0 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c | |||
@@ -397,8 +397,8 @@ int hfsplus_file_extend(struct inode *inode) | |||
397 | u32 start, len, goal; | 397 | u32 start, len, goal; |
398 | int res; | 398 | int res; |
399 | 399 | ||
400 | if (sbi->total_blocks - sbi->free_blocks + 8 > | 400 | if (sbi->alloc_file->i_size * 8 < |
401 | sbi->alloc_file->i_size * 8) { | 401 | sbi->total_blocks - sbi->free_blocks + 8) { |
402 | /* extend alloc file */ | 402 | /* extend alloc file */ |
403 | printk(KERN_ERR "hfs: extend alloc file! " | 403 | printk(KERN_ERR "hfs: extend alloc file! " |
404 | "(%llu,%u,%u)\n", | 404 | "(%llu,%u,%u)\n", |
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c index d66ad113b1cc..40ad88c12c64 100644 --- a/fs/hfsplus/part_tbl.c +++ b/fs/hfsplus/part_tbl.c | |||
@@ -134,7 +134,7 @@ int hfs_part_find(struct super_block *sb, | |||
134 | res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK, | 134 | res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK, |
135 | data, READ); | 135 | data, READ); |
136 | if (res) | 136 | if (res) |
137 | return res; | 137 | goto out; |
138 | 138 | ||
139 | switch (be16_to_cpu(*((__be16 *)data))) { | 139 | switch (be16_to_cpu(*((__be16 *)data))) { |
140 | case HFS_OLD_PMAP_MAGIC: | 140 | case HFS_OLD_PMAP_MAGIC: |
@@ -147,7 +147,7 @@ int hfs_part_find(struct super_block *sb, | |||
147 | res = -ENOENT; | 147 | res = -ENOENT; |
148 | break; | 148 | break; |
149 | } | 149 | } |
150 | 150 | out: | |
151 | kfree(data); | 151 | kfree(data); |
152 | return res; | 152 | return res; |
153 | } | 153 | } |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 9a3b4795f43c..b49b55584c84 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
@@ -338,20 +338,22 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
338 | struct inode *root, *inode; | 338 | struct inode *root, *inode; |
339 | struct qstr str; | 339 | struct qstr str; |
340 | struct nls_table *nls = NULL; | 340 | struct nls_table *nls = NULL; |
341 | int err = -EINVAL; | 341 | int err; |
342 | 342 | ||
343 | err = -EINVAL; | ||
343 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); | 344 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); |
344 | if (!sbi) | 345 | if (!sbi) |
345 | return -ENOMEM; | 346 | goto out; |
346 | 347 | ||
347 | sb->s_fs_info = sbi; | 348 | sb->s_fs_info = sbi; |
348 | mutex_init(&sbi->alloc_mutex); | 349 | mutex_init(&sbi->alloc_mutex); |
349 | mutex_init(&sbi->vh_mutex); | 350 | mutex_init(&sbi->vh_mutex); |
350 | hfsplus_fill_defaults(sbi); | 351 | hfsplus_fill_defaults(sbi); |
352 | |||
353 | err = -EINVAL; | ||
351 | if (!hfsplus_parse_options(data, sbi)) { | 354 | if (!hfsplus_parse_options(data, sbi)) { |
352 | printk(KERN_ERR "hfs: unable to parse mount options\n"); | 355 | printk(KERN_ERR "hfs: unable to parse mount options\n"); |
353 | err = -EINVAL; | 356 | goto out_unload_nls; |
354 | goto cleanup; | ||
355 | } | 357 | } |
356 | 358 | ||
357 | /* temporarily use utf8 to correctly find the hidden dir below */ | 359 | /* temporarily use utf8 to correctly find the hidden dir below */ |
@@ -359,16 +361,14 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
359 | sbi->nls = load_nls("utf8"); | 361 | sbi->nls = load_nls("utf8"); |
360 | if (!sbi->nls) { | 362 | if (!sbi->nls) { |
361 | printk(KERN_ERR "hfs: unable to load nls for utf8\n"); | 363 | printk(KERN_ERR "hfs: unable to load nls for utf8\n"); |
362 | err = -EINVAL; | 364 | goto out_unload_nls; |
363 | goto cleanup; | ||
364 | } | 365 | } |
365 | 366 | ||
366 | /* Grab the volume header */ | 367 | /* Grab the volume header */ |
367 | if (hfsplus_read_wrapper(sb)) { | 368 | if (hfsplus_read_wrapper(sb)) { |
368 | if (!silent) | 369 | if (!silent) |
369 | printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n"); | 370 | printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n"); |
370 | err = -EINVAL; | 371 | goto out_unload_nls; |
371 | goto cleanup; | ||
372 | } | 372 | } |
373 | vhdr = sbi->s_vhdr; | 373 | vhdr = sbi->s_vhdr; |
374 | 374 | ||
@@ -377,7 +377,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
377 | if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION || | 377 | if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION || |
378 | be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) { | 378 | be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) { |
379 | printk(KERN_ERR "hfs: wrong filesystem version\n"); | 379 | printk(KERN_ERR "hfs: wrong filesystem version\n"); |
380 | goto cleanup; | 380 | goto out_free_vhdr; |
381 | } | 381 | } |
382 | sbi->total_blocks = be32_to_cpu(vhdr->total_blocks); | 382 | sbi->total_blocks = be32_to_cpu(vhdr->total_blocks); |
383 | sbi->free_blocks = be32_to_cpu(vhdr->free_blocks); | 383 | sbi->free_blocks = be32_to_cpu(vhdr->free_blocks); |
@@ -421,19 +421,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
421 | sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); | 421 | sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); |
422 | if (!sbi->ext_tree) { | 422 | if (!sbi->ext_tree) { |
423 | printk(KERN_ERR "hfs: failed to load extents file\n"); | 423 | printk(KERN_ERR "hfs: failed to load extents file\n"); |
424 | goto cleanup; | 424 | goto out_free_vhdr; |
425 | } | 425 | } |
426 | sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); | 426 | sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); |
427 | if (!sbi->cat_tree) { | 427 | if (!sbi->cat_tree) { |
428 | printk(KERN_ERR "hfs: failed to load catalog file\n"); | 428 | printk(KERN_ERR "hfs: failed to load catalog file\n"); |
429 | goto cleanup; | 429 | goto out_close_ext_tree; |
430 | } | 430 | } |
431 | 431 | ||
432 | inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); | 432 | inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); |
433 | if (IS_ERR(inode)) { | 433 | if (IS_ERR(inode)) { |
434 | printk(KERN_ERR "hfs: failed to load allocation file\n"); | 434 | printk(KERN_ERR "hfs: failed to load allocation file\n"); |
435 | err = PTR_ERR(inode); | 435 | err = PTR_ERR(inode); |
436 | goto cleanup; | 436 | goto out_close_cat_tree; |
437 | } | 437 | } |
438 | sbi->alloc_file = inode; | 438 | sbi->alloc_file = inode; |
439 | 439 | ||
@@ -442,14 +442,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
442 | if (IS_ERR(root)) { | 442 | if (IS_ERR(root)) { |
443 | printk(KERN_ERR "hfs: failed to load root directory\n"); | 443 | printk(KERN_ERR "hfs: failed to load root directory\n"); |
444 | err = PTR_ERR(root); | 444 | err = PTR_ERR(root); |
445 | goto cleanup; | 445 | goto out_put_alloc_file; |
446 | } | ||
447 | sb->s_d_op = &hfsplus_dentry_operations; | ||
448 | sb->s_root = d_alloc_root(root); | ||
449 | if (!sb->s_root) { | ||
450 | iput(root); | ||
451 | err = -ENOMEM; | ||
452 | goto cleanup; | ||
453 | } | 446 | } |
454 | 447 | ||
455 | str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; | 448 | str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; |
@@ -459,46 +452,69 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
459 | if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { | 452 | if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { |
460 | hfs_find_exit(&fd); | 453 | hfs_find_exit(&fd); |
461 | if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) | 454 | if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) |
462 | goto cleanup; | 455 | goto out_put_root; |
463 | inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); | 456 | inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); |
464 | if (IS_ERR(inode)) { | 457 | if (IS_ERR(inode)) { |
465 | err = PTR_ERR(inode); | 458 | err = PTR_ERR(inode); |
466 | goto cleanup; | 459 | goto out_put_root; |
467 | } | 460 | } |
468 | sbi->hidden_dir = inode; | 461 | sbi->hidden_dir = inode; |
469 | } else | 462 | } else |
470 | hfs_find_exit(&fd); | 463 | hfs_find_exit(&fd); |
471 | 464 | ||
472 | if (sb->s_flags & MS_RDONLY) | 465 | if (!(sb->s_flags & MS_RDONLY)) { |
473 | goto out; | 466 | /* |
467 | * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused | ||
468 | * all three are registered with Apple for our use | ||
469 | */ | ||
470 | vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION); | ||
471 | vhdr->modify_date = hfsp_now2mt(); | ||
472 | be32_add_cpu(&vhdr->write_count, 1); | ||
473 | vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); | ||
474 | vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); | ||
475 | hfsplus_sync_fs(sb, 1); | ||
474 | 476 | ||
475 | /* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused | 477 | if (!sbi->hidden_dir) { |
476 | * all three are registered with Apple for our use | 478 | mutex_lock(&sbi->vh_mutex); |
477 | */ | 479 | sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); |
478 | vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION); | 480 | hfsplus_create_cat(sbi->hidden_dir->i_ino, root, &str, |
479 | vhdr->modify_date = hfsp_now2mt(); | 481 | sbi->hidden_dir); |
480 | be32_add_cpu(&vhdr->write_count, 1); | 482 | mutex_unlock(&sbi->vh_mutex); |
481 | vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); | 483 | |
482 | vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); | 484 | hfsplus_mark_inode_dirty(sbi->hidden_dir, |
483 | hfsplus_sync_fs(sb, 1); | 485 | HFSPLUS_I_CAT_DIRTY); |
484 | 486 | } | |
485 | if (!sbi->hidden_dir) { | ||
486 | mutex_lock(&sbi->vh_mutex); | ||
487 | sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); | ||
488 | hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode, | ||
489 | &str, sbi->hidden_dir); | ||
490 | mutex_unlock(&sbi->vh_mutex); | ||
491 | |||
492 | hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY); | ||
493 | } | 487 | } |
494 | out: | 488 | |
489 | sb->s_d_op = &hfsplus_dentry_operations; | ||
490 | sb->s_root = d_alloc_root(root); | ||
491 | if (!sb->s_root) { | ||
492 | err = -ENOMEM; | ||
493 | goto out_put_hidden_dir; | ||
494 | } | ||
495 | |||
495 | unload_nls(sbi->nls); | 496 | unload_nls(sbi->nls); |
496 | sbi->nls = nls; | 497 | sbi->nls = nls; |
497 | return 0; | 498 | return 0; |
498 | 499 | ||
499 | cleanup: | 500 | out_put_hidden_dir: |
500 | hfsplus_put_super(sb); | 501 | iput(sbi->hidden_dir); |
502 | out_put_root: | ||
503 | iput(root); | ||

504 | out_put_alloc_file: | ||
505 | iput(sbi->alloc_file); | ||
506 | out_close_cat_tree: | ||
507 | hfs_btree_close(sbi->cat_tree); | ||
508 | out_close_ext_tree: | ||
509 | hfs_btree_close(sbi->ext_tree); | ||
510 | out_free_vhdr: | ||
511 | kfree(sbi->s_vhdr); | ||
512 | kfree(sbi->s_backup_vhdr); | ||
513 | out_unload_nls: | ||
514 | unload_nls(sbi->nls); | ||
501 | unload_nls(nls); | 515 | unload_nls(nls); |
516 | kfree(sbi); | ||
517 | out: | ||
502 | return err; | 518 | return err; |
503 | } | 519 | } |
504 | 520 | ||
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 196231794f64..3031d81f5f0f 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c | |||
@@ -167,7 +167,7 @@ reread: | |||
167 | break; | 167 | break; |
168 | case cpu_to_be16(HFSP_WRAP_MAGIC): | 168 | case cpu_to_be16(HFSP_WRAP_MAGIC): |
169 | if (!hfsplus_read_mdb(sbi->s_vhdr, &wd)) | 169 | if (!hfsplus_read_mdb(sbi->s_vhdr, &wd)) |
170 | goto out; | 170 | goto out_free_backup_vhdr; |
171 | wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT; | 171 | wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT; |
172 | part_start += wd.ablk_start + wd.embed_start * wd.ablk_size; | 172 | part_start += wd.ablk_start + wd.embed_start * wd.ablk_size; |
173 | part_size = wd.embed_count * wd.ablk_size; | 173 | part_size = wd.embed_count * wd.ablk_size; |
@@ -179,7 +179,7 @@ reread: | |||
179 | * (should do this only for cdrom/loop though) | 179 | * (should do this only for cdrom/loop though) |
180 | */ | 180 | */ |
181 | if (hfs_part_find(sb, &part_start, &part_size)) | 181 | if (hfs_part_find(sb, &part_start, &part_size)) |
182 | goto out; | 182 | goto out_free_backup_vhdr; |
183 | goto reread; | 183 | goto reread; |
184 | } | 184 | } |
185 | 185 | ||
diff --git a/fs/ioctl.c b/fs/ioctl.c index a59635e295fa..1eebeb72b202 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
@@ -273,6 +273,13 @@ int __generic_block_fiemap(struct inode *inode, | |||
273 | len = isize; | 273 | len = isize; |
274 | } | 274 | } |
275 | 275 | ||
276 | /* | ||
277 | * Some filesystems can't deal with being asked to map less than | ||
278 | * blocksize, so make sure our len is at least block length. | ||
279 | */ | ||
280 | if (logical_to_blk(inode, len) == 0) | ||
281 | len = blk_to_logical(inode, 1); | ||
282 | |||
276 | start_blk = logical_to_blk(inode, start); | 283 | start_blk = logical_to_blk(inode, start); |
277 | last_blk = logical_to_blk(inode, start + len - 1); | 284 | last_blk = logical_to_blk(inode, start + len - 1); |
278 | 285 | ||
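
The comment added to __generic_block_fiemap() above explains the clamp: if the requested byte length maps to zero blocks, round it up to one block before converting to block numbers. A hedged standalone sketch of that conversion and clamp follows (not part of this patch); blkbits = 12 (4k blocks) is an assumption for illustration.

/* Hedged sketch: byte<->block conversion with a minimum of one block. */
#include <stdio.h>

static unsigned long long logical_to_blk(unsigned long long bytes, int blkbits)
{
	return bytes >> blkbits;
}

static unsigned long long blk_to_logical(unsigned long long blk, int blkbits)
{
	return blk << blkbits;
}

int main(void)
{
	int blkbits = 12;
	unsigned long long len = 512;	/* caller asked for less than a block */

	if (logical_to_blk(len, blkbits) == 0)
		len = blk_to_logical(1, blkbits);	/* round up to one block */

	printf("mapping %llu bytes\n", len);
	return 0;
}
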
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9e4686900f18..97e73469b2c4 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -473,7 +473,8 @@ int __jbd2_log_space_left(journal_t *journal) | |||
473 | } | 473 | } |
474 | 474 | ||
475 | /* | 475 | /* |
476 | * Called under j_state_lock. Returns true if a transaction commit was started. | 476 | * Called with j_state_lock locked for writing. |
477 | * Returns true if a transaction commit was started. | ||
477 | */ | 478 | */ |
478 | int __jbd2_log_start_commit(journal_t *journal, tid_t target) | 479 | int __jbd2_log_start_commit(journal_t *journal, tid_t target) |
479 | { | 480 | { |
@@ -520,11 +521,13 @@ int jbd2_journal_force_commit_nested(journal_t *journal) | |||
520 | { | 521 | { |
521 | transaction_t *transaction = NULL; | 522 | transaction_t *transaction = NULL; |
522 | tid_t tid; | 523 | tid_t tid; |
524 | int need_to_start = 0; | ||
523 | 525 | ||
524 | read_lock(&journal->j_state_lock); | 526 | read_lock(&journal->j_state_lock); |
525 | if (journal->j_running_transaction && !current->journal_info) { | 527 | if (journal->j_running_transaction && !current->journal_info) { |
526 | transaction = journal->j_running_transaction; | 528 | transaction = journal->j_running_transaction; |
527 | __jbd2_log_start_commit(journal, transaction->t_tid); | 529 | if (!tid_geq(journal->j_commit_request, transaction->t_tid)) |
530 | need_to_start = 1; | ||
528 | } else if (journal->j_committing_transaction) | 531 | } else if (journal->j_committing_transaction) |
529 | transaction = journal->j_committing_transaction; | 532 | transaction = journal->j_committing_transaction; |
530 | 533 | ||
@@ -535,6 +538,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal) | |||
535 | 538 | ||
536 | tid = transaction->t_tid; | 539 | tid = transaction->t_tid; |
537 | read_unlock(&journal->j_state_lock); | 540 | read_unlock(&journal->j_state_lock); |
541 | if (need_to_start) | ||
542 | jbd2_log_start_commit(journal, tid); | ||
538 | jbd2_log_wait_commit(journal, tid); | 543 | jbd2_log_wait_commit(journal, tid); |
539 | return 1; | 544 | return 1; |
540 | } | 545 | } |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index faad2bd787c7..1d1191050f99 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -117,10 +117,10 @@ static inline void update_t_max_wait(transaction_t *transaction) | |||
117 | static int start_this_handle(journal_t *journal, handle_t *handle, | 117 | static int start_this_handle(journal_t *journal, handle_t *handle, |
118 | int gfp_mask) | 118 | int gfp_mask) |
119 | { | 119 | { |
120 | transaction_t *transaction; | 120 | transaction_t *transaction, *new_transaction = NULL; |
121 | int needed; | 121 | tid_t tid; |
122 | int nblocks = handle->h_buffer_credits; | 122 | int needed, need_to_start; |
123 | transaction_t *new_transaction = NULL; | 123 | int nblocks = handle->h_buffer_credits; |
124 | 124 | ||
125 | if (nblocks > journal->j_max_transaction_buffers) { | 125 | if (nblocks > journal->j_max_transaction_buffers) { |
126 | printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", | 126 | printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", |
@@ -222,8 +222,11 @@ repeat: | |||
222 | atomic_sub(nblocks, &transaction->t_outstanding_credits); | 222 | atomic_sub(nblocks, &transaction->t_outstanding_credits); |
223 | prepare_to_wait(&journal->j_wait_transaction_locked, &wait, | 223 | prepare_to_wait(&journal->j_wait_transaction_locked, &wait, |
224 | TASK_UNINTERRUPTIBLE); | 224 | TASK_UNINTERRUPTIBLE); |
225 | __jbd2_log_start_commit(journal, transaction->t_tid); | 225 | tid = transaction->t_tid; |
226 | need_to_start = !tid_geq(journal->j_commit_request, tid); | ||
226 | read_unlock(&journal->j_state_lock); | 227 | read_unlock(&journal->j_state_lock); |
228 | if (need_to_start) | ||
229 | jbd2_log_start_commit(journal, tid); | ||
227 | schedule(); | 230 | schedule(); |
228 | finish_wait(&journal->j_wait_transaction_locked, &wait); | 231 | finish_wait(&journal->j_wait_transaction_locked, &wait); |
229 | goto repeat; | 232 | goto repeat; |
@@ -442,7 +445,8 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) | |||
442 | { | 445 | { |
443 | transaction_t *transaction = handle->h_transaction; | 446 | transaction_t *transaction = handle->h_transaction; |
444 | journal_t *journal = transaction->t_journal; | 447 | journal_t *journal = transaction->t_journal; |
445 | int ret; | 448 | tid_t tid; |
449 | int need_to_start, ret; | ||
446 | 450 | ||
447 | /* If we've had an abort of any type, don't even think about | 451 | /* If we've had an abort of any type, don't even think about |
448 | * actually doing the restart! */ | 452 | * actually doing the restart! */ |
@@ -465,8 +469,11 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) | |||
465 | spin_unlock(&transaction->t_handle_lock); | 469 | spin_unlock(&transaction->t_handle_lock); |
466 | 470 | ||
467 | jbd_debug(2, "restarting handle %p\n", handle); | 471 | jbd_debug(2, "restarting handle %p\n", handle); |
468 | __jbd2_log_start_commit(journal, transaction->t_tid); | 472 | tid = transaction->t_tid; |
473 | need_to_start = !tid_geq(journal->j_commit_request, tid); | ||
469 | read_unlock(&journal->j_state_lock); | 474 | read_unlock(&journal->j_state_lock); |
475 | if (need_to_start) | ||
476 | jbd2_log_start_commit(journal, tid); | ||
470 | 477 | ||
471 | lock_map_release(&handle->h_lockdep_map); | 478 | lock_map_release(&handle->h_lockdep_map); |
472 | handle->h_buffer_credits = nblocks; | 479 | handle->h_buffer_credits = nblocks; |
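
The jbd2 hunks above stop calling __jbd2_log_start_commit() under the read side of j_state_lock; they only record need_to_start there, drop the lock, and then call jbd2_log_start_commit(), which takes the write side itself. A hedged pthreads sketch of that "decide under the read lock, act after dropping it" pattern follows (not part of this patch); all names are illustrative.

/* Hedged sketch: decide under a read lock, perform the write-side action after unlocking. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int commit_request;	/* last tid asked to commit */
static unsigned int running_tid = 7;	/* tid of the running transaction */

static void start_commit(unsigned int tid)
{
	pthread_rwlock_wrlock(&state_lock);	/* write side required here */
	if (commit_request < tid)
		commit_request = tid;
	pthread_rwlock_unlock(&state_lock);
}

int main(void)
{
	unsigned int tid;
	int need_to_start;

	pthread_rwlock_rdlock(&state_lock);
	tid = running_tid;
	need_to_start = commit_request < tid;	/* only decide under the rdlock */
	pthread_rwlock_unlock(&state_lock);

	if (need_to_start)			/* act with the lock dropped */
		start_commit(tid);

	printf("commit_request is now %u\n", commit_request);
	return 0;
}
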
diff --git a/fs/namei.c b/fs/namei.c index 7d77f24d32a9..ec4b2d0190a8 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -561,10 +561,14 @@ static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd) | |||
561 | */ | 561 | */ |
562 | void release_open_intent(struct nameidata *nd) | 562 | void release_open_intent(struct nameidata *nd) |
563 | { | 563 | { |
564 | if (nd->intent.open.file->f_path.dentry == NULL) | 564 | struct file *file = nd->intent.open.file; |
565 | put_filp(nd->intent.open.file); | 565 | |
566 | else | 566 | if (file && !IS_ERR(file)) { |
567 | fput(nd->intent.open.file); | 567 | if (file->f_path.dentry == NULL) |
568 | put_filp(file); | ||
569 | else | ||
570 | fput(file); | ||
571 | } | ||
568 | } | 572 | } |
569 | 573 | ||
570 | /* | 574 | /* |
@@ -2265,8 +2269,6 @@ static struct file *finish_open(struct nameidata *nd, | |||
2265 | return filp; | 2269 | return filp; |
2266 | 2270 | ||
2267 | exit: | 2271 | exit: |
2268 | if (!IS_ERR(nd->intent.open.file)) | ||
2269 | release_open_intent(nd); | ||
2270 | path_put(&nd->path); | 2272 | path_put(&nd->path); |
2271 | return ERR_PTR(error); | 2273 | return ERR_PTR(error); |
2272 | } | 2274 | } |
@@ -2389,8 +2391,6 @@ exit_mutex_unlock: | |||
2389 | exit_dput: | 2391 | exit_dput: |
2390 | path_put_conditional(path, nd); | 2392 | path_put_conditional(path, nd); |
2391 | exit: | 2393 | exit: |
2392 | if (!IS_ERR(nd->intent.open.file)) | ||
2393 | release_open_intent(nd); | ||
2394 | path_put(&nd->path); | 2394 | path_put(&nd->path); |
2395 | return ERR_PTR(error); | 2395 | return ERR_PTR(error); |
2396 | } | 2396 | } |
@@ -2477,6 +2477,7 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
2477 | } | 2477 | } |
2478 | audit_inode(pathname, nd.path.dentry); | 2478 | audit_inode(pathname, nd.path.dentry); |
2479 | filp = finish_open(&nd, open_flag, acc_mode); | 2479 | filp = finish_open(&nd, open_flag, acc_mode); |
2480 | release_open_intent(&nd); | ||
2480 | return filp; | 2481 | return filp; |
2481 | 2482 | ||
2482 | creat: | 2483 | creat: |
@@ -2553,6 +2554,7 @@ out: | |||
2553 | path_put(&nd.root); | 2554 | path_put(&nd.root); |
2554 | if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) | 2555 | if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) |
2555 | goto reval; | 2556 | goto reval; |
2557 | release_open_intent(&nd); | ||
2556 | return filp; | 2558 | return filp; |
2557 | 2559 | ||
2558 | exit_dput: | 2560 | exit_dput: |
@@ -2560,8 +2562,6 @@ exit_dput: | |||
2560 | out_path: | 2562 | out_path: |
2561 | path_put(&nd.path); | 2563 | path_put(&nd.path); |
2562 | out_filp: | 2564 | out_filp: |
2563 | if (!IS_ERR(nd.intent.open.file)) | ||
2564 | release_open_intent(&nd); | ||
2565 | filp = ERR_PTR(error); | 2565 | filp = ERR_PTR(error); |
2566 | goto out; | 2566 | goto out; |
2567 | } | 2567 | } |
@@ -790,6 +790,8 @@ struct file *nameidata_to_filp(struct nameidata *nd) | |||
790 | 790 | ||
791 | /* Pick up the filp from the open intent */ | 791 | /* Pick up the filp from the open intent */ |
792 | filp = nd->intent.open.file; | 792 | filp = nd->intent.open.file; |
793 | nd->intent.open.file = NULL; | ||
794 | |||
793 | /* Has the filesystem initialised the file for us? */ | 795 | /* Has the filesystem initialised the file for us? */ |
794 | if (filp->f_path.dentry == NULL) { | 796 | if (filp->f_path.dentry == NULL) { |
795 | path_get(&nd->path); | 797 | path_get(&nd->path); |
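
Setting nd->intent.open.file to NULL in nameidata_to_filp() above is an ownership transfer: once the filp is picked up, a later release_open_intent() must see nothing to release. A hedged standalone sketch of that steal-and-clear idiom follows (not part of this patch); the types and helpers are illustrative.

/* Hedged sketch: take over a pointer and clear it so cleanup cannot free it twice. */
#include <stdio.h>
#include <stdlib.h>

struct intent { void *file; };

static void *take_file(struct intent *it)
{
	void *f = it->file;

	it->file = NULL;	/* caller now owns f; cleanup sees nothing */
	return f;
}

static void release_intent(struct intent *it)
{
	if (it->file)		/* safe to call even after take_file() */
		free(it->file);
	it->file = NULL;
}

int main(void)
{
	struct intent it = { .file = malloc(16) };
	void *f = take_file(&it);

	release_intent(&it);	/* no double free: the intent no longer owns it */
	free(f);
	printf("ownership transferred exactly once\n");
	return 0;
}
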
diff --git a/fs/super.c b/fs/super.c index 74e149efed81..7e9dd4cc2c01 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -177,6 +177,11 @@ void deactivate_locked_super(struct super_block *s) | |||
177 | struct file_system_type *fs = s->s_type; | 177 | struct file_system_type *fs = s->s_type; |
178 | if (atomic_dec_and_test(&s->s_active)) { | 178 | if (atomic_dec_and_test(&s->s_active)) { |
179 | fs->kill_sb(s); | 179 | fs->kill_sb(s); |
180 | /* | ||
181 | * We need to call rcu_barrier so all the delayed rcu free | ||
182 | * inodes are flushed before we release the fs module. | ||
183 | */ | ||
184 | rcu_barrier(); | ||
180 | put_filesystem(fs); | 185 | put_filesystem(fs); |
181 | put_super(s); | 186 | put_super(s); |
182 | } else { | 187 | } else { |