author     Linus Torvalds <torvalds@linux-foundation.org>   2008-08-27 16:55:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-08-27 16:55:35 -0400
commit     0559bc8e9bf8cb6063b9bc7206fbc28982491a5d
tree       3ec60750466eea38ae6443100b7213f5122e3d21 /fs
parent     e472233fc52d9556cab7d8a1164ccd93ab36fb91
parent     5168c47b4c294412f079dd3cc891e0276bb0479e
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: remove blk_queue_tag_depth() and blk_queue_tag_queue()
block: remove unused ->busy part of the block queue tag map
bio: fix __bio_copy_iov() handling of bio->bv_len
bio: fix bio_copy_kern() handling of bio->bv_len
block: submit_bh() inadvertently discards barrier flag on a sync write
block: clean up cmdfilter sysfs interface
block: rename blk_scsi_cmd_filter to blk_cmd_filter
sg: restore command permission for TYPE_SCANNER
block: move cmdfilter from gendisk to request_queue
Diffstat (limited to 'fs')

-rw-r--r--  fs/bio.c     48
-rw-r--r--  fs/buffer.c  13

2 files changed, 41 insertions, 20 deletions
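The two bv_len fixes in this merge share a root cause: for a bounce-buffered request, the driver may modify bvec->bv_len while the request is in flight, so the completion-side copy-back cannot trust the live values. The fix snapshots the segment lengths into bio_map_data at mapping time and consumes the snapshot in __bio_copy_iov() and bio_copy_kern_endio(). A toy user-space sketch of the failure mode (hypothetical values, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char seg0[4] = "abcd", seg1[4] = "efgh", out[9] = { 0 };
        char *pages[2] = { seg0, seg1 };
        unsigned int live_len[2]  = { 0, 0 };   /* clobbered by the "driver" */
        unsigned int saved_len[2] = { 4, 4 };   /* snapshot taken at map time */
        char *p = out;

        for (int i = 0; i < 2; i++) {
                /* the old code used live_len[i] here and copied nothing */
                memcpy(p, pages[i], saved_len[i]);
                p += saved_len[i];
        }
        printf("%s\n", out);    /* prints "abcdefgh" */
        return 0;
}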
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -469,20 +469,21 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 	kfree(bmd);
 }
 
-static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
 
 	if (!bmd)
 		return NULL;
 
-	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
 	if (!bmd->iovecs) {
 		kfree(bmd);
 		return NULL;
 	}
 
-	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
+	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
 	if (bmd->sgvecs)
 		return bmd;
 
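For reference, the three allocations above fill out a small descriptor whose fields can be read off this diff; a sketch of its shape (the field order is an assumption):

/* Sketch, inferred from the allocations and uses in this diff. */
struct bio_map_data {
        struct bio_vec *iovecs;         /* per-segment bv_len snapshot */
        int nr_sgvecs;                  /* number of entries in sgvecs[] */
        struct sg_iovec *sgvecs;        /* original (base, len) iovecs */
};

The new gfp_mask parameter exists so that bio_copy_kern(), further down, can pass its caller's allocation flags through instead of hard-coding GFP_KERNEL.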
@@ -491,8 +492,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
 	return NULL;
 }
 
-static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
-			  int uncopy)
+static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+			  struct sg_iovec *iov, int iov_count, int uncopy)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
@@ -502,7 +503,7 @@ static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *bv_addr = page_address(bvec->bv_page);
-		unsigned int bv_len = bvec->bv_len;
+		unsigned int bv_len = iovecs[i].bv_len;
 
 		while (bv_len && iov_idx < iov_count) {
 			unsigned int bytes;
@@ -554,7 +555,7 @@ int bio_uncopy_user(struct bio *bio)
 	struct bio_map_data *bmd = bio->bi_private;
 	int ret;
 
-	ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
+	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
 
 	bio_free_map_data(bmd);
 	bio_put(bio);
@@ -596,7 +597,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 		len += iov[i].iov_len;
 	}
 
-	bmd = bio_alloc_map_data(nr_pages, iov_count);
+	bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
@@ -633,7 +634,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	 * success
 	 */
 	if (!write_to_vm) {
-		ret = __bio_copy_iov(bio, iov, iov_count, 0);
+		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
 		if (ret)
 			goto cleanup;
 	}
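Read together, the two __bio_copy_iov() call sites show the convention: uncopy == 0 fills the bounce pages at submission, uncopy == 1 drains them at completion, and only the completion side needs the snapshotted lengths:

/* Submission path (bio_copy_user_iov): fill bounce pages from the iovecs;
 * the live bi_io_vec lengths are still trustworthy at this point. */
ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);

/* Completion path (bio_uncopy_user): copy back using the lengths
 * snapshotted in bmd->iovecs, not whatever the driver left in the bio. */
ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);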
@@ -942,19 +943,22 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 {
 	struct bio_vec *bvec;
 	const int read = bio_data_dir(bio) == READ;
-	char *p = bio->bi_private;
+	struct bio_map_data *bmd = bio->bi_private;
 	int i;
+	char *p = bmd->sgvecs[0].iov_base;
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *addr = page_address(bvec->bv_page);
+		int len = bmd->iovecs[i].bv_len;
 
 		if (read && !err)
-			memcpy(p, addr, bvec->bv_len);
+			memcpy(p, addr, len);
 
 		__free_page(bvec->bv_page);
-		p += bvec->bv_len;
+		p += len;
 	}
 
+	bio_free_map_data(bmd);
 	bio_put(bio);
 }
 
@@ -978,11 +982,21 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	const int nr_pages = end - start;
 	struct bio *bio;
 	struct bio_vec *bvec;
+	struct bio_map_data *bmd;
 	int i, ret;
+	struct sg_iovec iov;
+
+	iov.iov_base = data;
+	iov.iov_len = len;
+
+	bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
+	if (!bmd)
+		return ERR_PTR(-ENOMEM);
 
+	ret = -ENOMEM;
 	bio = bio_alloc(gfp_mask, nr_pages);
 	if (!bio)
-		return ERR_PTR(-ENOMEM);
+		goto out_bmd;
 
 	while (len) {
 		struct page *page;
@@ -1016,14 +1030,18 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 		}
 	}
 
-	bio->bi_private = data;
+	bio->bi_private = bmd;
 	bio->bi_end_io = bio_copy_kern_endio;
+
+	bio_set_map_data(bmd, bio, &iov, 1);
 	return bio;
 cleanup:
 	bio_for_each_segment(bvec, bio, i)
 		__free_page(bvec->bv_page);
 
 	bio_put(bio);
+out_bmd:
+	bio_free_map_data(bmd);
 
 	return ERR_PTR(ret);
 }
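bio_set_map_data(), newly called from bio_copy_kern() above, is outside this hunk; judging from the fields it has to populate, it amounts to something like the following sketch (the exact body is an assumption):

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count)
{
        /* Snapshot each segment's bv_len before the driver can shrink it. */
        memcpy(bmd->iovecs, bio->bi_io_vec,
               sizeof(struct bio_vec) * bio->bi_vcnt);
        /* Keep the original iovecs for the completion-time copy-back. */
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
}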
diff --git a/fs/buffer.c b/fs/buffer.c
index 38653e36e225..ac78d4c19b3b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2926,14 +2926,17 @@ int submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(!buffer_mapped(bh));
 	BUG_ON(!bh->b_end_io);
 
-	if (buffer_ordered(bh) && (rw == WRITE))
-		rw = WRITE_BARRIER;
+	/*
+	 * Mask in barrier bit for a write (could be either a WRITE or a
+	 * WRITE_SYNC
+	 */
+	if (buffer_ordered(bh) && (rw & WRITE))
+		rw |= WRITE_BARRIER;
 
 	/*
-	 * Only clear out a write error when rewriting, should this
-	 * include WRITE_SYNC as well?
+	 * Only clear out a write error when rewriting
 	 */
-	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
+	if (test_set_buffer_req(bh) && (rw & WRITE))
 		clear_buffer_write_io_error(bh);
 
 	/*
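The submit_bh() bug is pure flag arithmetic: WRITE_SYNC is WRITE plus a sync bit, so "rw == WRITE" is false for a sync write and the barrier was silently dropped, while "rw = WRITE_BARRIER" would have clobbered the sync bit anyway. A standalone demo (the bit values are made up; only the relationships mirror the kernel's):

#include <stdio.h>

/* Illustrative values only; the kernel derives these from BIO_RW_* bits. */
#define WRITE           0x1
#define SYNC_BIT        0x2
#define BARRIER_BIT     0x4
#define WRITE_SYNC      (WRITE | SYNC_BIT)
#define WRITE_BARRIER   (WRITE | BARRIER_BIT)

int main(void)
{
        int rw;

        rw = WRITE_SYNC;
        if (rw == WRITE)                /* old test: never true for WRITE_SYNC */
                rw = WRITE_BARRIER;
        printf("old: barrier %s\n", rw & BARRIER_BIT ? "set" : "lost");

        rw = WRITE_SYNC;
        if (rw & WRITE)                 /* new test: any write qualifies */
                rw |= WRITE_BARRIER;    /* OR preserves the sync bit */
        printf("new: barrier %s, sync %s\n",
               rw & BARRIER_BIT ? "set" : "lost",
               rw & SYNC_BIT ? "kept" : "clobbered");
        return 0;
}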