author     Jens Axboe <jaxboe@fusionio.com>  2011-07-01 10:17:13 -0400
committer  Jens Axboe <jaxboe@fusionio.com>  2011-07-01 10:17:13 -0400
commit     04bf7869ca0fd12009aee301cac2264a36df4d98 (patch)
tree       66cb81ebf8b76560a31433c2c493dc430c914af9 /fs/btrfs/scrub.c
parent     d2f31a5fd60d168b00fc4f7617b68a1287b21e90 (diff)
parent     7b28afe01ab6ffb5f152f47831b44933facd2328 (diff)
Merge branch 'for-linus' into for-3.1/core
Conflicts:
	block/blk-throttle.c
	block/cfq-iosched.c
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--  fs/btrfs/scrub.c  192
1 file changed, 109 insertions(+), 83 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dfed0c27ac3..a8d03d5efb5d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
@@ -117,33 +111,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
 	}
 }
 
+static void scrub_free_bio(struct bio *bio)
+{
+	int i;
+	struct page *last_page = NULL;
+
+	if (!bio)
+		return;
+
+	for (i = 0; i < bio->bi_vcnt; ++i) {
+		if (bio->bi_io_vec[i].bv_page == last_page)
+			continue;
+		last_page = bio->bi_io_vec[i].bv_page;
+		__free_page(last_page);
+	}
+	bio_put(bio);
+}
+
 static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
 {
 	int i;
-	int j;
-	struct page *last_page;
 
 	if (!sdev)
 		return;
 
 	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 		struct scrub_bio *sbio = sdev->bios[i];
-		struct bio *bio;
 
 		if (!sbio)
 			break;
 
-		bio = sbio->bio;
-		if (bio) {
-			last_page = NULL;
-			for (j = 0; j < bio->bi_vcnt; ++j) {
-				if (bio->bi_io_vec[j].bv_page == last_page)
-					continue;
-				last_page = bio->bi_io_vec[j].bv_page;
-				__free_page(last_page);
-			}
-			bio_put(bio);
-		}
+		scrub_free_bio(sbio->bio);
 		kfree(sbio);
 	}
 
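The page-freeing loop that used to live inline in scrub_free_dev() is hoisted into scrub_free_bio() because the checksum worker (further down) now needs the same teardown. The helper is safe to call unconditionally: it returns early on a NULL bio, and the last_page check skips consecutive bvecs that reference the same page so nothing is freed twice. Both call sites after this patch, side by side:

scrub_free_bio(sbio->bio);      /* scrub_free_dev(): final teardown */
kfree(sbio);

scrub_free_bio(sbio->bio);      /* scrub_checksum(): recycle the slot */
sbio->bio = NULL;               /* next submit allocates a fresh bio */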
@@ -156,8 +154,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 {
 	struct scrub_dev *sdev;
 	int i;
-	int j;
-	int ret;
 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 
 	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +161,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 		goto nomem;
 	sdev->dev = dev;
 	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
-		struct bio *bio;
 		struct scrub_bio *sbio;
 
 		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +168,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 			goto nomem;
 		sdev->bios[i] = sbio;
 
-		bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
-		if (!bio)
-			goto nomem;
-
 		sbio->index = i;
 		sbio->sdev = sdev;
-		sbio->bio = bio;
 		sbio->count = 0;
 		sbio->work.func = scrub_checksum;
-		bio->bi_private = sdev->bios[i];
-		bio->bi_end_io = scrub_bio_end_io;
-		bio->bi_sector = 0;
-		bio->bi_bdev = dev->bdev;
-		bio->bi_size = 0;
-
-		for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
-			struct page *page;
-			page = alloc_page(GFP_NOFS);
-			if (!page)
-				goto nomem;
-
-			ret = bio_add_page(bio, page, PAGE_SIZE, 0);
-			if (!ret)
-				goto nomem;
-		}
-		WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
 
 		if (i != SCRUB_BIOS_PER_DEV-1)
 			sdev->bios[i]->next_free = i + 1;
@@ -369,9 +342,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
 	int ret;
 	DECLARE_COMPLETION_ONSTACK(complete);
 
-	/* we are going to wait on this IO */
-	rw |= REQ_SYNC;
-
 	bio = bio_alloc(GFP_NOFS, 1);
 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
@@ -380,6 +350,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
 	bio->bi_private = &complete;
 	submit_bio(rw, bio);
 
+	/* this will also unplug the queue */
 	wait_for_completion(&complete);
 
 	ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
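With the on-stack plugging introduced in 2.6.39, a task that blocks flushes its own plug list on the way to sleep, so the explicit REQ_SYNC hint is no longer needed to get the read going before wait_for_completion(). For context, a sketch of the whole synchronous helper after this change, against the 2011-era bio API — the parts outside the two hunks above are reconstructed from the surrounding file, not verbatim:

static void scrub_fixup_end_io(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

static int scrub_fixup_io(int rw, struct block_device *bdev,
                          sector_t sector, struct page *page)
{
        struct bio *bio;
        int ret;
        DECLARE_COMPLETION_ONSTACK(complete);

        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_end_io = scrub_fixup_end_io;
        bio->bi_private = &complete;
        submit_bio(rw, bio);

        /* blocking here flushes the task's plug list, kicking the queue */
        wait_for_completion(&complete);

        ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}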
@@ -394,6 +365,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
 	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
 
 	sbio->err = err;
+	sbio->bio = bio;
 
 	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
 }
@@ -453,6 +425,8 @@ static void scrub_checksum(struct btrfs_work *work)
 	}
 
 out:
+	scrub_free_bio(sbio->bio);
+	sbio->bio = NULL;
 	spin_lock(&sdev->list_lock);
 	sbio->next_free = sdev->first_free;
 	sdev->first_free = sbio->index;
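Taken together with the scrub_submit() rewrite just below, these two hunks change the bio ownership model: bios are no longer long-lived per-device objects but are created per submission and torn down by the worker. The lifecycle, as a comment-form sketch:

/*
 * scrub_submit():      bio_alloc() + alloc_page() per slot,
 *                      submit_bio(READ, bio)
 * scrub_bio_end_io():  interrupt context; only records err and
 *                      stashes the bio in sbio->bio, no freeing here
 * scrub_checksum():    worker context; verifies the pages, then
 *                      scrub_free_bio(sbio->bio), sbio->bio = NULL,
 *                      and puts the sbio back on the free list
 */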
@@ -583,25 +557,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
 static int scrub_submit(struct scrub_dev *sdev)
 {
 	struct scrub_bio *sbio;
+	struct bio *bio;
+	int i;
 
 	if (sdev->curr == -1)
 		return 0;
 
 	sbio = sdev->bios[sdev->curr];
 
-	sbio->bio->bi_sector = sbio->physical >> 9;
-	sbio->bio->bi_size = sbio->count * PAGE_SIZE;
-	sbio->bio->bi_next = NULL;
-	sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
-	sbio->bio->bi_comp_cpu = -1;
-	sbio->bio->bi_bdev = sdev->dev->bdev;
+	bio = bio_alloc(GFP_NOFS, sbio->count);
+	if (!bio)
+		goto nomem;
+
+	bio->bi_private = sbio;
+	bio->bi_end_io = scrub_bio_end_io;
+	bio->bi_bdev = sdev->dev->bdev;
+	bio->bi_sector = sbio->physical >> 9;
+
+	for (i = 0; i < sbio->count; ++i) {
+		struct page *page;
+		int ret;
+
+		page = alloc_page(GFP_NOFS);
+		if (!page)
+			goto nomem;
+
+		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+		if (!ret) {
+			__free_page(page);
+			goto nomem;
+		}
+	}
+
 	sbio->err = 0;
 	sdev->curr = -1;
 	atomic_inc(&sdev->in_flight);
 
-	submit_bio(0, sbio->bio);
+	submit_bio(READ, bio);
 
 	return 0;
+
+nomem:
+	scrub_free_bio(bio);
+
+	return -ENOMEM;
 }
 
 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
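scrub_submit() now allocates the bio and its pages per submission instead of reusing the set preallocated in scrub_setup_dev(), which is why it can newly fail with -ENOMEM and why both call sites in scrub_page() (the two hunks that follow) grow return-value checks. The unwind works because scrub_free_bio() is a no-op on NULL and frees whatever pages were attached before the failure. The idiom, reduced to its core (sketch; npages stands in for sbio->count):

bio = bio_alloc(GFP_NOFS, npages);
if (!bio)
        goto nomem;             /* bio is NULL, scrub_free_bio() tolerates it */
for (i = 0; i < npages; ++i) {
        struct page *page = alloc_page(GFP_NOFS);
        if (!page)
                goto nomem;
        if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
                __free_page(page);      /* not yet owned by the bio */
                goto nomem;
        }
}
...
nomem:
        scrub_free_bio(bio);    /* handles NULL and partial page sets */
        return -ENOMEM;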
@@ -633,7 +632,11 @@ again:
 		sbio->logical = logical;
 	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
 		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
-		scrub_submit(sdev);
+		int ret;
+
+		ret = scrub_submit(sdev);
+		if (ret)
+			return ret;
 		goto again;
 	}
 	sbio->spag[sbio->count].flags = flags;
@@ -645,8 +648,13 @@ again:
 		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
 	}
 	++sbio->count;
-	if (sbio->count == SCRUB_PAGES_PER_BIO || force)
-		scrub_submit(sdev);
+	if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
+		int ret;
+
+		ret = scrub_submit(sdev);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
@@ -727,6 +735,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_root *csum_root = fs_info->csum_root;
 	struct btrfs_extent_item *extent;
+	struct blk_plug plug;
 	u64 flags;
 	int ret;
 	int slot;
@@ -789,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
-		goto out;
-
-	l = path->nodes[0];
-	slot = path->slots[0];
-	btrfs_item_key_to_cpu(l, &key, slot);
-	if (key.objectid != logical) {
-		ret = btrfs_previous_item(root, path, 0,
-					  BTRFS_EXTENT_ITEM_KEY);
-		if (ret < 0)
-			goto out;
-	}
+		goto out_noplug;
 
+	/*
+	 * we might miss half an extent here, but that doesn't matter,
+	 * as it's only the prefetch
+	 */
 	while (1) {
 		l = path->nodes[0];
 		slot = path->slots[0];
@@ -809,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 		if (ret == 0)
 			continue;
 		if (ret < 0)
-			goto out;
+			goto out_noplug;
 
 		break;
 	}
@@ -831,6 +834,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	 * the scrub. This might currently (crc32) end up to be about 1MB
 	 */
 	start_stripe = 0;
+	blk_start_plug(&plug);
 again:
 	logical = base + offset + start_stripe * increment;
 	for (i = start_stripe; i < nstripes; ++i) {
@@ -890,15 +894,20 @@ again:
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto out;
-
-		l = path->nodes[0];
-		slot = path->slots[0];
-		btrfs_item_key_to_cpu(l, &key, slot);
-		if (key.objectid != logical) {
+		if (ret > 0) {
 			ret = btrfs_previous_item(root, path, 0,
 						  BTRFS_EXTENT_ITEM_KEY);
 			if (ret < 0)
 				goto out;
+			if (ret > 0) {
+				/* there's no smaller item, so stick with the
+				 * larger one */
+				btrfs_release_path(path);
+				ret = btrfs_search_slot(NULL, root, &key,
+							path, 0, 0);
+				if (ret < 0)
+					goto out;
+			}
 		}
 
 		while (1) {
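The old check dereferenced path->nodes[0] at path->slots[0] before knowing whether the search hit anything; when btrfs_search_slot() returns 1 the slot is the insert position, which can point one past the last item of the leaf. Testing the return value directly is the reliable signal: 0 means exact match, 1 means not found with the path at the insert position, negative means error. The new fallback chain, rewritten as a hypothetical standalone helper (the function name is illustrative; conventions as in ctree.c):

/* Leave @path at the last EXTENT_ITEM at or before @key, or at the
 * next larger item when nothing smaller exists. */
static int scrub_seek_extent(struct btrfs_root *root,
                             struct btrfs_path *path,
                             struct btrfs_key *key)
{
        int ret = btrfs_search_slot(NULL, root, key, path, 0, 0);

        if (ret > 0) {
                ret = btrfs_previous_item(root, path, 0,
                                          BTRFS_EXTENT_ITEM_KEY);
                if (ret > 0) {
                        /* no smaller item: re-seek, take the larger one */
                        btrfs_release_path(path);
                        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
                }
        }
        return ret;     /* <0 on error, otherwise path is positioned */
}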
@@ -972,6 +981,8 @@ next:
 	scrub_submit(sdev);
 
 out:
+	blk_finish_plug(&plug);
+out_noplug:
 	btrfs_free_path(path);
 	return ret < 0 ? ret : 0;
 }
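blk_start_plug()/blk_finish_plug() bracket the stripe loop so the many small reads scrub issues get batched and merged before they reach the device; any point where the task schedules flushes the plug implicitly, and blk_finish_plug() submits whatever is left. The early-error label out_noplug exists because jumping to out before blk_start_plug() would finish a plug that was never started. The pattern in isolation (sketch; submit_one_stripe() is a hypothetical placeholder):

struct blk_plug plug;
int i;

blk_start_plug(&plug);
for (i = 0; i < nstripes; ++i)
        submit_one_stripe(i);   /* queues bios, lets them merge */
blk_finish_plug(&plug);         /* flush the batched requests */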
@@ -1047,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
-			goto out;
-		ret = 0;
+			break;
+		if (ret > 0) {
+			if (path->slots[0] >=
+			    btrfs_header_nritems(path->nodes[0])) {
+				ret = btrfs_next_leaf(root, path);
+				if (ret)
+					break;
+			}
+		}
 
 		l = path->nodes[0];
 		slot = path->slots[0];
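Same return-code discipline in the chunk enumeration: on ret == 1 the insert position may sit one slot past the leaf's last item, so the code steps to the next leaf before reading the key there. btrfs_next_leaf() itself returns 1 at the end of the tree, which is why the function's tail (last hunk of this function below) maps any positive ret to 0 instead of treating it as an error. Reduced to the guard itself:

if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
        ret = btrfs_next_leaf(root, path);
        if (ret)        /* <0: error, 1: no more leaves */
                break;
}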
@@ -1058,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		if (found_key.objectid != sdev->dev->devid)
 			break;
 
-		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
 			break;
 
 		if (found_key.offset >= end)
@@ -1087,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
 		if (!cache) {
 			ret = -ENOENT;
-			goto out;
+			break;
 		}
 		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
 				  chunk_offset, length);
@@ -1099,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		btrfs_release_path(path);
 	}
 
-out:
 	btrfs_free_path(path);
-	return ret;
+
+	/*
+	 * ret can still be 1 from search_slot or next_leaf,
+	 * that's not an error
+	 */
+	return ret < 0 ? ret : 0;
 }
 
 static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1138,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	mutex_lock(&fs_info->scrub_lock);
-	if (fs_info->scrub_workers_refcnt == 0)
+	if (fs_info->scrub_workers_refcnt == 0) {
+		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+			fs_info->thread_pool_size, &fs_info->generic_worker);
+		fs_info->scrub_workers.idle_thresh = 4;
 		btrfs_start_workers(&fs_info->scrub_workers, 1);
+	}
 	++fs_info->scrub_workers_refcnt;
 	mutex_unlock(&fs_info->scrub_lock);
 
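Worker-pool setup moves into scrub_workers_get(), so the "scrub" pool is created lazily on the first scrub and sized from thread_pool_size rather than being configured elsewhere; the refcount under scrub_lock lets concurrent scrubs of multiple devices share one pool. The idle_thresh = 4 setting appears to follow the other btrfs worker pools — roughly, the per-thread queue depth above which work is spread to additional threads (my reading of the btrfs async-thread code; treat as an assumption). The get/put pairing, sketched:

scrub_workers_get(root);        /* first caller initializes + starts pool */
/* ... run the scrub ... */
scrub_workers_put(root);        /* presumably drops refcnt, tears down at 0
                                 * (the put side is not part of this diff) */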
@@ -1166,7 +1192,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
 	int ret;
 	struct btrfs_device *dev;
 
-	if (root->fs_info->closing)
+	if (btrfs_fs_closing(root->fs_info))
 		return -EINVAL;
 
 	/*