Diffstat (limited to 'drivers/md/raid5.c')
| -rw-r--r-- | drivers/md/raid5.c | 1059 |
1 files changed, 988 insertions, 71 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 122e64e557b1..9ba73074df04 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -2,8 +2,11 @@ | |||
| 2 | * raid5.c : Multiple Devices driver for Linux | 2 | * raid5.c : Multiple Devices driver for Linux |
| 3 | * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman | 3 | * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman |
| 4 | * Copyright (C) 1999, 2000 Ingo Molnar | 4 | * Copyright (C) 1999, 2000 Ingo Molnar |
| 5 | * Copyright (C) 2002, 2003 H. Peter Anvin | ||
| 5 | * | 6 | * |
| 6 | * RAID-5 management functions. | 7 | * RAID-4/5/6 management functions. |
| 8 | * Thanks to Penguin Computing for making the RAID-6 development possible | ||
| 9 | * by donating a test server! | ||
| 7 | * | 10 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
| @@ -19,11 +22,11 @@ | |||
| 19 | #include <linux/config.h> | 22 | #include <linux/config.h> |
| 20 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 21 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 22 | #include <linux/raid/raid5.h> | ||
| 23 | #include <linux/highmem.h> | 25 | #include <linux/highmem.h> |
| 24 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
| 25 | #include <linux/kthread.h> | 27 | #include <linux/kthread.h> |
| 26 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
| 29 | #include "raid6.h" | ||
| 27 | 30 | ||
| 28 | #include <linux/raid/bitmap.h> | 31 | #include <linux/raid/bitmap.h> |
| 29 | 32 | ||
| @@ -68,6 +71,16 @@ | |||
| 68 | #define __inline__ | 71 | #define __inline__ |
| 69 | #endif | 72 | #endif |
| 70 | 73 | ||
| 74 | #if !RAID6_USE_EMPTY_ZERO_PAGE | ||
| 75 | /* In .bss so it's zeroed */ | ||
| 76 | const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); | ||
| 77 | #endif | ||
| 78 | |||
| 79 | static inline int raid6_next_disk(int disk, int raid_disks) | ||
| 80 | { | ||
| 81 | disk++; | ||
| 82 | return (disk < raid_disks) ? disk : 0; | ||
| 83 | } | ||
| 71 | static void print_raid5_conf (raid5_conf_t *conf); | 84 | static void print_raid5_conf (raid5_conf_t *conf); |
| 72 | 85 | ||
| 73 | static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) | 86 | static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) |
| @@ -104,7 +117,7 @@ static void release_stripe(struct stripe_head *sh) | |||
| 104 | { | 117 | { |
| 105 | raid5_conf_t *conf = sh->raid_conf; | 118 | raid5_conf_t *conf = sh->raid_conf; |
| 106 | unsigned long flags; | 119 | unsigned long flags; |
| 107 | 120 | ||
| 108 | spin_lock_irqsave(&conf->device_lock, flags); | 121 | spin_lock_irqsave(&conf->device_lock, flags); |
| 109 | __release_stripe(conf, sh); | 122 | __release_stripe(conf, sh); |
| 110 | spin_unlock_irqrestore(&conf->device_lock, flags); | 123 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| @@ -117,7 +130,7 @@ static inline void remove_hash(struct stripe_head *sh) | |||
| 117 | hlist_del_init(&sh->hash); | 130 | hlist_del_init(&sh->hash); |
| 118 | } | 131 | } |
| 119 | 132 | ||
| 120 | static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) | 133 | static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) |
| 121 | { | 134 | { |
| 122 | struct hlist_head *hp = stripe_hash(conf, sh->sector); | 135 | struct hlist_head *hp = stripe_hash(conf, sh->sector); |
| 123 | 136 | ||
| @@ -190,7 +203,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int | |||
| 190 | (unsigned long long)sh->sector); | 203 | (unsigned long long)sh->sector); |
| 191 | 204 | ||
| 192 | remove_hash(sh); | 205 | remove_hash(sh); |
| 193 | 206 | ||
| 194 | sh->sector = sector; | 207 | sh->sector = sector; |
| 195 | sh->pd_idx = pd_idx; | 208 | sh->pd_idx = pd_idx; |
| 196 | sh->state = 0; | 209 | sh->state = 0; |
| @@ -269,8 +282,9 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector | |||
| 269 | } else { | 282 | } else { |
| 270 | if (!test_bit(STRIPE_HANDLE, &sh->state)) | 283 | if (!test_bit(STRIPE_HANDLE, &sh->state)) |
| 271 | atomic_inc(&conf->active_stripes); | 284 | atomic_inc(&conf->active_stripes); |
| 272 | if (!list_empty(&sh->lru)) | 285 | if (list_empty(&sh->lru)) |
| 273 | list_del_init(&sh->lru); | 286 | BUG(); |
| 287 | list_del_init(&sh->lru); | ||
| 274 | } | 288 | } |
| 275 | } | 289 | } |
| 276 | } while (sh == NULL); | 290 | } while (sh == NULL); |
| @@ -321,10 +335,9 @@ static int grow_stripes(raid5_conf_t *conf, int num) | |||
| 321 | return 1; | 335 | return 1; |
| 322 | conf->slab_cache = sc; | 336 | conf->slab_cache = sc; |
| 323 | conf->pool_size = devs; | 337 | conf->pool_size = devs; |
| 324 | while (num--) { | 338 | while (num--) |
| 325 | if (!grow_one_stripe(conf)) | 339 | if (!grow_one_stripe(conf)) |
| 326 | return 1; | 340 | return 1; |
| 327 | } | ||
| 328 | return 0; | 341 | return 0; |
| 329 | } | 342 | } |
| 330 | 343 | ||
| @@ -631,8 +644,7 @@ static void raid5_build_block (struct stripe_head *sh, int i) | |||
| 631 | dev->req.bi_private = sh; | 644 | dev->req.bi_private = sh; |
| 632 | 645 | ||
| 633 | dev->flags = 0; | 646 | dev->flags = 0; |
| 634 | if (i != sh->pd_idx) | 647 | dev->sector = compute_blocknr(sh, i); |
| 635 | dev->sector = compute_blocknr(sh, i); | ||
| 636 | } | 648 | } |
| 637 | 649 | ||
| 638 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 650 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) |
| @@ -659,7 +671,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
| 659 | " Operation continuing on %d devices\n", | 671 | " Operation continuing on %d devices\n", |
| 660 | bdevname(rdev->bdev,b), conf->working_disks); | 672 | bdevname(rdev->bdev,b), conf->working_disks); |
| 661 | } | 673 | } |
| 662 | } | 674 | } |
| 663 | 675 | ||
| 664 | /* | 676 | /* |
| 665 | * Input: a 'big' sector number, | 677 | * Input: a 'big' sector number, |
| @@ -697,9 +709,12 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, | |||
| 697 | /* | 709 | /* |
| 698 | * Select the parity disk based on the user selected algorithm. | 710 | * Select the parity disk based on the user selected algorithm. |
| 699 | */ | 711 | */ |
| 700 | if (conf->level == 4) | 712 | switch(conf->level) { |
| 713 | case 4: | ||
| 701 | *pd_idx = data_disks; | 714 | *pd_idx = data_disks; |
| 702 | else switch (conf->algorithm) { | 715 | break; |
| 716 | case 5: | ||
| 717 | switch (conf->algorithm) { | ||
| 703 | case ALGORITHM_LEFT_ASYMMETRIC: | 718 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 704 | *pd_idx = data_disks - stripe % raid_disks; | 719 | *pd_idx = data_disks - stripe % raid_disks; |
| 705 | if (*dd_idx >= *pd_idx) | 720 | if (*dd_idx >= *pd_idx) |
| @@ -721,6 +736,39 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, | |||
| 721 | default: | 736 | default: |
| 722 | printk(KERN_ERR "raid5: unsupported algorithm %d\n", | 737 | printk(KERN_ERR "raid5: unsupported algorithm %d\n", |
| 723 | conf->algorithm); | 738 | conf->algorithm); |
| 739 | } | ||
| 740 | break; | ||
| 741 | case 6: | ||
| 742 | |||
| 743 | /**** FIX THIS ****/ | ||
| 744 | switch (conf->algorithm) { | ||
| 745 | case ALGORITHM_LEFT_ASYMMETRIC: | ||
| 746 | *pd_idx = raid_disks - 1 - (stripe % raid_disks); | ||
| 747 | if (*pd_idx == raid_disks-1) | ||
| 748 | (*dd_idx)++; /* Q D D D P */ | ||
| 749 | else if (*dd_idx >= *pd_idx) | ||
| 750 | (*dd_idx) += 2; /* D D P Q D */ | ||
| 751 | break; | ||
| 752 | case ALGORITHM_RIGHT_ASYMMETRIC: | ||
| 753 | *pd_idx = stripe % raid_disks; | ||
| 754 | if (*pd_idx == raid_disks-1) | ||
| 755 | (*dd_idx)++; /* Q D D D P */ | ||
| 756 | else if (*dd_idx >= *pd_idx) | ||
| 757 | (*dd_idx) += 2; /* D D P Q D */ | ||
| 758 | break; | ||
| 759 | case ALGORITHM_LEFT_SYMMETRIC: | ||
| 760 | *pd_idx = raid_disks - 1 - (stripe % raid_disks); | ||
| 761 | *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; | ||
| 762 | break; | ||
| 763 | case ALGORITHM_RIGHT_SYMMETRIC: | ||
| 764 | *pd_idx = stripe % raid_disks; | ||
| 765 | *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; | ||
| 766 | break; | ||
| 767 | default: | ||
| 768 | printk (KERN_CRIT "raid6: unsupported algorithm %d\n", | ||
| 769 | conf->algorithm); | ||
| 770 | } | ||
| 771 | break; | ||
| 724 | } | 772 | } |
| 725 | 773 | ||
| 726 | /* | 774 | /* |
| @@ -742,12 +790,17 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) | |||
| 742 | int chunk_number, dummy1, dummy2, dd_idx = i; | 790 | int chunk_number, dummy1, dummy2, dd_idx = i; |
| 743 | sector_t r_sector; | 791 | sector_t r_sector; |
| 744 | 792 | ||
| 793 | |||
| 745 | chunk_offset = sector_div(new_sector, sectors_per_chunk); | 794 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
| 746 | stripe = new_sector; | 795 | stripe = new_sector; |
| 747 | BUG_ON(new_sector != stripe); | 796 | BUG_ON(new_sector != stripe); |
| 748 | 797 | ||
| 749 | 798 | if (i == sh->pd_idx) | |
| 750 | switch (conf->algorithm) { | 799 | return 0; |
| 800 | switch(conf->level) { | ||
| 801 | case 4: break; | ||
| 802 | case 5: | ||
| 803 | switch (conf->algorithm) { | ||
| 751 | case ALGORITHM_LEFT_ASYMMETRIC: | 804 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 752 | case ALGORITHM_RIGHT_ASYMMETRIC: | 805 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 753 | if (i > sh->pd_idx) | 806 | if (i > sh->pd_idx) |
| @@ -761,7 +814,37 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) | |||
| 761 | break; | 814 | break; |
| 762 | default: | 815 | default: |
| 763 | printk(KERN_ERR "raid5: unsupported algorithm %d\n", | 816 | printk(KERN_ERR "raid5: unsupported algorithm %d\n", |
| 817 | conf->algorithm); | ||
| 818 | } | ||
| 819 | break; | ||
| 820 | case 6: | ||
| 821 | data_disks = raid_disks - 2; | ||
| 822 | if (i == raid6_next_disk(sh->pd_idx, raid_disks)) | ||
| 823 | return 0; /* It is the Q disk */ | ||
| 824 | switch (conf->algorithm) { | ||
| 825 | case ALGORITHM_LEFT_ASYMMETRIC: | ||
| 826 | case ALGORITHM_RIGHT_ASYMMETRIC: | ||
| 827 | if (sh->pd_idx == raid_disks-1) | ||
| 828 | i--; /* Q D D D P */ | ||
| 829 | else if (i > sh->pd_idx) | ||
| 830 | i -= 2; /* D D P Q D */ | ||
| 831 | break; | ||
| 832 | case ALGORITHM_LEFT_SYMMETRIC: | ||
| 833 | case ALGORITHM_RIGHT_SYMMETRIC: | ||
| 834 | if (sh->pd_idx == raid_disks-1) | ||
| 835 | i--; /* Q D D D P */ | ||
| 836 | else { | ||
| 837 | /* D D P Q D */ | ||
| 838 | if (i < sh->pd_idx) | ||
| 839 | i += raid_disks; | ||
| 840 | i -= (sh->pd_idx + 2); | ||
| 841 | } | ||
| 842 | break; | ||
| 843 | default: | ||
| 844 | printk (KERN_CRIT "raid6: unsupported algorithm %d\n", | ||
| 764 | conf->algorithm); | 845 | conf->algorithm); |
| 846 | } | ||
| 847 | break; | ||
| 765 | } | 848 | } |
| 766 | 849 | ||
| 767 | chunk_number = stripe * data_disks + i; | 850 | chunk_number = stripe * data_disks + i; |
| @@ -778,10 +861,11 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) | |||
| 778 | 861 | ||
| 779 | 862 | ||
| 780 | /* | 863 | /* |
| 781 | * Copy data between a page in the stripe cache, and a bio. | 864 | * Copy data between a page in the stripe cache, and one or more bion |
| 782 | * There are no alignment or size guarantees between the page or the | 865 | * The page could align with the middle of the bio, or there could be |
| 783 | * bio except that there is some overlap. | 866 | * several bion, each with several bio_vecs, which cover part of the page |
| 784 | * All iovecs in the bio must be considered. | 867 | * Multiple bion are linked together on bi_next. There may be extras |
| 868 | * at the end of this list. We ignore them. | ||
| 785 | */ | 869 | */ |
| 786 | static void copy_data(int frombio, struct bio *bio, | 870 | static void copy_data(int frombio, struct bio *bio, |
| 787 | struct page *page, | 871 | struct page *page, |
| @@ -810,7 +894,7 @@ static void copy_data(int frombio, struct bio *bio, | |||
| 810 | if (len > 0 && page_offset + len > STRIPE_SIZE) | 894 | if (len > 0 && page_offset + len > STRIPE_SIZE) |
| 811 | clen = STRIPE_SIZE - page_offset; | 895 | clen = STRIPE_SIZE - page_offset; |
| 812 | else clen = len; | 896 | else clen = len; |
| 813 | 897 | ||
| 814 | if (clen > 0) { | 898 | if (clen > 0) { |
| 815 | char *ba = __bio_kmap_atomic(bio, i, KM_USER0); | 899 | char *ba = __bio_kmap_atomic(bio, i, KM_USER0); |
| 816 | if (frombio) | 900 | if (frombio) |
| @@ -862,14 +946,14 @@ static void compute_block(struct stripe_head *sh, int dd_idx) | |||
| 862 | set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); | 946 | set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); |
| 863 | } | 947 | } |
| 864 | 948 | ||
| 865 | static void compute_parity(struct stripe_head *sh, int method) | 949 | static void compute_parity5(struct stripe_head *sh, int method) |
| 866 | { | 950 | { |
| 867 | raid5_conf_t *conf = sh->raid_conf; | 951 | raid5_conf_t *conf = sh->raid_conf; |
| 868 | int i, pd_idx = sh->pd_idx, disks = sh->disks, count; | 952 | int i, pd_idx = sh->pd_idx, disks = sh->disks, count; |
| 869 | void *ptr[MAX_XOR_BLOCKS]; | 953 | void *ptr[MAX_XOR_BLOCKS]; |
| 870 | struct bio *chosen; | 954 | struct bio *chosen; |
| 871 | 955 | ||
| 872 | PRINTK("compute_parity, stripe %llu, method %d\n", | 956 | PRINTK("compute_parity5, stripe %llu, method %d\n", |
| 873 | (unsigned long long)sh->sector, method); | 957 | (unsigned long long)sh->sector, method); |
| 874 | 958 | ||
| 875 | count = 1; | 959 | count = 1; |
| @@ -956,9 +1040,195 @@ static void compute_parity(struct stripe_head *sh, int method) | |||
| 956 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | 1040 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
| 957 | } | 1041 | } |
| 958 | 1042 | ||
| 1043 | static void compute_parity6(struct stripe_head *sh, int method) | ||
| 1044 | { | ||
| 1045 | raid6_conf_t *conf = sh->raid_conf; | ||
| 1046 | int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count; | ||
| 1047 | struct bio *chosen; | ||
| 1048 | /**** FIX THIS: This could be very bad if disks is close to 256 ****/ | ||
| 1049 | void *ptrs[disks]; | ||
| 1050 | |||
| 1051 | qd_idx = raid6_next_disk(pd_idx, disks); | ||
| 1052 | d0_idx = raid6_next_disk(qd_idx, disks); | ||
| 1053 | |||
| 1054 | PRINTK("compute_parity, stripe %llu, method %d\n", | ||
| 1055 | (unsigned long long)sh->sector, method); | ||
| 1056 | |||
| 1057 | switch(method) { | ||
| 1058 | case READ_MODIFY_WRITE: | ||
| 1059 | BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ | ||
| 1060 | case RECONSTRUCT_WRITE: | ||
| 1061 | for (i= disks; i-- ;) | ||
| 1062 | if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { | ||
| 1063 | chosen = sh->dev[i].towrite; | ||
| 1064 | sh->dev[i].towrite = NULL; | ||
| 1065 | |||
| 1066 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | ||
| 1067 | wake_up(&conf->wait_for_overlap); | ||
| 1068 | |||
| 1069 | if (sh->dev[i].written) BUG(); | ||
| 1070 | sh->dev[i].written = chosen; | ||
| 1071 | } | ||
| 1072 | break; | ||
| 1073 | case CHECK_PARITY: | ||
| 1074 | BUG(); /* Not implemented yet */ | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | for (i = disks; i--;) | ||
| 1078 | if (sh->dev[i].written) { | ||
| 1079 | sector_t sector = sh->dev[i].sector; | ||
| 1080 | struct bio *wbi = sh->dev[i].written; | ||
| 1081 | while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { | ||
| 1082 | copy_data(1, wbi, sh->dev[i].page, sector); | ||
| 1083 | wbi = r5_next_bio(wbi, sector); | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | set_bit(R5_LOCKED, &sh->dev[i].flags); | ||
| 1087 | set_bit(R5_UPTODATE, &sh->dev[i].flags); | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | // switch(method) { | ||
| 1091 | // case RECONSTRUCT_WRITE: | ||
| 1092 | // case CHECK_PARITY: | ||
| 1093 | // case UPDATE_PARITY: | ||
| 1094 | /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ | ||
| 1095 | /* FIX: Is this ordering of drives even remotely optimal? */ | ||
| 1096 | count = 0; | ||
| 1097 | i = d0_idx; | ||
| 1098 | do { | ||
| 1099 | ptrs[count++] = page_address(sh->dev[i].page); | ||
| 1100 | if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) | ||
| 1101 | printk("block %d/%d not uptodate on parity calc\n", i,count); | ||
| 1102 | i = raid6_next_disk(i, disks); | ||
| 1103 | } while ( i != d0_idx ); | ||
| 1104 | // break; | ||
| 1105 | // } | ||
| 1106 | |||
| 1107 | raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); | ||
| 1108 | |||
| 1109 | switch(method) { | ||
| 1110 | case RECONSTRUCT_WRITE: | ||
| 1111 | set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | ||
| 1112 | set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); | ||
| 1113 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); | ||
| 1114 | set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); | ||
| 1115 | break; | ||
| 1116 | case UPDATE_PARITY: | ||
| 1117 | set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | ||
| 1118 | set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); | ||
| 1119 | break; | ||
| 1120 | } | ||
| 1121 | } | ||
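compute_parity6() collects the page pointers starting at d0_idx, so the array handed to raid6_call.gen_syndrome() ends with P at index disks-2 and Q at disks-1. Below is a minimal userspace sketch of what a generic gen_syndrome computes over GF(2^8), assuming the 0x11d generator polynomial used by the kernel raid6 library; it illustrates the math only, not the optimized kernel implementation:

/* Userspace sketch: P is the XOR of the data blocks and Q the GF(2^8)
 * weighted sum, evaluated by Horner's rule.  ptrs[] follows the
 * compute_parity6() ordering: data blocks first, then P, then Q.
 * Block sizes below are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint8_t gf_mul2(uint8_t v)		/* multiply by x in GF(2^8) */
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

static void gen_syndrome(int disks, int bytes, uint8_t **ptrs)
{
	uint8_t *p = ptrs[disks - 2], *q = ptrs[disks - 1];

	for (int b = 0; b < bytes; b++) {
		uint8_t wp = 0, wq = 0;
		for (int d = disks - 3; d >= 0; d--) {
			wp ^= ptrs[d][b];
			wq = (uint8_t)(gf_mul2(wq) ^ ptrs[d][b]);
		}
		p[b] = wp;
		q[b] = wq;
	}
}

int main(void)
{
	uint8_t d0[4] = {1, 2, 3, 4}, d1[4] = {5, 6, 7, 8}, p[4], q[4];
	uint8_t *ptrs[4] = {d0, d1, p, q};	/* 2 data blocks + P + Q */

	gen_syndrome(4, 4, ptrs);
	for (int i = 0; i < 4; i++)
		printf("byte %d: P=%02x Q=%02x\n", i, p[i], q[i]);
	return 0;
}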
| 1122 | |||
| 1123 | |||
| 1124 | /* Compute one missing block */ | ||
| 1125 | static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) | ||
| 1126 | { | ||
| 1127 | raid6_conf_t *conf = sh->raid_conf; | ||
| 1128 | int i, count, disks = conf->raid_disks; | ||
| 1129 | void *ptr[MAX_XOR_BLOCKS], *p; | ||
| 1130 | int pd_idx = sh->pd_idx; | ||
| 1131 | int qd_idx = raid6_next_disk(pd_idx, disks); | ||
| 1132 | |||
| 1133 | PRINTK("compute_block_1, stripe %llu, idx %d\n", | ||
| 1134 | (unsigned long long)sh->sector, dd_idx); | ||
| 1135 | |||
| 1136 | if ( dd_idx == qd_idx ) { | ||
| 1137 | /* We're actually computing the Q drive */ | ||
| 1138 | compute_parity6(sh, UPDATE_PARITY); | ||
| 1139 | } else { | ||
| 1140 | ptr[0] = page_address(sh->dev[dd_idx].page); | ||
| 1141 | if (!nozero) memset(ptr[0], 0, STRIPE_SIZE); | ||
| 1142 | count = 1; | ||
| 1143 | for (i = disks ; i--; ) { | ||
| 1144 | if (i == dd_idx || i == qd_idx) | ||
| 1145 | continue; | ||
| 1146 | p = page_address(sh->dev[i].page); | ||
| 1147 | if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) | ||
| 1148 | ptr[count++] = p; | ||
| 1149 | else | ||
| 1150 | printk("compute_block() %d, stripe %llu, %d" | ||
| 1151 | " not present\n", dd_idx, | ||
| 1152 | (unsigned long long)sh->sector, i); | ||
| 1153 | |||
| 1154 | check_xor(); | ||
| 1155 | } | ||
| 1156 | if (count != 1) | ||
| 1157 | xor_block(count, STRIPE_SIZE, ptr); | ||
| 1158 | if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); | ||
| 1159 | else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); | ||
| 1160 | } | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | /* Compute two missing blocks */ | ||
| 1164 | static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) | ||
| 1165 | { | ||
| 1166 | raid6_conf_t *conf = sh->raid_conf; | ||
| 1167 | int i, count, disks = conf->raid_disks; | ||
| 1168 | int pd_idx = sh->pd_idx; | ||
| 1169 | int qd_idx = raid6_next_disk(pd_idx, disks); | ||
| 1170 | int d0_idx = raid6_next_disk(qd_idx, disks); | ||
| 1171 | int faila, failb; | ||
| 1172 | |||
| 1173 | /* faila and failb are disk numbers relative to d0_idx */ | ||
| 1174 | /* pd_idx becomes disks-2 and qd_idx becomes disks-1 */ ||
| 1175 | faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; | ||
| 1176 | failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; | ||
| 1177 | |||
| 1178 | BUG_ON(faila == failb); | ||
| 1179 | if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } | ||
| 1180 | |||
| 1181 | PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", | ||
| 1182 | (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); | ||
| 1183 | |||
| 1184 | if ( failb == disks-1 ) { | ||
| 1185 | /* Q disk is one of the missing disks */ | ||
| 1186 | if ( faila == disks-2 ) { | ||
| 1187 | /* Missing P+Q, just recompute */ | ||
| 1188 | compute_parity6(sh, UPDATE_PARITY); | ||
| 1189 | return; | ||
| 1190 | } else { | ||
| 1191 | /* We're missing D+Q; recompute D from P */ | ||
| 1192 | compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); | ||
| 1193 | compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ | ||
| 1194 | return; | ||
| 1195 | } | ||
| 1196 | } | ||
| 1197 | |||
| 1198 | /* We're missing D+P or D+D; build pointer table */ | ||
| 1199 | { | ||
| 1200 | /**** FIX THIS: This could be very bad if disks is close to 256 ****/ | ||
| 1201 | void *ptrs[disks]; | ||
| 1202 | |||
| 1203 | count = 0; | ||
| 1204 | i = d0_idx; | ||
| 1205 | do { | ||
| 1206 | ptrs[count++] = page_address(sh->dev[i].page); | ||
| 1207 | i = raid6_next_disk(i, disks); | ||
| 1208 | if (i != dd_idx1 && i != dd_idx2 && | ||
| 1209 | !test_bit(R5_UPTODATE, &sh->dev[i].flags)) | ||
| 1210 | printk("compute_2 with missing block %d/%d\n", count, i); | ||
| 1211 | } while ( i != d0_idx ); | ||
| 1212 | |||
| 1213 | if ( failb == disks-2 ) { | ||
| 1214 | /* We're missing D+P. */ | ||
| 1215 | raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); | ||
| 1216 | } else { | ||
| 1217 | /* We're missing D+D. */ | ||
| 1218 | raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | /* Both the above update both missing blocks */ | ||
| 1222 | set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); | ||
| 1223 | set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); | ||
| 1224 | } | ||
| 1225 | } | ||
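compute_block_2() rotates the failed slot numbers so counting starts at d0_idx, which is the frame the recovery routines expect: P ends up at disks-2 and Q at disks-1. A tiny userspace sketch (not kernel code) of that renumbering; the D D P Q D D slot layout is an illustrative assumption:

/* Userspace sketch of the faila/failb translation at the top of
 * compute_block_2(). */
#include <stdio.h>

static int relative_to_d0(int idx, int d0_idx, int disks)
{
	return (idx < d0_idx) ? idx + (disks - d0_idx) : idx - d0_idx;
}

int main(void)
{
	int disks = 6, pd_idx = 2, qd_idx = 3, d0_idx = 4;	/* D D P Q D D */

	printf("P slot %d -> %d, Q slot %d -> %d\n",
	       pd_idx, relative_to_d0(pd_idx, d0_idx, disks),
	       qd_idx, relative_to_d0(qd_idx, d0_idx, disks));
	printf("failed data slots 5,0 -> faila=%d failb=%d\n",
	       relative_to_d0(5, d0_idx, disks),
	       relative_to_d0(0, d0_idx, disks));
	return 0;
}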
| 1226 | |||
| 1227 | |||
| 1228 | |||
| 959 | /* | 1229 | /* |
| 960 | * Each stripe/dev can have one or more bion attached. | 1230 | * Each stripe/dev can have one or more bion attached. |
| 961 | * toread/towrite point to the first in a chain. | 1231 | * toread/towrite point to the first in a chain. |
| 962 | * The bi_next chain must be in order. | 1232 | * The bi_next chain must be in order. |
| 963 | */ | 1233 | */ |
| 964 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) | 1234 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) |
| @@ -1031,6 +1301,13 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
| 1031 | 1301 | ||
| 1032 | static void end_reshape(raid5_conf_t *conf); | 1302 | static void end_reshape(raid5_conf_t *conf); |
| 1033 | 1303 | ||
| 1304 | static int page_is_zero(struct page *p) | ||
| 1305 | { | ||
| 1306 | char *a = page_address(p); | ||
| 1307 | return ((*(u32*)a) == 0 && | ||
| 1308 | memcmp(a, a+4, STRIPE_SIZE-4)==0); | ||
| 1309 | } | ||
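page_is_zero() avoids scanning the page word by word: if the first 32-bit word is zero and the page compares equal to itself shifted by four bytes, every byte must be zero. A userspace sketch (not kernel code) of the same trick; the 4096-byte size is an assumption standing in for STRIPE_SIZE/PAGE_SIZE:

/* Userspace sketch of the page_is_zero() comparison trick. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SIZE 4096

static int buf_is_zero(const char *a)
{
	uint32_t first;

	memcpy(&first, a, sizeof(first));	/* avoid unaligned deref */
	return first == 0 && memcmp(a, a + 4, SIZE - 4) == 0;
}

int main(void)
{
	static char page[SIZE];			/* zero-initialized */

	printf("zero page: %d\n", buf_is_zero(page));
	page[SIZE - 1] = 1;
	printf("dirty page: %d\n", buf_is_zero(page));
	return 0;
}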
| 1310 | |||
| 1034 | static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) | 1311 | static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) |
| 1035 | { | 1312 | { |
| 1036 | int sectors_per_chunk = conf->chunk_size >> 9; | 1313 | int sectors_per_chunk = conf->chunk_size >> 9; |
| @@ -1062,7 +1339,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) | |||
| 1062 | * | 1339 | * |
| 1063 | */ | 1340 | */ |
| 1064 | 1341 | ||
| 1065 | static void handle_stripe(struct stripe_head *sh) | 1342 | static void handle_stripe5(struct stripe_head *sh) |
| 1066 | { | 1343 | { |
| 1067 | raid5_conf_t *conf = sh->raid_conf; | 1344 | raid5_conf_t *conf = sh->raid_conf; |
| 1068 | int disks = sh->disks; | 1345 | int disks = sh->disks; |
| @@ -1394,7 +1671,7 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 1394 | if (locked == 0 && (rcw == 0 ||rmw == 0) && | 1671 | if (locked == 0 && (rcw == 0 ||rmw == 0) && |
| 1395 | !test_bit(STRIPE_BIT_DELAY, &sh->state)) { | 1672 | !test_bit(STRIPE_BIT_DELAY, &sh->state)) { |
| 1396 | PRINTK("Computing parity...\n"); | 1673 | PRINTK("Computing parity...\n"); |
| 1397 | compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE); | 1674 | compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE); |
| 1398 | /* now every locked buffer is ready to be written */ | 1675 | /* now every locked buffer is ready to be written */ |
| 1399 | for (i=disks; i--;) | 1676 | for (i=disks; i--;) |
| 1400 | if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { | 1677 | if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { |
| @@ -1421,13 +1698,10 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 1421 | !test_bit(STRIPE_INSYNC, &sh->state)) { | 1698 | !test_bit(STRIPE_INSYNC, &sh->state)) { |
| 1422 | set_bit(STRIPE_HANDLE, &sh->state); | 1699 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1423 | if (failed == 0) { | 1700 | if (failed == 0) { |
| 1424 | char *pagea; | ||
| 1425 | BUG_ON(uptodate != disks); | 1701 | BUG_ON(uptodate != disks); |
| 1426 | compute_parity(sh, CHECK_PARITY); | 1702 | compute_parity5(sh, CHECK_PARITY); |
| 1427 | uptodate--; | 1703 | uptodate--; |
| 1428 | pagea = page_address(sh->dev[sh->pd_idx].page); | 1704 | if (page_is_zero(sh->dev[sh->pd_idx].page)) { |
| 1429 | if ((*(u32*)pagea) == 0 && | ||
| 1430 | !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) { | ||
| 1431 | /* parity is correct (on disc, not in buffer any more) */ | 1705 | /* parity is correct (on disc, not in buffer any more) */ |
| 1432 | set_bit(STRIPE_INSYNC, &sh->state); | 1706 | set_bit(STRIPE_INSYNC, &sh->state); |
| 1433 | } else { | 1707 | } else { |
| @@ -1487,7 +1761,7 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 1487 | /* Need to write out all blocks after computing parity */ | 1761 | /* Need to write out all blocks after computing parity */ |
| 1488 | sh->disks = conf->raid_disks; | 1762 | sh->disks = conf->raid_disks; |
| 1489 | sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks); | 1763 | sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks); |
| 1490 | compute_parity(sh, RECONSTRUCT_WRITE); | 1764 | compute_parity5(sh, RECONSTRUCT_WRITE); |
| 1491 | for (i= conf->raid_disks; i--;) { | 1765 | for (i= conf->raid_disks; i--;) { |
| 1492 | set_bit(R5_LOCKED, &sh->dev[i].flags); | 1766 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1493 | locked++; | 1767 | locked++; |
| @@ -1615,6 +1889,569 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 1615 | } | 1889 | } |
| 1616 | } | 1890 | } |
| 1617 | 1891 | ||
| 1892 | static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | ||
| 1893 | { | ||
| 1894 | raid6_conf_t *conf = sh->raid_conf; | ||
| 1895 | int disks = conf->raid_disks; | ||
| 1896 | struct bio *return_bi= NULL; | ||
| 1897 | struct bio *bi; | ||
| 1898 | int i; | ||
| 1899 | int syncing; | ||
| 1900 | int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; | ||
| 1901 | int non_overwrite = 0; | ||
| 1902 | int failed_num[2] = {0, 0}; | ||
| 1903 | struct r5dev *dev, *pdev, *qdev; | ||
| 1904 | int pd_idx = sh->pd_idx; | ||
| 1905 | int qd_idx = raid6_next_disk(pd_idx, disks); | ||
| 1906 | int p_failed, q_failed; | ||
| 1907 | |||
| 1908 | PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n", | ||
| 1909 | (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count), | ||
| 1910 | pd_idx, qd_idx); | ||
| 1911 | |||
| 1912 | spin_lock(&sh->lock); | ||
| 1913 | clear_bit(STRIPE_HANDLE, &sh->state); | ||
| 1914 | clear_bit(STRIPE_DELAYED, &sh->state); | ||
| 1915 | |||
| 1916 | syncing = test_bit(STRIPE_SYNCING, &sh->state); | ||
| 1917 | /* Now to look around and see what can be done */ | ||
| 1918 | |||
| 1919 | rcu_read_lock(); | ||
| 1920 | for (i=disks; i--; ) { | ||
| 1921 | mdk_rdev_t *rdev; | ||
| 1922 | dev = &sh->dev[i]; | ||
| 1923 | clear_bit(R5_Insync, &dev->flags); | ||
| 1924 | |||
| 1925 | PRINTK("check %d: state 0x%lx read %p write %p written %p\n", | ||
| 1926 | i, dev->flags, dev->toread, dev->towrite, dev->written); | ||
| 1927 | /* maybe we can reply to a read */ | ||
| 1928 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { | ||
| 1929 | struct bio *rbi, *rbi2; | ||
| 1930 | PRINTK("Return read for disc %d\n", i); | ||
| 1931 | spin_lock_irq(&conf->device_lock); | ||
| 1932 | rbi = dev->toread; | ||
| 1933 | dev->toread = NULL; | ||
| 1934 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) | ||
| 1935 | wake_up(&conf->wait_for_overlap); | ||
| 1936 | spin_unlock_irq(&conf->device_lock); | ||
| 1937 | while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { | ||
| 1938 | copy_data(0, rbi, dev->page, dev->sector); | ||
| 1939 | rbi2 = r5_next_bio(rbi, dev->sector); | ||
| 1940 | spin_lock_irq(&conf->device_lock); | ||
| 1941 | if (--rbi->bi_phys_segments == 0) { | ||
| 1942 | rbi->bi_next = return_bi; | ||
| 1943 | return_bi = rbi; | ||
| 1944 | } | ||
| 1945 | spin_unlock_irq(&conf->device_lock); | ||
| 1946 | rbi = rbi2; | ||
| 1947 | } | ||
| 1948 | } | ||
| 1949 | |||
| 1950 | /* now count some things */ | ||
| 1951 | if (test_bit(R5_LOCKED, &dev->flags)) locked++; | ||
| 1952 | if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++; | ||
| 1953 | |||
| 1954 | |||
| 1955 | if (dev->toread) to_read++; | ||
| 1956 | if (dev->towrite) { | ||
| 1957 | to_write++; | ||
| 1958 | if (!test_bit(R5_OVERWRITE, &dev->flags)) | ||
| 1959 | non_overwrite++; | ||
| 1960 | } | ||
| 1961 | if (dev->written) written++; | ||
| 1962 | rdev = rcu_dereference(conf->disks[i].rdev); | ||
| 1963 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { | ||
| 1964 | /* The ReadError flag will just be confusing now */ | ||
| 1965 | clear_bit(R5_ReadError, &dev->flags); | ||
| 1966 | clear_bit(R5_ReWrite, &dev->flags); | ||
| 1967 | } | ||
| 1968 | if (!rdev || !test_bit(In_sync, &rdev->flags) | ||
| 1969 | || test_bit(R5_ReadError, &dev->flags)) { | ||
| 1970 | if ( failed < 2 ) | ||
| 1971 | failed_num[failed] = i; | ||
| 1972 | failed++; | ||
| 1973 | } else | ||
| 1974 | set_bit(R5_Insync, &dev->flags); | ||
| 1975 | } | ||
| 1976 | rcu_read_unlock(); | ||
| 1977 | PRINTK("locked=%d uptodate=%d to_read=%d" | ||
| 1978 | " to_write=%d failed=%d failed_num=%d,%d\n", | ||
| 1979 | locked, uptodate, to_read, to_write, failed, | ||
| 1980 | failed_num[0], failed_num[1]); | ||
| 1981 | /* check if the array has lost >2 devices and, if so, some requests might | ||
| 1982 | * need to be failed | ||
| 1983 | */ | ||
| 1984 | if (failed > 2 && to_read+to_write+written) { | ||
| 1985 | for (i=disks; i--; ) { | ||
| 1986 | int bitmap_end = 0; | ||
| 1987 | |||
| 1988 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { | ||
| 1989 | mdk_rdev_t *rdev; | ||
| 1990 | rcu_read_lock(); | ||
| 1991 | rdev = rcu_dereference(conf->disks[i].rdev); | ||
| 1992 | if (rdev && test_bit(In_sync, &rdev->flags)) | ||
| 1993 | /* multiple read failures in one stripe */ | ||
| 1994 | md_error(conf->mddev, rdev); | ||
| 1995 | rcu_read_unlock(); | ||
| 1996 | } | ||
| 1997 | |||
| 1998 | spin_lock_irq(&conf->device_lock); | ||
| 1999 | /* fail all writes first */ | ||
| 2000 | bi = sh->dev[i].towrite; | ||
| 2001 | sh->dev[i].towrite = NULL; | ||
| 2002 | if (bi) { to_write--; bitmap_end = 1; } | ||
| 2003 | |||
| 2004 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | ||
| 2005 | wake_up(&conf->wait_for_overlap); | ||
| 2006 | |||
| 2007 | while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ | ||
| 2008 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); | ||
| 2009 | clear_bit(BIO_UPTODATE, &bi->bi_flags); | ||
| 2010 | if (--bi->bi_phys_segments == 0) { | ||
| 2011 | md_write_end(conf->mddev); | ||
| 2012 | bi->bi_next = return_bi; | ||
| 2013 | return_bi = bi; | ||
| 2014 | } | ||
| 2015 | bi = nextbi; | ||
| 2016 | } | ||
| 2017 | /* and fail all 'written' */ | ||
| 2018 | bi = sh->dev[i].written; | ||
| 2019 | sh->dev[i].written = NULL; | ||
| 2020 | if (bi) bitmap_end = 1; | ||
| 2021 | while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { | ||
| 2022 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); | ||
| 2023 | clear_bit(BIO_UPTODATE, &bi->bi_flags); | ||
| 2024 | if (--bi->bi_phys_segments == 0) { | ||
| 2025 | md_write_end(conf->mddev); | ||
| 2026 | bi->bi_next = return_bi; | ||
| 2027 | return_bi = bi; | ||
| 2028 | } | ||
| 2029 | bi = bi2; | ||
| 2030 | } | ||
| 2031 | |||
| 2032 | /* fail any reads if this device is non-operational */ | ||
| 2033 | if (!test_bit(R5_Insync, &sh->dev[i].flags) || | ||
| 2034 | test_bit(R5_ReadError, &sh->dev[i].flags)) { | ||
| 2035 | bi = sh->dev[i].toread; | ||
| 2036 | sh->dev[i].toread = NULL; | ||
| 2037 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | ||
| 2038 | wake_up(&conf->wait_for_overlap); | ||
| 2039 | if (bi) to_read--; | ||
| 2040 | while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ | ||
| 2041 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); | ||
| 2042 | clear_bit(BIO_UPTODATE, &bi->bi_flags); | ||
| 2043 | if (--bi->bi_phys_segments == 0) { | ||
| 2044 | bi->bi_next = return_bi; | ||
| 2045 | return_bi = bi; | ||
| 2046 | } | ||
| 2047 | bi = nextbi; | ||
| 2048 | } | ||
| 2049 | } | ||
| 2050 | spin_unlock_irq(&conf->device_lock); | ||
| 2051 | if (bitmap_end) | ||
| 2052 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, | ||
| 2053 | STRIPE_SECTORS, 0, 0); | ||
| 2054 | } | ||
| 2055 | } | ||
| 2056 | if (failed > 2 && syncing) { | ||
| 2057 | md_done_sync(conf->mddev, STRIPE_SECTORS,0); | ||
| 2058 | clear_bit(STRIPE_SYNCING, &sh->state); | ||
| 2059 | syncing = 0; | ||
| 2060 | } | ||
| 2061 | |||
| 2062 | /* | ||
| 2063 | * might be able to return some write requests if the parity blocks | ||
| 2064 | * are safe, or on a failed drive | ||
| 2065 | */ | ||
| 2066 | pdev = &sh->dev[pd_idx]; | ||
| 2067 | p_failed = (failed >= 1 && failed_num[0] == pd_idx) | ||
| 2068 | || (failed >= 2 && failed_num[1] == pd_idx); | ||
| 2069 | qdev = &sh->dev[qd_idx]; | ||
| 2070 | q_failed = (failed >= 1 && failed_num[0] == qd_idx) | ||
| 2071 | || (failed >= 2 && failed_num[1] == qd_idx); | ||
| 2072 | |||
| 2073 | if ( written && | ||
| 2074 | ( p_failed || ((test_bit(R5_Insync, &pdev->flags) | ||
| 2075 | && !test_bit(R5_LOCKED, &pdev->flags) | ||
| 2076 | && test_bit(R5_UPTODATE, &pdev->flags))) ) && | ||
| 2077 | ( q_failed || ((test_bit(R5_Insync, &qdev->flags) | ||
| 2078 | && !test_bit(R5_LOCKED, &qdev->flags) | ||
| 2079 | && test_bit(R5_UPTODATE, &qdev->flags))) ) ) { | ||
| 2080 | /* any written block on an uptodate or failed drive can be | ||
| 2081 | * returned. Note that if we 'wrote' to a failed drive, | ||
| 2082 | * it will be UPTODATE, but never LOCKED, so we don't need | ||
| 2083 | * to test 'failed' directly. | ||
| 2084 | */ | ||
| 2085 | for (i=disks; i--; ) | ||
| 2086 | if (sh->dev[i].written) { | ||
| 2087 | dev = &sh->dev[i]; | ||
| 2088 | if (!test_bit(R5_LOCKED, &dev->flags) && | ||
| 2089 | test_bit(R5_UPTODATE, &dev->flags) ) { | ||
| 2090 | /* We can return any write requests */ | ||
| 2091 | int bitmap_end = 0; | ||
| 2092 | struct bio *wbi, *wbi2; | ||
| 2093 | PRINTK("Return write for stripe %llu disc %d\n", | ||
| 2094 | (unsigned long long)sh->sector, i); | ||
| 2095 | spin_lock_irq(&conf->device_lock); | ||
| 2096 | wbi = dev->written; | ||
| 2097 | dev->written = NULL; | ||
| 2098 | while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { | ||
| 2099 | wbi2 = r5_next_bio(wbi, dev->sector); | ||
| 2100 | if (--wbi->bi_phys_segments == 0) { | ||
| 2101 | md_write_end(conf->mddev); | ||
| 2102 | wbi->bi_next = return_bi; | ||
| 2103 | return_bi = wbi; | ||
| 2104 | } | ||
| 2105 | wbi = wbi2; | ||
| 2106 | } | ||
| 2107 | if (dev->towrite == NULL) | ||
| 2108 | bitmap_end = 1; | ||
| 2109 | spin_unlock_irq(&conf->device_lock); | ||
| 2110 | if (bitmap_end) | ||
| 2111 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, | ||
| 2112 | STRIPE_SECTORS, | ||
| 2113 | !test_bit(STRIPE_DEGRADED, &sh->state), 0); | ||
| 2114 | } | ||
| 2115 | } | ||
| 2116 | } | ||
| 2117 | |||
| 2118 | /* Now we might consider reading some blocks, either to check/generate | ||
| 2119 | * parity, or to satisfy requests | ||
| 2120 | * or to load a block that is being partially written. | ||
| 2121 | */ | ||
| 2122 | if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) { | ||
| 2123 | for (i=disks; i--;) { | ||
| 2124 | dev = &sh->dev[i]; | ||
| 2125 | if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && | ||
| 2126 | (dev->toread || | ||
| 2127 | (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || | ||
| 2128 | syncing || | ||
| 2129 | (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) || | ||
| 2130 | (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write)) | ||
| 2131 | ) | ||
| 2132 | ) { | ||
| 2133 | /* we would like to get this block, possibly | ||
| 2134 | * by computing it, but we might not be able to | ||
| 2135 | */ | ||
| 2136 | if (uptodate == disks-1) { | ||
| 2137 | PRINTK("Computing stripe %llu block %d\n", | ||
| 2138 | (unsigned long long)sh->sector, i); | ||
| 2139 | compute_block_1(sh, i, 0); | ||
| 2140 | uptodate++; | ||
| 2141 | } else if ( uptodate == disks-2 && failed >= 2 ) { | ||
| 2142 | /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ | ||
| 2143 | int other; | ||
| 2144 | for (other=disks; other--;) { | ||
| 2145 | if ( other == i ) | ||
| 2146 | continue; | ||
| 2147 | if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) ) | ||
| 2148 | break; | ||
| 2149 | } | ||
| 2150 | BUG_ON(other < 0); | ||
| 2151 | PRINTK("Computing stripe %llu blocks %d,%d\n", | ||
| 2152 | (unsigned long long)sh->sector, i, other); | ||
| 2153 | compute_block_2(sh, i, other); | ||
| 2154 | uptodate += 2; | ||
| 2155 | } else if (test_bit(R5_Insync, &dev->flags)) { | ||
| 2156 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2157 | set_bit(R5_Wantread, &dev->flags); | ||
| 2158 | #if 0 | ||
| 2159 | /* if I am just reading this block and we don't have | ||
| 2160 | a failed drive, or any pending writes then sidestep the cache */ | ||
| 2161 | if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && | ||
| 2162 | ! syncing && !failed && !to_write) { | ||
| 2163 | sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page; | ||
| 2164 | sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data; | ||
| 2165 | } | ||
| 2166 | #endif | ||
| 2167 | locked++; | ||
| 2168 | PRINTK("Reading block %d (sync=%d)\n", | ||
| 2169 | i, syncing); | ||
| 2170 | } | ||
| 2171 | } | ||
| 2172 | } | ||
| 2173 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 2174 | } | ||
| 2175 | |||
| 2176 | /* now to consider writing and what else, if anything should be read */ | ||
| 2177 | if (to_write) { | ||
| 2178 | int rcw=0, must_compute=0; | ||
| 2179 | for (i=disks ; i--;) { | ||
| 2180 | dev = &sh->dev[i]; | ||
| 2181 | /* Would I have to read this buffer for reconstruct_write */ | ||
| 2182 | if (!test_bit(R5_OVERWRITE, &dev->flags) | ||
| 2183 | && i != pd_idx && i != qd_idx | ||
| 2184 | && (!test_bit(R5_LOCKED, &dev->flags) | ||
| 2185 | #if 0 | ||
| 2186 | || sh->bh_page[i] != bh->b_page | ||
| 2187 | #endif | ||
| 2188 | ) && | ||
| 2189 | !test_bit(R5_UPTODATE, &dev->flags)) { | ||
| 2190 | if (test_bit(R5_Insync, &dev->flags)) rcw++; | ||
| 2191 | else { | ||
| 2192 | PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags); | ||
| 2193 | must_compute++; | ||
| 2194 | } | ||
| 2195 | } | ||
| 2196 | } | ||
| 2197 | PRINTK("for sector %llu, rcw=%d, must_compute=%d\n", | ||
| 2198 | (unsigned long long)sh->sector, rcw, must_compute); | ||
| 2199 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 2200 | |||
| 2201 | if (rcw > 0) | ||
| 2202 | /* want reconstruct write, but need to get some data */ | ||
| 2203 | for (i=disks; i--;) { | ||
| 2204 | dev = &sh->dev[i]; | ||
| 2205 | if (!test_bit(R5_OVERWRITE, &dev->flags) | ||
| 2206 | && !(failed == 0 && (i == pd_idx || i == qd_idx)) | ||
| 2207 | && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && | ||
| 2208 | test_bit(R5_Insync, &dev->flags)) { | ||
| 2209 | if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) | ||
| 2210 | { | ||
| 2211 | PRINTK("Read_old stripe %llu block %d for Reconstruct\n", | ||
| 2212 | (unsigned long long)sh->sector, i); | ||
| 2213 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2214 | set_bit(R5_Wantread, &dev->flags); | ||
| 2215 | locked++; | ||
| 2216 | } else { | ||
| 2217 | PRINTK("Request delayed stripe %llu block %d for Reconstruct\n", | ||
| 2218 | (unsigned long long)sh->sector, i); | ||
| 2219 | set_bit(STRIPE_DELAYED, &sh->state); | ||
| 2220 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 2221 | } | ||
| 2222 | } | ||
| 2223 | } | ||
| 2224 | /* now if nothing is locked, and if we have enough data, we can start a write request */ | ||
| 2225 | if (locked == 0 && rcw == 0 && | ||
| 2226 | !test_bit(STRIPE_BIT_DELAY, &sh->state)) { | ||
| 2227 | if ( must_compute > 0 ) { | ||
| 2228 | /* We have failed blocks and need to compute them */ | ||
| 2229 | switch ( failed ) { | ||
| 2230 | case 0: BUG(); | ||
| 2231 | case 1: compute_block_1(sh, failed_num[0], 0); break; | ||
| 2232 | case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; | ||
| 2233 | default: BUG(); /* This request should have been failed? */ | ||
| 2234 | } | ||
| 2235 | } | ||
| 2236 | |||
| 2237 | PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector); | ||
| 2238 | compute_parity6(sh, RECONSTRUCT_WRITE); | ||
| 2239 | /* now every locked buffer is ready to be written */ | ||
| 2240 | for (i=disks; i--;) | ||
| 2241 | if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { | ||
| 2242 | PRINTK("Writing stripe %llu block %d\n", | ||
| 2243 | (unsigned long long)sh->sector, i); | ||
| 2244 | locked++; | ||
| 2245 | set_bit(R5_Wantwrite, &sh->dev[i].flags); | ||
| 2246 | } | ||
| 2247 | /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ | ||
| 2248 | set_bit(STRIPE_INSYNC, &sh->state); | ||
| 2249 | |||
| 2250 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { | ||
| 2251 | atomic_dec(&conf->preread_active_stripes); | ||
| 2252 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) | ||
| 2253 | md_wakeup_thread(conf->mddev->thread); | ||
| 2254 | } | ||
| 2255 | } | ||
| 2256 | } | ||
| 2257 | |||
| 2258 | /* maybe we need to check and possibly fix the parity for this stripe | ||
| 2259 | * Any reads will already have been scheduled, so we just see if enough data | ||
| 2260 | * is available | ||
| 2261 | */ | ||
| 2262 | if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) { | ||
| 2263 | int update_p = 0, update_q = 0; | ||
| 2264 | struct r5dev *dev; | ||
| 2265 | |||
| 2266 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 2267 | |||
| 2268 | BUG_ON(failed>2); | ||
| 2269 | BUG_ON(uptodate < disks); | ||
| 2270 | /* Want to check and possibly repair P and Q. | ||
| 2271 | * However there could be one 'failed' device, in which | ||
| 2272 | * case we can only check one of them, possibly using the | ||
| 2273 | * other to generate missing data | ||
| 2274 | */ | ||
| 2275 | |||
| 2276 | /* If !tmp_page, we cannot do the calculations, | ||
| 2277 | * but as we have set STRIPE_HANDLE, we will soon be called | ||
| 2278 | * by stripe_handle with a tmp_page - just wait until then. | ||
| 2279 | */ | ||
| 2280 | if (tmp_page) { | ||
| 2281 | if (failed == q_failed) { | ||
| 2282 | /* The only possible failed device holds 'Q', so it makes | ||
| 2283 | * sense to check P (If anything else were failed, we would | ||
| 2284 | * have used P to recreate it). | ||
| 2285 | */ | ||
| 2286 | compute_block_1(sh, pd_idx, 1); | ||
| 2287 | if (!page_is_zero(sh->dev[pd_idx].page)) { | ||
| 2288 | compute_block_1(sh,pd_idx,0); | ||
| 2289 | update_p = 1; | ||
| 2290 | } | ||
| 2291 | } | ||
| 2292 | if (!q_failed && failed < 2) { | ||
| 2293 | /* q is not failed, and we didn't use it to generate | ||
| 2294 | * anything, so it makes sense to check it | ||
| 2295 | */ | ||
| 2296 | memcpy(page_address(tmp_page), | ||
| 2297 | page_address(sh->dev[qd_idx].page), | ||
| 2298 | STRIPE_SIZE); | ||
| 2299 | compute_parity6(sh, UPDATE_PARITY); | ||
| 2300 | if (memcmp(page_address(tmp_page), | ||
| 2301 | page_address(sh->dev[qd_idx].page), | ||
| 2302 | STRIPE_SIZE)!= 0) { | ||
| 2303 | clear_bit(STRIPE_INSYNC, &sh->state); | ||
| 2304 | update_q = 1; | ||
| 2305 | } | ||
| 2306 | } | ||
| 2307 | if (update_p || update_q) { | ||
| 2308 | conf->mddev->resync_mismatches += STRIPE_SECTORS; | ||
| 2309 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | ||
| 2310 | /* don't try to repair!! */ | ||
| 2311 | update_p = update_q = 0; | ||
| 2312 | } | ||
| 2313 | |||
| 2314 | /* now write out any block on a failed drive, | ||
| 2315 | * or P or Q if they need it | ||
| 2316 | */ | ||
| 2317 | |||
| 2318 | if (failed == 2) { | ||
| 2319 | dev = &sh->dev[failed_num[1]]; | ||
| 2320 | locked++; | ||
| 2321 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2322 | set_bit(R5_Wantwrite, &dev->flags); | ||
| 2323 | } | ||
| 2324 | if (failed >= 1) { | ||
| 2325 | dev = &sh->dev[failed_num[0]]; | ||
| 2326 | locked++; | ||
| 2327 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2328 | set_bit(R5_Wantwrite, &dev->flags); | ||
| 2329 | } | ||
| 2330 | |||
| 2331 | if (update_p) { | ||
| 2332 | dev = &sh->dev[pd_idx]; | ||
| 2333 | locked ++; | ||
| 2334 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2335 | set_bit(R5_Wantwrite, &dev->flags); | ||
| 2336 | } | ||
| 2337 | if (update_q) { | ||
| 2338 | dev = &sh->dev[qd_idx]; | ||
| 2339 | locked++; | ||
| 2340 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2341 | set_bit(R5_Wantwrite, &dev->flags); | ||
| 2342 | } | ||
| 2343 | clear_bit(STRIPE_DEGRADED, &sh->state); | ||
| 2344 | |||
| 2345 | set_bit(STRIPE_INSYNC, &sh->state); | ||
| 2346 | } | ||
| 2347 | } | ||
| 2348 | |||
| 2349 | if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { | ||
| 2350 | md_done_sync(conf->mddev, STRIPE_SECTORS,1); | ||
| 2351 | clear_bit(STRIPE_SYNCING, &sh->state); | ||
| 2352 | } | ||
| 2353 | |||
| 2354 | /* If the failed drives are just a ReadError, then we might need | ||
| 2355 | * to progress the repair/check process | ||
| 2356 | */ | ||
| 2357 | if (failed <= 2 && ! conf->mddev->ro) | ||
| 2358 | for (i=0; i<failed;i++) { | ||
| 2359 | dev = &sh->dev[failed_num[i]]; | ||
| 2360 | if (test_bit(R5_ReadError, &dev->flags) | ||
| 2361 | && !test_bit(R5_LOCKED, &dev->flags) | ||
| 2362 | && test_bit(R5_UPTODATE, &dev->flags) | ||
| 2363 | ) { | ||
| 2364 | if (!test_bit(R5_ReWrite, &dev->flags)) { | ||
| 2365 | set_bit(R5_Wantwrite, &dev->flags); | ||
| 2366 | set_bit(R5_ReWrite, &dev->flags); | ||
| 2367 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2368 | } else { | ||
| 2369 | /* let's read it back */ | ||
| 2370 | set_bit(R5_Wantread, &dev->flags); | ||
| 2371 | set_bit(R5_LOCKED, &dev->flags); | ||
| 2372 | } | ||
| 2373 | } | ||
| 2374 | } | ||
| 2375 | spin_unlock(&sh->lock); | ||
| 2376 | |||
| 2377 | while ((bi=return_bi)) { | ||
| 2378 | int bytes = bi->bi_size; | ||
| 2379 | |||
| 2380 | return_bi = bi->bi_next; | ||
| 2381 | bi->bi_next = NULL; | ||
| 2382 | bi->bi_size = 0; | ||
| 2383 | bi->bi_end_io(bi, bytes, 0); | ||
| 2384 | } | ||
| 2385 | for (i=disks; i-- ;) { | ||
| 2386 | int rw; | ||
| 2387 | struct bio *bi; | ||
| 2388 | mdk_rdev_t *rdev; | ||
| 2389 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) | ||
| 2390 | rw = 1; | ||
| 2391 | else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) | ||
| 2392 | rw = 0; | ||
| 2393 | else | ||
| 2394 | continue; | ||
| 2395 | |||
| 2396 | bi = &sh->dev[i].req; | ||
| 2397 | |||
| 2398 | bi->bi_rw = rw; | ||
| 2399 | if (rw) | ||
| 2400 | bi->bi_end_io = raid5_end_write_request; | ||
| 2401 | else | ||
| 2402 | bi->bi_end_io = raid5_end_read_request; | ||
| 2403 | |||
| 2404 | rcu_read_lock(); | ||
| 2405 | rdev = rcu_dereference(conf->disks[i].rdev); | ||
| 2406 | if (rdev && test_bit(Faulty, &rdev->flags)) | ||
| 2407 | rdev = NULL; | ||
| 2408 | if (rdev) | ||
| 2409 | atomic_inc(&rdev->nr_pending); | ||
| 2410 | rcu_read_unlock(); | ||
| 2411 | |||
| 2412 | if (rdev) { | ||
| 2413 | if (syncing) | ||
| 2414 | md_sync_acct(rdev->bdev, STRIPE_SECTORS); | ||
| 2415 | |||
| 2416 | bi->bi_bdev = rdev->bdev; | ||
| 2417 | PRINTK("for %llu schedule op %ld on disc %d\n", | ||
| 2418 | (unsigned long long)sh->sector, bi->bi_rw, i); | ||
| 2419 | atomic_inc(&sh->count); | ||
| 2420 | bi->bi_sector = sh->sector + rdev->data_offset; | ||
| 2421 | bi->bi_flags = 1 << BIO_UPTODATE; | ||
| 2422 | bi->bi_vcnt = 1; | ||
| 2423 | bi->bi_max_vecs = 1; | ||
| 2424 | bi->bi_idx = 0; | ||
| 2425 | bi->bi_io_vec = &sh->dev[i].vec; | ||
| 2426 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; | ||
| 2427 | bi->bi_io_vec[0].bv_offset = 0; | ||
| 2428 | bi->bi_size = STRIPE_SIZE; | ||
| 2429 | bi->bi_next = NULL; | ||
| 2430 | if (rw == WRITE && | ||
| 2431 | test_bit(R5_ReWrite, &sh->dev[i].flags)) | ||
| 2432 | atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); | ||
| 2433 | generic_make_request(bi); | ||
| 2434 | } else { | ||
| 2435 | if (rw == 1) | ||
| 2436 | set_bit(STRIPE_DEGRADED, &sh->state); | ||
| 2437 | PRINTK("skip op %ld on disc %d for sector %llu\n", | ||
| 2438 | bi->bi_rw, i, (unsigned long long)sh->sector); | ||
| 2439 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | ||
| 2440 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 2441 | } | ||
| 2442 | } | ||
| 2443 | } | ||
| 2444 | |||
| 2445 | static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) | ||
| 2446 | { | ||
| 2447 | if (sh->raid_conf->level == 6) | ||
| 2448 | handle_stripe6(sh, tmp_page); | ||
| 2449 | else | ||
| 2450 | handle_stripe5(sh); | ||
| 2451 | } | ||
| 2452 | |||
| 2453 | |||
| 2454 | |||
| 1618 | static void raid5_activate_delayed(raid5_conf_t *conf) | 2455 | static void raid5_activate_delayed(raid5_conf_t *conf) |
| 1619 | { | 2456 | { |
| 1620 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { | 2457 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { |
| @@ -1753,7 +2590,7 @@ static int make_request(request_queue_t *q, struct bio * bi) | |||
| 1753 | 2590 | ||
| 1754 | for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { | 2591 | for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { |
| 1755 | DEFINE_WAIT(w); | 2592 | DEFINE_WAIT(w); |
| 1756 | int disks; | 2593 | int disks, data_disks; |
| 1757 | 2594 | ||
| 1758 | retry: | 2595 | retry: |
| 1759 | prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); | 2596 | prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); |
| @@ -1781,7 +2618,9 @@ static int make_request(request_queue_t *q, struct bio * bi) | |||
| 1781 | } | 2618 | } |
| 1782 | spin_unlock_irq(&conf->device_lock); | 2619 | spin_unlock_irq(&conf->device_lock); |
| 1783 | } | 2620 | } |
| 1784 | new_sector = raid5_compute_sector(logical_sector, disks, disks - 1, | 2621 | data_disks = disks - conf->max_degraded; |
| 2622 | |||
| 2623 | new_sector = raid5_compute_sector(logical_sector, disks, data_disks, | ||
| 1785 | &dd_idx, &pd_idx, conf); | 2624 | &dd_idx, &pd_idx, conf); |
| 1786 | PRINTK("raid5: make_request, sector %llu logical %llu\n", | 2625 | PRINTK("raid5: make_request, sector %llu logical %llu\n", |
| 1787 | (unsigned long long)new_sector, | 2626 | (unsigned long long)new_sector, |
| @@ -1833,7 +2672,7 @@ static int make_request(request_queue_t *q, struct bio * bi) | |||
| 1833 | } | 2672 | } |
| 1834 | finish_wait(&conf->wait_for_overlap, &w); | 2673 | finish_wait(&conf->wait_for_overlap, &w); |
| 1835 | raid5_plug_device(conf); | 2674 | raid5_plug_device(conf); |
| 1836 | handle_stripe(sh); | 2675 | handle_stripe(sh, NULL); |
| 1837 | release_stripe(sh); | 2676 | release_stripe(sh); |
| 1838 | } else { | 2677 | } else { |
| 1839 | /* cannot get stripe for read-ahead, just give-up */ | 2678 | /* cannot get stripe for read-ahead, just give-up */ |
| @@ -1849,7 +2688,7 @@ static int make_request(request_queue_t *q, struct bio * bi) | |||
| 1849 | if (remaining == 0) { | 2688 | if (remaining == 0) { |
| 1850 | int bytes = bi->bi_size; | 2689 | int bytes = bi->bi_size; |
| 1851 | 2690 | ||
| 1852 | if ( bio_data_dir(bi) == WRITE ) | 2691 | if ( rw == WRITE ) |
| 1853 | md_write_end(mddev); | 2692 | md_write_end(mddev); |
| 1854 | bi->bi_size = 0; | 2693 | bi->bi_size = 0; |
| 1855 | bi->bi_end_io(bi, bytes, 0); | 2694 | bi->bi_end_io(bi, bytes, 0); |
| @@ -1865,9 +2704,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
| 1865 | int pd_idx; | 2704 | int pd_idx; |
| 1866 | sector_t first_sector, last_sector; | 2705 | sector_t first_sector, last_sector; |
| 1867 | int raid_disks = conf->raid_disks; | 2706 | int raid_disks = conf->raid_disks; |
| 1868 | int data_disks = raid_disks-1; | 2707 | int data_disks = raid_disks - conf->max_degraded; |
| 1869 | sector_t max_sector = mddev->size << 1; | 2708 | sector_t max_sector = mddev->size << 1; |
| 1870 | int sync_blocks; | 2709 | int sync_blocks; |
| 2710 | int still_degraded = 0; | ||
| 2711 | int i; | ||
| 1871 | 2712 | ||
| 1872 | if (sector_nr >= max_sector) { | 2713 | if (sector_nr >= max_sector) { |
| 1873 | /* just being told to finish up .. nothing much to do */ | 2714 | /* just being told to finish up .. nothing much to do */ |
| @@ -1880,7 +2721,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
| 1880 | if (mddev->curr_resync < max_sector) /* aborted */ | 2721 | if (mddev->curr_resync < max_sector) /* aborted */ |
| 1881 | bitmap_end_sync(mddev->bitmap, mddev->curr_resync, | 2722 | bitmap_end_sync(mddev->bitmap, mddev->curr_resync, |
| 1882 | &sync_blocks, 1); | 2723 | &sync_blocks, 1); |
| 1883 | else /* compelted sync */ | 2724 | else /* completed sync */ |
| 1884 | conf->fullsync = 0; | 2725 | conf->fullsync = 0; |
| 1885 | bitmap_close_sync(mddev->bitmap); | 2726 | bitmap_close_sync(mddev->bitmap); |
| 1886 | 2727 | ||
| @@ -2003,11 +2844,12 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
| 2003 | } | 2844 | } |
| 2004 | return conf->chunk_size>>9; | 2845 | return conf->chunk_size>>9; |
| 2005 | } | 2846 | } |
| 2006 | /* if there is 1 or more failed drives and we are trying | 2847 | /* if there are too many failed drives and we are trying |
| 2007 | * to resync, then assert that we are finished, because there is | 2848 | * to resync, then assert that we are finished, because there is |
| 2008 | * nothing we can do. | 2849 | * nothing we can do. |
| 2009 | */ | 2850 | */ |
| 2010 | if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | 2851 | if (mddev->degraded >= (raid_disks - data_disks) && |
| 2852 | test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | ||
| 2011 | sector_t rv = (mddev->size << 1) - sector_nr; | 2853 | sector_t rv = (mddev->size << 1) - sector_nr; |
| 2012 | *skipped = 1; | 2854 | *skipped = 1; |
| 2013 | return rv; | 2855 | return rv; |
| @@ -2026,17 +2868,26 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
| 2026 | if (sh == NULL) { | 2868 | if (sh == NULL) { |
| 2027 | sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); | 2869 | sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); |
| 2028 | /* make sure we don't swamp the stripe cache if someone else | 2870 | /* make sure we don't swamp the stripe cache if someone else |
| 2029 | * is trying to get access | 2871 | * is trying to get access |
| 2030 | */ | 2872 | */ |
| 2031 | schedule_timeout_uninterruptible(1); | 2873 | schedule_timeout_uninterruptible(1); |
| 2032 | } | 2874 | } |
| 2033 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0); | 2875 | /* Need to check if array will still be degraded after recovery/resync |
| 2034 | spin_lock(&sh->lock); | 2876 | * We don't need to check the 'failed' flag as when that gets set, |
| 2877 | * recovery aborts. | ||
| 2878 | */ | ||
| 2879 | for (i=0; i<mddev->raid_disks; i++) | ||
| 2880 | if (conf->disks[i].rdev == NULL) | ||
| 2881 | still_degraded = 1; | ||
| 2882 | |||
| 2883 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); | ||
| 2884 | |||
| 2885 | spin_lock(&sh->lock); | ||
| 2035 | set_bit(STRIPE_SYNCING, &sh->state); | 2886 | set_bit(STRIPE_SYNCING, &sh->state); |
| 2036 | clear_bit(STRIPE_INSYNC, &sh->state); | 2887 | clear_bit(STRIPE_INSYNC, &sh->state); |
| 2037 | spin_unlock(&sh->lock); | 2888 | spin_unlock(&sh->lock); |
| 2038 | 2889 | ||
| 2039 | handle_stripe(sh); | 2890 | handle_stripe(sh, NULL); |
| 2040 | release_stripe(sh); | 2891 | release_stripe(sh); |
| 2041 | 2892 | ||
| 2042 | return STRIPE_SECTORS; | 2893 | return STRIPE_SECTORS; |
| @@ -2091,7 +2942,7 @@ static void raid5d (mddev_t *mddev) | |||
| 2091 | spin_unlock_irq(&conf->device_lock); | 2942 | spin_unlock_irq(&conf->device_lock); |
| 2092 | 2943 | ||
| 2093 | handled++; | 2944 | handled++; |
| 2094 | handle_stripe(sh); | 2945 | handle_stripe(sh, conf->spare_page); |
| 2095 | release_stripe(sh); | 2946 | release_stripe(sh); |
| 2096 | 2947 | ||
| 2097 | spin_lock_irq(&conf->device_lock); | 2948 | spin_lock_irq(&conf->device_lock); |
| @@ -2181,8 +3032,8 @@ static int run(mddev_t *mddev) | |||
| 2181 | struct disk_info *disk; | 3032 | struct disk_info *disk; |
| 2182 | struct list_head *tmp; | 3033 | struct list_head *tmp; |
| 2183 | 3034 | ||
| 2184 | if (mddev->level != 5 && mddev->level != 4) { | 3035 | if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { |
| 2185 | printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n", | 3036 | printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", |
| 2186 | mdname(mddev), mddev->level); | 3037 | mdname(mddev), mddev->level); |
| 2187 | return -EIO; | 3038 | return -EIO; |
| 2188 | } | 3039 | } |
| @@ -2251,6 +3102,11 @@ static int run(mddev_t *mddev) | |||
| 2251 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) | 3102 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
| 2252 | goto abort; | 3103 | goto abort; |
| 2253 | 3104 | ||
| 3105 | if (mddev->level == 6) { | ||
| 3106 | conf->spare_page = alloc_page(GFP_KERNEL); | ||
| 3107 | if (!conf->spare_page) | ||
| 3108 | goto abort; | ||
| 3109 | } | ||
| 2254 | spin_lock_init(&conf->device_lock); | 3110 | spin_lock_init(&conf->device_lock); |
| 2255 | init_waitqueue_head(&conf->wait_for_stripe); | 3111 | init_waitqueue_head(&conf->wait_for_stripe); |
| 2256 | init_waitqueue_head(&conf->wait_for_overlap); | 3112 | init_waitqueue_head(&conf->wait_for_overlap); |
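The spare_page allocated here is per-array scratch space handed to the stripe handler for RAID-6 (handle_stripe(sh, conf->spare_page) in the raid5d hunk above); it is allocated once at setup and, as the later abort hunk shows, released with safe_put_page(). A userspace sketch of the same allocate-on-setup / release-on-teardown pairing, with malloc/free standing in for alloc_page()/safe_put_page():

    #include <stdio.h>
    #include <stdlib.h>

    #define SCRATCH_SIZE 4096              /* one page of scratch space */

    struct conf { int level; void *spare_page; };

    /* RAID-6 arrays get one per-array scratch buffer at setup time; the
     * setup fails cleanly if the allocation does. */
    static int setup_conf(struct conf *conf, int level)
    {
        conf->level = level;
        conf->spare_page = NULL;
        if (level == 6) {
            conf->spare_page = malloc(SCRATCH_SIZE);
            if (!conf->spare_page)
                return -1;
        }
        return 0;
    }

    /* Teardown/abort releases it; free(NULL) is a no-op, much like
     * safe_put_page() on a NULL page. */
    static void teardown_conf(struct conf *conf)
    {
        free(conf->spare_page);
        conf->spare_page = NULL;
    }

    int main(void)
    {
        struct conf c;

        if (setup_conf(&c, 6) == 0)
            printf("raid6 scratch buffer at %p\n", c.spare_page);
        teardown_conf(&c);
        return 0;
    }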
| @@ -2282,12 +3138,16 @@ static int run(mddev_t *mddev) | |||
| 2282 | } | 3138 | } |
| 2283 | 3139 | ||
| 2284 | /* | 3140 | /* |
| 2285 | * 0 for a fully functional array, 1 for a degraded array. | 3141 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
| 2286 | */ | 3142 | */ |
| 2287 | mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; | 3143 | mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; |
| 2288 | conf->mddev = mddev; | 3144 | conf->mddev = mddev; |
| 2289 | conf->chunk_size = mddev->chunk_size; | 3145 | conf->chunk_size = mddev->chunk_size; |
| 2290 | conf->level = mddev->level; | 3146 | conf->level = mddev->level; |
| 3147 | if (conf->level == 6) | ||
| 3148 | conf->max_degraded = 2; | ||
| 3149 | else | ||
| 3150 | conf->max_degraded = 1; | ||
| 2291 | conf->algorithm = mddev->layout; | 3151 | conf->algorithm = mddev->layout; |
| 2292 | conf->max_nr_stripes = NR_STRIPES; | 3152 | conf->max_nr_stripes = NR_STRIPES; |
| 2293 | conf->expand_progress = mddev->reshape_position; | 3153 | conf->expand_progress = mddev->reshape_position; |
| @@ -2296,6 +3156,11 @@ static int run(mddev_t *mddev) | |||
| 2296 | mddev->size &= ~(mddev->chunk_size/1024 -1); | 3156 | mddev->size &= ~(mddev->chunk_size/1024 -1); |
| 2297 | mddev->resync_max_sectors = mddev->size << 1; | 3157 | mddev->resync_max_sectors = mddev->size << 1; |
| 2298 | 3158 | ||
| 3159 | if (conf->level == 6 && conf->raid_disks < 4) { | ||
| 3160 | printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", | ||
| 3161 | mdname(mddev), conf->raid_disks); | ||
| 3162 | goto abort; | ||
| 3163 | } | ||
| 2299 | if (!conf->chunk_size || conf->chunk_size % 4) { | 3164 | if (!conf->chunk_size || conf->chunk_size % 4) { |
| 2300 | printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", | 3165 | printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", |
| 2301 | conf->chunk_size, mdname(mddev)); | 3166 | conf->chunk_size, mdname(mddev)); |
| @@ -2307,14 +3172,14 @@ static int run(mddev_t *mddev) | |||
| 2307 | conf->algorithm, mdname(mddev)); | 3172 | conf->algorithm, mdname(mddev)); |
| 2308 | goto abort; | 3173 | goto abort; |
| 2309 | } | 3174 | } |
| 2310 | if (mddev->degraded > 1) { | 3175 | if (mddev->degraded > conf->max_degraded) { |
| 2311 | printk(KERN_ERR "raid5: not enough operational devices for %s" | 3176 | printk(KERN_ERR "raid5: not enough operational devices for %s" |
| 2312 | " (%d/%d failed)\n", | 3177 | " (%d/%d failed)\n", |
| 2313 | mdname(mddev), conf->failed_disks, conf->raid_disks); | 3178 | mdname(mddev), conf->failed_disks, conf->raid_disks); |
| 2314 | goto abort; | 3179 | goto abort; |
| 2315 | } | 3180 | } |
| 2316 | 3181 | ||
| 2317 | if (mddev->degraded == 1 && | 3182 | if (mddev->degraded > 0 && |
| 2318 | mddev->recovery_cp != MaxSector) { | 3183 | mddev->recovery_cp != MaxSector) { |
| 2319 | if (mddev->ok_start_degraded) | 3184 | if (mddev->ok_start_degraded) |
| 2320 | printk(KERN_WARNING | 3185 | printk(KERN_WARNING |
| @@ -2379,10 +3244,11 @@ static int run(mddev_t *mddev) | |||
| 2379 | } | 3244 | } |
| 2380 | 3245 | ||
| 2381 | /* read-ahead size must cover two whole stripes, which is | 3246 | /* read-ahead size must cover two whole stripes, which is |
| 2382 | * 2 * (n-1) * chunksize where 'n' is the number of raid devices | 3247 | * 2 * (datadisks) * chunksize where 'datadisks' is the number of data devices |
| 2383 | */ | 3248 | */ |
| 2384 | { | 3249 | { |
| 2385 | int stripe = (mddev->raid_disks-1) * | 3250 | int data_disks = conf->previous_raid_disks - conf->max_degraded; |
| 3251 | int stripe = data_disks * | ||
| 2386 | (mddev->chunk_size / PAGE_SIZE); | 3252 | (mddev->chunk_size / PAGE_SIZE); |
| 2387 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) | 3253 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
| 2388 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; | 3254 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
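The read-ahead sizing works in PAGE_SIZE units: one full stripe spans data_disks chunks, so covering two stripes takes 2 * data_disks * (chunk_size / PAGE_SIZE) pages. A small worked example with illustrative numbers (4 KiB pages assumed):

    #include <stdio.h>

    int main(void)
    {
        /* Example geometry: 6-device RAID-6, 64 KiB chunks, 4 KiB pages. */
        int raid_disks   = 6;
        int max_degraded = 2;                 /* RAID-6 */
        int chunk_size   = 64 * 1024;
        int page_size    = 4096;

        int data_disks = raid_disks - max_degraded;              /* 4 */
        int stripe     = data_disks * (chunk_size / page_size);  /* 64 pages */
        int ra_pages   = 2 * stripe;                             /* 128 pages = 512 KiB */

        printf("stripe = %d pages, read-ahead = %d pages\n", stripe, ra_pages);
        return 0;
    }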
| @@ -2393,12 +3259,14 @@ static int run(mddev_t *mddev) | |||
| 2393 | 3259 | ||
| 2394 | mddev->queue->unplug_fn = raid5_unplug_device; | 3260 | mddev->queue->unplug_fn = raid5_unplug_device; |
| 2395 | mddev->queue->issue_flush_fn = raid5_issue_flush; | 3261 | mddev->queue->issue_flush_fn = raid5_issue_flush; |
| 2396 | mddev->array_size = mddev->size * (conf->previous_raid_disks - 1); | 3262 | mddev->array_size = mddev->size * (conf->previous_raid_disks - |
| 3263 | conf->max_degraded); | ||
| 2397 | 3264 | ||
| 2398 | return 0; | 3265 | return 0; |
| 2399 | abort: | 3266 | abort: |
| 2400 | if (conf) { | 3267 | if (conf) { |
| 2401 | print_raid5_conf(conf); | 3268 | print_raid5_conf(conf); |
| 3269 | safe_put_page(conf->spare_page); | ||
| 2402 | kfree(conf->disks); | 3270 | kfree(conf->disks); |
| 2403 | kfree(conf->stripe_hashtbl); | 3271 | kfree(conf->stripe_hashtbl); |
| 2404 | kfree(conf); | 3272 | kfree(conf); |
| @@ -2427,23 +3295,23 @@ static int stop(mddev_t *mddev) | |||
| 2427 | } | 3295 | } |
| 2428 | 3296 | ||
| 2429 | #if RAID5_DEBUG | 3297 | #if RAID5_DEBUG |
| 2430 | static void print_sh (struct stripe_head *sh) | 3298 | static void print_sh (struct seq_file *seq, struct stripe_head *sh) |
| 2431 | { | 3299 | { |
| 2432 | int i; | 3300 | int i; |
| 2433 | 3301 | ||
| 2434 | printk("sh %llu, pd_idx %d, state %ld.\n", | 3302 | seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", |
| 2435 | (unsigned long long)sh->sector, sh->pd_idx, sh->state); | 3303 | (unsigned long long)sh->sector, sh->pd_idx, sh->state); |
| 2436 | printk("sh %llu, count %d.\n", | 3304 | seq_printf(seq, "sh %llu, count %d.\n", |
| 2437 | (unsigned long long)sh->sector, atomic_read(&sh->count)); | 3305 | (unsigned long long)sh->sector, atomic_read(&sh->count)); |
| 2438 | printk("sh %llu, ", (unsigned long long)sh->sector); | 3306 | seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); |
| 2439 | for (i = 0; i < sh->disks; i++) { | 3307 | for (i = 0; i < sh->disks; i++) { |
| 2440 | printk("(cache%d: %p %ld) ", | 3308 | seq_printf(seq, "(cache%d: %p %ld) ", |
| 2441 | i, sh->dev[i].page, sh->dev[i].flags); | 3309 | i, sh->dev[i].page, sh->dev[i].flags); |
| 2442 | } | 3310 | } |
| 2443 | printk("\n"); | 3311 | seq_printf(seq, "\n"); |
| 2444 | } | 3312 | } |
| 2445 | 3313 | ||
| 2446 | static void printall (raid5_conf_t *conf) | 3314 | static void printall (struct seq_file *seq, raid5_conf_t *conf) |
| 2447 | { | 3315 | { |
| 2448 | struct stripe_head *sh; | 3316 | struct stripe_head *sh; |
| 2449 | struct hlist_node *hn; | 3317 | struct hlist_node *hn; |
| @@ -2454,7 +3322,7 @@ static void printall (raid5_conf_t *conf) | |||
| 2454 | hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { | 3322 | hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { |
| 2455 | if (sh->raid_conf != conf) | 3323 | if (sh->raid_conf != conf) |
| 2456 | continue; | 3324 | continue; |
| 2457 | print_sh(sh); | 3325 | print_sh(seq, sh); |
| 2458 | } | 3326 | } |
| 2459 | } | 3327 | } |
| 2460 | spin_unlock_irq(&conf->device_lock); | 3328 | spin_unlock_irq(&conf->device_lock); |
| @@ -2474,9 +3342,8 @@ static void status (struct seq_file *seq, mddev_t *mddev) | |||
| 2474 | test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); | 3342 | test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); |
| 2475 | seq_printf (seq, "]"); | 3343 | seq_printf (seq, "]"); |
| 2476 | #if RAID5_DEBUG | 3344 | #if RAID5_DEBUG |
| 2477 | #define D(x) \ | 3345 | seq_printf (seq, "\n"); |
| 2478 | seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x)) | 3346 | printall(seq, conf); |
| 2479 | printall(conf); | ||
| 2480 | #endif | 3347 | #endif |
| 2481 | } | 3348 | } |
| 2482 | 3349 | ||
| @@ -2560,14 +3427,20 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
| 2560 | int disk; | 3427 | int disk; |
| 2561 | struct disk_info *p; | 3428 | struct disk_info *p; |
| 2562 | 3429 | ||
| 2563 | if (mddev->degraded > 1) | 3430 | if (mddev->degraded > conf->max_degraded) |
| 2564 | /* no point adding a device */ | 3431 | /* no point adding a device */ |
| 2565 | return 0; | 3432 | return 0; |
| 2566 | 3433 | ||
| 2567 | /* | 3434 | /* |
| 2568 | * find the disk ... | 3435 | * find the disk ... but prefer rdev->saved_raid_disk |
| 3436 | * if possible. | ||
| 2569 | */ | 3437 | */ |
| 2570 | for (disk=0; disk < conf->raid_disks; disk++) | 3438 | if (rdev->saved_raid_disk >= 0 && |
| 3439 | conf->disks[rdev->saved_raid_disk].rdev == NULL) | ||
| 3440 | disk = rdev->saved_raid_disk; | ||
| 3441 | else | ||
| 3442 | disk = 0; | ||
| 3443 | for ( ; disk < conf->raid_disks; disk++) | ||
| 2571 | if ((p=conf->disks + disk)->rdev == NULL) { | 3444 | if ((p=conf->disks + disk)->rdev == NULL) { |
| 2572 | clear_bit(In_sync, &rdev->flags); | 3445 | clear_bit(In_sync, &rdev->flags); |
| 2573 | rdev->raid_disk = disk; | 3446 | rdev->raid_disk = disk; |
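The hot-add change prefers the slot a returning device previously occupied (rdev->saved_raid_disk) and falls back to scanning from slot 0 only when that position is unavailable, so a re-added member can resume in place. A standalone sketch of that slot-selection order, with hypothetical types:

    #include <stdio.h>

    struct slot { const char *rdev; };     /* NULL means the slot is empty */

    /* Prefer the device's remembered slot when it is free, otherwise scan
     * forward for the first empty slot; -1 means nothing is free. */
    static int pick_slot(const struct slot *disks, int raid_disks, int saved)
    {
        int disk;

        if (saved >= 0 && disks[saved].rdev == NULL)
            disk = saved;
        else
            disk = 0;
        for (; disk < raid_disks; disk++)
            if (disks[disk].rdev == NULL)
                return disk;
        return -1;
    }

    int main(void)
    {
        struct slot d[4] = { { "a" }, { NULL }, { "c" }, { NULL } };

        printf("%d\n", pick_slot(d, 4, 3));   /* saved slot 3 is free -> 3 */
        printf("%d\n", pick_slot(d, 4, 2));   /* slot 2 taken -> first free: 1 */
        printf("%d\n", pick_slot(d, 4, -1));  /* no saved slot -> 1 */
        return 0;
    }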
| @@ -2590,8 +3463,10 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) | |||
| 2590 | * any io in the removed space completes, but it hardly seems | 3463 | * any io in the removed space completes, but it hardly seems |
| 2591 | * worth it. | 3464 | * worth it. |
| 2592 | */ | 3465 | */ |
| 3466 | raid5_conf_t *conf = mddev_to_conf(mddev); | ||
| 3467 | |||
| 2593 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); | 3468 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); |
| 2594 | mddev->array_size = (sectors * (mddev->raid_disks-1))>>1; | 3469 | mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; |
| 2595 | set_capacity(mddev->gendisk, mddev->array_size << 1); | 3470 | set_capacity(mddev->gendisk, mddev->array_size << 1); |
| 2596 | mddev->changed = 1; | 3471 | mddev->changed = 1; |
| 2597 | if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { | 3472 | if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { |
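The resize arithmetic rounds the per-device sector count down to a whole number of chunks, multiplies by the data-device count (raid_disks - max_degraded), and shifts right by one to convert 512-byte sectors into the KiB units array_size is kept in. A worked example with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        /* Example: 6-device RAID-6, 64 KiB chunks, ~100 GiB per device. */
        unsigned long long sectors    = 209715623ULL;   /* per-device sectors */
        unsigned long long chunk_size = 64 * 1024;      /* bytes */
        int raid_disks   = 6;
        int max_degraded = 2;

        /* Round down to a whole number of chunks (chunk_size/512 sectors each). */
        sectors &= ~(chunk_size / 512 - 1);

        /* Usable capacity: data devices only, in 1 KiB units (sectors >> 1). */
        unsigned long long array_kib = sectors * (raid_disks - max_degraded) >> 1;

        printf("array size = %llu KiB\n", array_kib);
        return 0;
    }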
| @@ -2731,6 +3606,17 @@ static void end_reshape(raid5_conf_t *conf) | |||
| 2731 | conf->expand_progress = MaxSector; | 3606 | conf->expand_progress = MaxSector; |
| 2732 | spin_unlock_irq(&conf->device_lock); | 3607 | spin_unlock_irq(&conf->device_lock); |
| 2733 | conf->mddev->reshape_position = MaxSector; | 3608 | conf->mddev->reshape_position = MaxSector; |
| 3609 | |||
| 3610 | /* read-ahead size must cover two whole stripes, which is | ||
| 3611 | * 2 * (datadisks) * chunksize where 'datadisks' is the number of data devices | ||
| 3612 | */ | ||
| 3613 | { | ||
| 3614 | int data_disks = conf->previous_raid_disks - conf->max_degraded; | ||
| 3615 | int stripe = data_disks * | ||
| 3616 | (conf->mddev->chunk_size / PAGE_SIZE); | ||
| 3617 | if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) | ||
| 3618 | conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; | ||
| 3619 | } | ||
| 2734 | } | 3620 | } |
| 2735 | } | 3621 | } |
| 2736 | 3622 | ||
| @@ -2762,6 +3648,23 @@ static void raid5_quiesce(mddev_t *mddev, int state) | |||
| 2762 | } | 3648 | } |
| 2763 | } | 3649 | } |
| 2764 | 3650 | ||
| 3651 | static struct mdk_personality raid6_personality = | ||
| 3652 | { | ||
| 3653 | .name = "raid6", | ||
| 3654 | .level = 6, | ||
| 3655 | .owner = THIS_MODULE, | ||
| 3656 | .make_request = make_request, | ||
| 3657 | .run = run, | ||
| 3658 | .stop = stop, | ||
| 3659 | .status = status, | ||
| 3660 | .error_handler = error, | ||
| 3661 | .hot_add_disk = raid5_add_disk, | ||
| 3662 | .hot_remove_disk= raid5_remove_disk, | ||
| 3663 | .spare_active = raid5_spare_active, | ||
| 3664 | .sync_request = sync_request, | ||
| 3665 | .resize = raid5_resize, | ||
| 3666 | .quiesce = raid5_quiesce, | ||
| 3667 | }; | ||
| 2765 | static struct mdk_personality raid5_personality = | 3668 | static struct mdk_personality raid5_personality = |
| 2766 | { | 3669 | { |
| 2767 | .name = "raid5", | 3670 | .name = "raid5", |
| @@ -2804,6 +3707,12 @@ static struct mdk_personality raid4_personality = | |||
| 2804 | 3707 | ||
| 2805 | static int __init raid5_init(void) | 3708 | static int __init raid5_init(void) |
| 2806 | { | 3709 | { |
| 3710 | int e; | ||
| 3711 | |||
| 3712 | e = raid6_select_algo(); | ||
| 3713 | if ( e ) | ||
| 3714 | return e; | ||
| 3715 | register_md_personality(&raid6_personality); | ||
| 2807 | register_md_personality(&raid5_personality); | 3716 | register_md_personality(&raid5_personality); |
| 2808 | register_md_personality(&raid4_personality); | 3717 | register_md_personality(&raid4_personality); |
| 2809 | return 0; | 3718 | return 0; |
| @@ -2811,6 +3720,7 @@ static int __init raid5_init(void) | |||
| 2811 | 3720 | ||
| 2812 | static void raid5_exit(void) | 3721 | static void raid5_exit(void) |
| 2813 | { | 3722 | { |
| 3723 | unregister_md_personality(&raid6_personality); | ||
| 2814 | unregister_md_personality(&raid5_personality); | 3724 | unregister_md_personality(&raid5_personality); |
| 2815 | unregister_md_personality(&raid4_personality); | 3725 | unregister_md_personality(&raid4_personality); |
| 2816 | } | 3726 | } |
| @@ -2823,3 +3733,10 @@ MODULE_ALIAS("md-raid5"); | |||
| 2823 | MODULE_ALIAS("md-raid4"); | 3733 | MODULE_ALIAS("md-raid4"); |
| 2824 | MODULE_ALIAS("md-level-5"); | 3734 | MODULE_ALIAS("md-level-5"); |
| 2825 | MODULE_ALIAS("md-level-4"); | 3735 | MODULE_ALIAS("md-level-4"); |
| 3736 | MODULE_ALIAS("md-personality-8"); /* RAID6 */ | ||
| 3737 | MODULE_ALIAS("md-raid6"); | ||
| 3738 | MODULE_ALIAS("md-level-6"); | ||
| 3739 | |||
| 3741 | /* This used to be two separate modules; they were: */ | ||
| 3741 | MODULE_ALIAS("raid5"); | ||
| 3742 | MODULE_ALIAS("raid6"); | ||
