author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 15:39:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 15:39:42 -0400
commit     c3ae1f33569e06984f0426a7834ff63251d44d84 (patch)
tree       0d7f6f6b49c331e440850cf2e8a7bc0cf220f8bc
parent     c28cfd60e4ec3f494b73ef7d6c661f5f491cd84f (diff)
parent     d890fa2b0586b6177b119643ff66932127d58afa (diff)
Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md: (34 commits)
md: Fix some bugs in recovery_disabled handling.
md/raid5: fix bug that could result in reads from a failed device.
lib/raid6: Fix filename emitted in generated code
md.c: trivial comment fix
MD: Allow restarting an interrupted incremental recovery.
md: clear In_sync bit on devices added to an active array.
md: add proper write-congestion reporting to RAID1 and RAID10.
md: rename "mdk_personality" to "md_personality"
md/bitmap remove fault injection options.
md/raid5: typedef removal: raid5_conf_t -> struct r5conf
md/raid1: typedef removal: conf_t -> struct r1conf
md/raid10: typedef removal: conf_t -> struct r10conf
md/raid0: typedef removal: raid0_conf_t -> struct r0conf
md/multipath: typedef removal: multipath_conf_t -> struct mpconf
md/linear: typedef removal: linear_conf_t -> struct linear_conf
md/faulty: remove typedef: conf_t -> struct faulty_conf
md/linear: remove typedefs: dev_info_t -> struct dev_info
md: remove typedefs: mirror_info_t -> struct mirror_info
md: remove typedefs: r10bio_t -> struct r10bio and r1bio_t -> struct r1bio
md: remove typedefs: mdk_thread_t -> struct md_thread
...
-rw-r--r--  drivers/md/bitmap.c     188
-rw-r--r--  drivers/md/bitmap.h      12
-rw-r--r--  drivers/md/dm-raid.c     28
-rw-r--r--  drivers/md/faulty.c      38
-rw-r--r--  drivers/md/linear.c      46
-rw-r--r--  drivers/md/linear.h      12
-rw-r--r--  drivers/md/md.c         631
-rw-r--r--  drivers/md/md.h         153
-rw-r--r--  drivers/md/multipath.c   64
-rw-r--r--  drivers/md/multipath.h   10
-rw-r--r--  drivers/md/raid0.c      191
-rw-r--r--  drivers/md/raid0.h       10
-rw-r--r--  drivers/md/raid1.c      335
-rw-r--r--  drivers/md/raid1.h       85
-rw-r--r--  drivers/md/raid10.c     280
-rw-r--r--  drivers/md/raid10.h      22
-rw-r--r--  drivers/md/raid5.c      363
-rw-r--r--  drivers/md/raid5.h       20
-rw-r--r--  lib/raid6/int.uc          2

19 files changed, 1217 insertions(+), 1273 deletions(-)
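Most of the churn in this merge is the mechanical typedef removal listed in the shortlog: md's private typedefs (mddev_t, mdk_rdev_t, mdk_personality, raid5_conf_t and friends) give way to plain struct tags, in line with the kernel style rule that structure types should not be hidden behind typedefs. A minimal sketch of the pattern, with illustrative names rather than the full kernel definitions:

    /* Before: a typedef hides the struct keyword. */
    typedef struct mddev_s {
            int raid_disks;         /* fields elided */
    } mddev_t;

    static int speed_min_old(mddev_t *mddev);

    /* After: the struct tag is used directly and the typedef is gone. */
    struct mddev {
            int raid_disks;         /* fields elided */
    };

    static int speed_min_new(struct mddev *mddev);
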
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 0dc6546b77a8..7878712721bf 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -29,35 +29,6 @@
 #include "md.h"
 #include "bitmap.h"
 
-/* debug macros */
-
-#define DEBUG 0
-
-#if DEBUG
-/* these are for debugging purposes only! */
-
-/* define one and only one of these */
-#define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */
-#define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/
-#define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */
-#define INJECT_FAULTS_4 0 /* undef */
-#define INJECT_FAULTS_5 0 /* undef */
-#define INJECT_FAULTS_6 0
-
-/* if these are defined, the driver will fail! debug only */
-#define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */
-#define INJECT_FATAL_FAULT_2 0 /* undef */
-#define INJECT_FATAL_FAULT_3 0 /* undef */
-#endif
-
-#ifndef PRINTK
-# if DEBUG > 0
-#  define PRINTK(x...) printk(KERN_DEBUG x)
-# else
-#  define PRINTK(x...)
-# endif
-#endif
-
 static inline char *bmname(struct bitmap *bitmap)
 {
 	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
@@ -70,16 +41,12 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
 {
 	unsigned char *page;
 
-#ifdef INJECT_FAULTS_1
-	page = NULL;
-#else
 	page = kzalloc(PAGE_SIZE, GFP_NOIO);
-#endif
 	if (!page)
 		printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
 	else
-		PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
+		pr_debug("%s: bitmap_alloc_page: allocated page at %p\n",
 			bmname(bitmap), page);
 	return page;
 }
 
@@ -88,7 +55,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
  */
 static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
 {
-	PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
+	pr_debug("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
 	kfree(page);
 }
 
@@ -133,8 +100,8 @@ __acquires(bitmap->lock)
 	spin_lock_irq(&bitmap->lock);
 
 	if (mappage == NULL) {
-		PRINTK("%s: bitmap map page allocation failed, hijacking\n",
+		pr_debug("%s: bitmap map page allocation failed, hijacking\n",
 			bmname(bitmap));
 		/* failed - set the hijacked flag so that we can use the
 		 * pointer as a counter */
 		if (!bitmap->bp[page].map)
@@ -187,13 +154,13 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
  */
 
 /* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
+static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
 				 struct page *page,
 				 unsigned long index, int size)
 {
 	/* choose a good rdev and read the page from there */
 
-	mdk_rdev_t *rdev;
+	struct md_rdev *rdev;
 	sector_t target;
 	int did_alloc = 0;
 
@@ -226,7 +193,7 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
 
 }
 
-static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
 	/* Iterate the disks of an mddev, using rcu to protect access to the
 	 * linked list, and raising the refcount of devices we return to ensure
@@ -247,7 +214,7 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
 		pos = &rdev->same_set;
 	}
 	list_for_each_continue_rcu(pos, &mddev->disks) {
-		rdev = list_entry(pos, mdk_rdev_t, same_set);
+		rdev = list_entry(pos, struct md_rdev, same_set);
 		if (rdev->raid_disk >= 0 &&
 		    !test_bit(Faulty, &rdev->flags)) {
 			/* this is a usable devices */
@@ -262,9 +229,9 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
 
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
-	mdk_rdev_t *rdev = NULL;
+	struct md_rdev *rdev = NULL;
 	struct block_device *bdev;
-	mddev_t *mddev = bitmap->mddev;
+	struct mddev *mddev = bitmap->mddev;
 
 	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
@@ -409,8 +376,8 @@ static struct page *read_page(struct file *file, unsigned long index,
 	struct buffer_head *bh;
 	sector_t block;
 
-	PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
+	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
 		(unsigned long long)index << PAGE_SHIFT);
 
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
@@ -868,7 +835,8 @@ static void bitmap_file_kick(struct bitmap *bitmap)
 
 enum bitmap_page_attr {
 	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
-	BITMAP_PAGE_CLEAN = 1,     /* there are bits that might need to be cleared */
+	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
+				    * i.e. counter is 1 or 2. */
 	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
 };
 
@@ -919,7 +887,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 	else
 		__set_bit_le(bit, kaddr);
 	kunmap_atomic(kaddr, KM_USER0);
-	PRINTK("set file bit %lu page %lu\n", bit, page->index);
+	pr_debug("set file bit %lu page %lu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
 	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
 }
@@ -997,11 +965,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 
 	BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);
 
-#ifdef INJECT_FAULTS_3
-	outofdate = 1;
-#else
 	outofdate = bitmap->flags & BITMAP_STALE;
-#endif
 	if (outofdate)
 		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
 			"recovery\n", bmname(bitmap));
@@ -1111,7 +1075,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 					       (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
 					       needed);
 			bit_cnt++;
-			set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
 		}
 	}
 
@@ -1146,6 +1109,7 @@ void bitmap_write_all(struct bitmap *bitmap)
 	for (i = 0; i < bitmap->file_pages; i++)
 		set_page_attr(bitmap, bitmap->filemap[i],
 			      BITMAP_PAGE_NEEDWRITE);
+	bitmap->allclean = 0;
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1164,7 +1128,7 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
  * out to disk
  */
 
-void bitmap_daemon_work(mddev_t *mddev)
+void bitmap_daemon_work(struct mddev *mddev)
 {
 	struct bitmap *bitmap;
 	unsigned long j;
@@ -1204,17 +1168,15 @@ void bitmap_daemon_work(mddev_t *mddev)
 
 		if (page != lastpage) {
 			/* skip this page unless it's marked as needing cleaning */
-			if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) {
+			if (!test_page_attr(bitmap, page, BITMAP_PAGE_PENDING)) {
 				int need_write = test_page_attr(bitmap, page,
 								BITMAP_PAGE_NEEDWRITE);
 				if (need_write)
 					clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
 
 				spin_unlock_irqrestore(&bitmap->lock, flags);
-				if (need_write) {
+				if (need_write)
 					write_page(bitmap, page, 0);
-					bitmap->allclean = 0;
-				}
 				spin_lock_irqsave(&bitmap->lock, flags);
 				j |= (PAGE_BITS - 1);
 				continue;
@@ -1222,12 +1184,16 @@ void bitmap_daemon_work(mddev_t *mddev)
 
 		/* grab the new page, sync and release the old */
 		if (lastpage != NULL) {
-			if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
-				clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
+			if (test_page_attr(bitmap, lastpage,
+					   BITMAP_PAGE_NEEDWRITE)) {
+				clear_page_attr(bitmap, lastpage,
+						BITMAP_PAGE_NEEDWRITE);
 				spin_unlock_irqrestore(&bitmap->lock, flags);
 				write_page(bitmap, lastpage, 0);
 			} else {
-				set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
+				set_page_attr(bitmap, lastpage,
+					      BITMAP_PAGE_NEEDWRITE);
+				bitmap->allclean = 0;
 				spin_unlock_irqrestore(&bitmap->lock, flags);
 			}
 		} else
@@ -1249,19 +1215,17 @@ void bitmap_daemon_work(mddev_t *mddev)
 			}
 			spin_lock_irqsave(&bitmap->lock, flags);
 			if (!bitmap->need_sync)
-				clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
+				clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
+			else
+				bitmap->allclean = 0;
 		}
 		bmc = bitmap_get_counter(bitmap,
 					 (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
 					 &blocks, 0);
-		if (bmc) {
-			if (*bmc)
-				bitmap->allclean = 0;
-
-			if (*bmc == 2) {
-				*bmc = 1; /* maybe clear the bit next time */
-				set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
-			} else if (*bmc == 1 && !bitmap->need_sync) {
+		if (!bmc)
+			j |= PAGE_COUNTER_MASK;
+		else if (*bmc) {
+			if (*bmc == 1 && !bitmap->need_sync) {
 				/* we can clear the bit */
 				*bmc = 0;
 				bitmap_count_page(bitmap,
@@ -1275,13 +1239,16 @@ void bitmap_daemon_work(mddev_t *mddev)
 							  paddr);
 				else
 					__clear_bit_le(
 						file_page_offset(bitmap,
 								 j),
 						paddr);
 				kunmap_atomic(paddr, KM_USER0);
+			} else if (*bmc <= 2) {
+				*bmc = 1; /* maybe clear the bit next time */
+				set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
+				bitmap->allclean = 0;
 			}
-		} else
-			j |= PAGE_COUNTER_MASK;
+		}
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 
@@ -1294,6 +1261,7 @@ void bitmap_daemon_work(mddev_t *mddev)
 			write_page(bitmap, lastpage, 0);
 		} else {
 			set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
+			bitmap->allclean = 0;
 			spin_unlock_irqrestore(&bitmap->lock, flags);
 		}
 	}
@@ -1359,8 +1327,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 		if (bw > bitmap->behind_writes_used)
 			bitmap->behind_writes_used = bw;
 
-		PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
-		       bw, bitmap->max_write_behind);
+		pr_debug("inc write-behind count %d/%lu\n",
+			 bw, bitmap->mddev->bitmap_info.max_write_behind);
 	}
 
 	while (sectors) {
@@ -1407,7 +1375,6 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 		else
 			sectors = 0;
 	}
-	bitmap->allclean = 0;
 	return 0;
 }
 EXPORT_SYMBOL(bitmap_startwrite);
@@ -1420,8 +1387,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 	if (behind) {
 		if (atomic_dec_and_test(&bitmap->behind_writes))
 			wake_up(&bitmap->behind_wait);
-		PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
-		       atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
+		pr_debug("dec write-behind count %d/%lu\n",
+			 atomic_read(&bitmap->behind_writes),
+			 bitmap->mddev->bitmap_info.max_write_behind);
 	}
 	if (bitmap->mddev->degraded)
 		/* Never clear bits or update events_cleared when degraded */
@@ -1453,13 +1421,14 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 			wake_up(&bitmap->overflow_wait);
 
 		(*bmc)--;
-		if (*bmc <= 2)
+		if (*bmc <= 2) {
 			set_page_attr(bitmap,
 				      filemap_get_page(
 					      bitmap,
 					      offset >> CHUNK_BLOCK_SHIFT(bitmap)),
-				      BITMAP_PAGE_CLEAN);
-
+				      BITMAP_PAGE_PENDING);
+			bitmap->allclean = 0;
+		}
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 		offset += blocks;
 		if (sectors > blocks)
@@ -1495,7 +1464,6 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
 		}
 	}
 	spin_unlock_irq(&bitmap->lock);
-	bitmap->allclean = 0;
 	return rv;
 }
 
@@ -1543,15 +1511,16 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
 		if (!NEEDED(*bmc) && aborted)
 			*bmc |= NEEDED_MASK;
 		else {
-			if (*bmc <= 2)
+			if (*bmc <= 2) {
 				set_page_attr(bitmap,
 					      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
-					      BITMAP_PAGE_CLEAN);
+					      BITMAP_PAGE_PENDING);
+				bitmap->allclean = 0;
+			}
 		}
 	}
  unlock:
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	bitmap->allclean = 0;
 }
 EXPORT_SYMBOL(bitmap_end_sync);
 
@@ -1622,10 +1591,10 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
 		*bmc = 1 | (needed ? NEEDED_MASK : 0);
 		bitmap_count_page(bitmap, offset, 1);
 		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
-		set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
+		set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
+		bitmap->allclean = 0;
 	}
 	spin_unlock_irq(&bitmap->lock);
-	bitmap->allclean = 0;
 }
 
 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
@@ -1649,7 +1618,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
 /*
  * flush out any pending updates
  */
-void bitmap_flush(mddev_t *mddev)
+void bitmap_flush(struct mddev *mddev)
 {
 	struct bitmap *bitmap = mddev->bitmap;
 	long sleep;
@@ -1697,7 +1666,7 @@ static void bitmap_free(struct bitmap *bitmap)
 	kfree(bitmap);
 }
 
-void bitmap_destroy(mddev_t *mddev)
+void bitmap_destroy(struct mddev *mddev)
 {
 	struct bitmap *bitmap = mddev->bitmap;
 
@@ -1720,7 +1689,7 @@ void bitmap_destroy(mddev_t *mddev)
  * initialize the bitmap structure
  * if this returns an error, bitmap_destroy must be called to do clean up
  */
-int bitmap_create(mddev_t *mddev)
+int bitmap_create(struct mddev *mddev)
 {
 	struct bitmap *bitmap;
 	sector_t blocks = mddev->resync_max_sectors;
@@ -1802,11 +1771,8 @@ int bitmap_create(mddev_t *mddev)
 	bitmap->pages = pages;
 	bitmap->missing_pages = pages;
 
-#ifdef INJECT_FATAL_FAULT_1
-	bitmap->bp = NULL;
-#else
 	bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
-#endif
+
 	err = -ENOMEM;
 	if (!bitmap->bp)
 		goto error;
@@ -1824,7 +1790,7 @@ int bitmap_create(mddev_t *mddev)
 	return err;
 }
 
-int bitmap_load(mddev_t *mddev)
+int bitmap_load(struct mddev *mddev)
 {
 	int err = 0;
 	sector_t start = 0;
@@ -1870,7 +1836,7 @@ out:
 EXPORT_SYMBOL_GPL(bitmap_load);
 
 static ssize_t
-location_show(mddev_t *mddev, char *page)
+location_show(struct mddev *mddev, char *page)
 {
 	ssize_t len;
 	if (mddev->bitmap_info.file)
@@ -1884,7 +1850,7 @@ location_show(mddev_t *mddev, char *page)
 }
 
 static ssize_t
-location_store(mddev_t *mddev, const char *buf, size_t len)
+location_store(struct mddev *mddev, const char *buf, size_t len)
 {
 
 	if (mddev->pers) {
@@ -1961,7 +1927,7 @@ static struct md_sysfs_entry bitmap_location =
 __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
 
 static ssize_t
-timeout_show(mddev_t *mddev, char *page)
+timeout_show(struct mddev *mddev, char *page)
 {
 	ssize_t len;
 	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
@@ -1975,7 +1941,7 @@ timeout_show(mddev_t *mddev, char *page)
 }
 
 static ssize_t
-timeout_store(mddev_t *mddev, const char *buf, size_t len)
+timeout_store(struct mddev *mddev, const char *buf, size_t len)
 {
 	/* timeout can be set at any time */
 	unsigned long timeout;
@@ -2011,13 +1977,13 @@ static struct md_sysfs_entry bitmap_timeout =
 __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
 
 static ssize_t
-backlog_show(mddev_t *mddev, char *page)
+backlog_show(struct mddev *mddev, char *page)
 {
 	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
 }
 
 static ssize_t
-backlog_store(mddev_t *mddev, const char *buf, size_t len)
+backlog_store(struct mddev *mddev, const char *buf, size_t len)
 {
 	unsigned long backlog;
 	int rv = strict_strtoul(buf, 10, &backlog);
@@ -2033,13 +1999,13 @@ static struct md_sysfs_entry bitmap_backlog =
 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
 
 static ssize_t
-chunksize_show(mddev_t *mddev, char *page)
+chunksize_show(struct mddev *mddev, char *page)
 {
 	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
 }
 
 static ssize_t
-chunksize_store(mddev_t *mddev, const char *buf, size_t len)
+chunksize_store(struct mddev *mddev, const char *buf, size_t len)
 {
 	/* Can only be changed when no bitmap is active */
 	int rv;
@@ -2059,13 +2025,13 @@ chunksize_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry bitmap_chunksize =
 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
 
-static ssize_t metadata_show(mddev_t *mddev, char *page)
+static ssize_t metadata_show(struct mddev *mddev, char *page)
 {
 	return sprintf(page, "%s\n", (mddev->bitmap_info.external
 				      ? "external" : "internal"));
 }
 
-static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len)
+static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
 {
 	if (mddev->bitmap ||
 	    mddev->bitmap_info.file ||
@@ -2083,7 +2049,7 @@ static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry bitmap_metadata =
 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
 
-static ssize_t can_clear_show(mddev_t *mddev, char *page)
+static ssize_t can_clear_show(struct mddev *mddev, char *page)
 {
 	int len;
 	if (mddev->bitmap)
@@ -2094,7 +2060,7 @@ static ssize_t can_clear_show(mddev_t *mddev, char *page)
 	return len;
 }
 
-static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
+static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
 {
 	if (mddev->bitmap == NULL)
 		return -ENOENT;
@@ -2113,7 +2079,7 @@ static struct md_sysfs_entry bitmap_can_clear =
 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
 
 static ssize_t
-behind_writes_used_show(mddev_t *mddev, char *page)
+behind_writes_used_show(struct mddev *mddev, char *page)
 {
 	if (mddev->bitmap == NULL)
 		return sprintf(page, "0\n");
@@ -2122,7 +2088,7 @@ behind_writes_used_show(mddev_t *mddev, char *page)
 }
 
 static ssize_t
-behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
+behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
 {
 	if (mddev->bitmap)
 		mddev->bitmap->behind_writes_used = 0;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index a28f2e5588c6..a15436dd9b3e 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -193,7 +193,7 @@ struct bitmap {
 	unsigned long pages; /* total number of pages in the bitmap */
 	unsigned long missing_pages; /* number of pages not yet allocated */
 
-	mddev_t *mddev; /* the md device that the bitmap is for */
+	struct mddev *mddev; /* the md device that the bitmap is for */
 
 	/* bitmap chunksize -- how much data does each bit represent? */
 	unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
@@ -238,10 +238,10 @@ struct bitmap {
 /* the bitmap API */
 
 /* these are used only by md/bitmap */
-int bitmap_create(mddev_t *mddev);
-int bitmap_load(mddev_t *mddev);
-void bitmap_flush(mddev_t *mddev);
-void bitmap_destroy(mddev_t *mddev);
+int bitmap_create(struct mddev *mddev);
+int bitmap_load(struct mddev *mddev);
+void bitmap_flush(struct mddev *mddev);
+void bitmap_destroy(struct mddev *mddev);
 
 void bitmap_print_sb(struct bitmap *bitmap);
 void bitmap_update_sb(struct bitmap *bitmap);
@@ -262,7 +262,7 @@ void bitmap_close_sync(struct bitmap *bitmap);
 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
 
 void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(mddev_t *mddev);
+void bitmap_daemon_work(struct mddev *mddev);
 #endif
 
 #endif
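The PRINTK conversions in bitmap.c above are more than a rename: pr_debug() ties these messages into the kernel's standard debug machinery. With CONFIG_DYNAMIC_DEBUG enabled, each call site can be switched on per file or per line at runtime; without it, the calls compile to nothing unless DEBUG is defined, which is why the hand-rolled "#if DEBUG" scaffolding and fault-injection ifdefs could be dropped. A hedged sketch of the usage, reusing bmname() as defined in bitmap.c:

    #include <linux/printk.h>

    /* Compiled out unless DEBUG is defined, or emitted when
     * CONFIG_DYNAMIC_DEBUG enables this site at runtime. */
    pr_debug("%s: bitmap_alloc_page: allocated page at %p\n",
             bmname(bitmap), page);

And a runtime toggle, assuming dynamic debug is configured and debugfs is mounted:

    echo 'file bitmap.c +p' > /sys/kernel/debug/dynamic_debug/control
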
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 86df8b2cf927..37a37266a1e3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -37,7 +37,7 @@ struct raid_dev {
 	 */
 	struct dm_dev *meta_dev;
 	struct dm_dev *data_dev;
-	struct mdk_rdev_s rdev;
+	struct md_rdev rdev;
 };
 
 /*
@@ -57,7 +57,7 @@ struct raid_set {
 
 	uint64_t print_flags;
 
-	struct mddev_s md;
+	struct mddev md;
 	struct raid_type *raid_type;
 	struct dm_target_callbacks callbacks;
 
@@ -594,7 +594,7 @@ struct dm_raid_superblock {
 	/* Always set to 0 when writing. */
 } __packed;
 
-static int read_disk_sb(mdk_rdev_t *rdev, int size)
+static int read_disk_sb(struct md_rdev *rdev, int size)
 {
 	BUG_ON(!rdev->sb_page);
 
@@ -611,9 +611,9 @@ static int read_disk_sb(mdk_rdev_t *rdev, int size)
 	return 0;
 }
 
-static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 {
-	mdk_rdev_t *r, *t;
+	struct md_rdev *r, *t;
 	uint64_t failed_devices;
 	struct dm_raid_superblock *sb;
 
@@ -651,7 +651,7 @@ static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
  *
  * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
  */
-static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
 {
 	int ret;
 	struct dm_raid_superblock *sb;
@@ -689,7 +689,7 @@ static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
 	return (events_sb > events_refsb) ? 1 : 0;
 }
 
-static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
 {
 	int role;
 	struct raid_set *rs = container_of(mddev, struct raid_set, md);
@@ -698,7 +698,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
 	struct dm_raid_superblock *sb;
 	uint32_t new_devs = 0;
 	uint32_t rebuilds = 0;
-	mdk_rdev_t *r, *t;
+	struct md_rdev *r, *t;
 	struct dm_raid_superblock *sb2;
 
 	sb = page_address(rdev->sb_page);
@@ -809,7 +809,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
 	return 0;
 }
 
-static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
 {
 	struct dm_raid_superblock *sb = page_address(rdev->sb_page);
 
@@ -849,8 +849,8 @@ static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 {
 	int ret;
-	mdk_rdev_t *rdev, *freshest, *tmp;
-	mddev_t *mddev = &rs->md;
+	struct md_rdev *rdev, *freshest, *tmp;
+	struct mddev *mddev = &rs->md;
 
 	freshest = NULL;
 	rdev_for_each(rdev, tmp, mddev) {
@@ -1004,7 +1004,7 @@ static void raid_dtr(struct dm_target *ti)
 static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
 {
 	struct raid_set *rs = ti->private;
-	mddev_t *mddev = &rs->md;
+	struct mddev *mddev = &rs->md;
 
 	mddev->pers->make_request(mddev, bio);
 
@@ -1097,7 +1097,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 			  rs->md.bitmap_info.max_write_behind);
 
 	if (rs->print_flags & DMPF_STRIPE_CACHE) {
-		raid5_conf_t *conf = rs->md.private;
+		struct r5conf *conf = rs->md.private;
 
 		/* convert from kiB to sectors */
 		DMEMIT(" stripe_cache %d",
@@ -1146,7 +1146,7 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct raid_set *rs = ti->private;
 	unsigned chunk_size = rs->md.chunk_sectors << 9;
-	raid5_conf_t *conf = rs->md.private;
+	struct r5conf *conf = rs->md.private;
 
 	blk_limits_io_min(limits, chunk_size);
 	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
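Note how dm-raid embeds a full struct mddev by value inside its own struct raid_set (the "struct mddev md;" member above) and recovers the wrapper with container_of(), as in super_init_validation(). The idiom, reduced to a standalone sketch with stand-in types rather than the real kernel definitions:

    #include <stddef.h>

    /* Userspace stand-in for the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mddev    { int level; };                   /* stand-in */
    struct raid_set { long flags; struct mddev md; }; /* md embedded by value */

    /* Given a pointer to the embedded member, recover the container. */
    static struct raid_set *to_raid_set(struct mddev *mddev)
    {
            return container_of(mddev, struct raid_set, md);
    }
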
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 23078dabb6df..60816b132c2e 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -81,16 +81,16 @@ static void faulty_fail(struct bio *bio, int error)
 	bio_io_error(b);
 }
 
-typedef struct faulty_conf {
+struct faulty_conf {
 	int period[Modes];
 	atomic_t counters[Modes];
 	sector_t faults[MaxFault];
 	int modes[MaxFault];
 	int nfaults;
-	mdk_rdev_t *rdev;
-} conf_t;
+	struct md_rdev *rdev;
+};
 
-static int check_mode(conf_t *conf, int mode)
+static int check_mode(struct faulty_conf *conf, int mode)
 {
 	if (conf->period[mode] == 0 &&
 	    atomic_read(&conf->counters[mode]) <= 0)
@@ -105,7 +105,7 @@ static int check_mode(conf_t *conf, int mode)
 	return 0;
 }
 
-static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
+static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
 {
 	/* If we find a ReadFixable sector, we fix it ... */
 	int i;
@@ -129,7 +129,7 @@ static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
 	return 0;
 }
 
-static void add_sector(conf_t *conf, sector_t start, int mode)
+static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
 {
 	int i;
 	int n = conf->nfaults;
@@ -169,9 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static int make_request(mddev_t *mddev, struct bio *bio)
+static int make_request(struct mddev *mddev, struct bio *bio)
 {
-	conf_t *conf = mddev->private;
+	struct faulty_conf *conf = mddev->private;
 	int failit = 0;
 
 	if (bio_data_dir(bio) == WRITE) {
@@ -222,9 +222,9 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 	}
 }
 
-static void status(struct seq_file *seq, mddev_t *mddev)
+static void status(struct seq_file *seq, struct mddev *mddev)
 {
-	conf_t *conf = mddev->private;
+	struct faulty_conf *conf = mddev->private;
 	int n;
 
 	if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
@@ -255,11 +255,11 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 }
 
 
-static int reshape(mddev_t *mddev)
+static int reshape(struct mddev *mddev)
 {
 	int mode = mddev->new_layout & ModeMask;
 	int count = mddev->new_layout >> ModeShift;
-	conf_t *conf = mddev->private;
+	struct faulty_conf *conf = mddev->private;
 
 	if (mddev->new_layout < 0)
 		return 0;
@@ -284,7 +284,7 @@ static int reshape(mddev_t *mddev)
 	return 0;
 }
 
-static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	WARN_ONCE(raid_disks,
 		  "%s does not support generic reshape\n", __func__);
@@ -295,11 +295,11 @@ static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 	return sectors;
 }
 
-static int run(mddev_t *mddev)
+static int run(struct mddev *mddev)
 {
-	mdk_rdev_t *rdev;
+	struct md_rdev *rdev;
 	int i;
-	conf_t *conf;
+	struct faulty_conf *conf;
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
@@ -325,16 +325,16 @@ static int run(mddev_t *mddev)
 	return 0;
 }
 
-static int stop(mddev_t *mddev)
+static int stop(struct mddev *mddev)
 {
-	conf_t *conf = mddev->private;
+	struct faulty_conf *conf = mddev->private;
 
 	kfree(conf);
 	mddev->private = NULL;
 	return 0;
 }
 
-static struct mdk_personality faulty_personality =
+static struct md_personality faulty_personality =
 {
 	.name	= "faulty",
 	.level	= LEVEL_FAULTY,
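With the mdk_personality rename, each md level is still wired up as an ops table: a struct md_personality filled in with designated initializers and registered with the md core. The sketch below is a trimmed, illustrative subset matching the functions defined in faulty.c; the real structure in drivers/md/md.h carries more hooks (error_handler, hot_add_disk, sync_request and so on):

    /* Shape only; see drivers/md/md.h for the full structure. */
    struct md_personality_demo {
            char *name;
            int   level;
            int  (*make_request)(struct mddev *mddev, struct bio *bio);
            int  (*run)(struct mddev *mddev);
            int  (*stop)(struct mddev *mddev);
            void (*status)(struct seq_file *seq, struct mddev *mddev);
    };

    static struct md_personality_demo faulty_demo = {
            .name         = "faulty",
            .level        = LEVEL_FAULTY,
            .make_request = make_request,
            .run          = run,
            .stop         = stop,
            .status       = status,
    };
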
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6cd2c313e800..10c5844460cb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -26,10 +26,10 @@
 /*
  * find which device holds a particular offset
  */
-static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
+static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 {
 	int lo, mid, hi;
-	linear_conf_t *conf;
+	struct linear_conf *conf;
 
 	lo = 0;
 	hi = mddev->raid_disks - 1;
@@ -63,8 +63,8 @@ static int linear_mergeable_bvec(struct request_queue *q,
 				 struct bvec_merge_data *bvm,
 				 struct bio_vec *biovec)
 {
-	mddev_t *mddev = q->queuedata;
-	dev_info_t *dev0;
+	struct mddev *mddev = q->queuedata;
+	struct dev_info *dev0;
 	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 
@@ -89,8 +89,8 @@ static int linear_mergeable_bvec(struct request_queue *q,
 
 static int linear_congested(void *data, int bits)
 {
-	mddev_t *mddev = data;
-	linear_conf_t *conf;
+	struct mddev *mddev = data;
+	struct linear_conf *conf;
 	int i, ret = 0;
 
 	if (mddev_congested(mddev, bits))
@@ -108,9 +108,9 @@ static int linear_congested(void *data, int bits)
 	return ret;
 }
 
-static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
-	linear_conf_t *conf;
+	struct linear_conf *conf;
 	sector_t array_sectors;
 
 	rcu_read_lock();
@@ -123,13 +123,13 @@ static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 	return array_sectors;
 }
 
-static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
+static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 {
-	linear_conf_t *conf;
-	mdk_rdev_t *rdev;
+	struct linear_conf *conf;
+	struct md_rdev *rdev;
 	int i, cnt;
 
-	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
+	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
 			GFP_KERNEL);
 	if (!conf)
 		return NULL;
@@ -139,7 +139,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		int j = rdev->raid_disk;
-		dev_info_t *disk = conf->disks + j;
+		struct dev_info *disk = conf->disks + j;
 		sector_t sectors;
 
 		if (j < 0 || j >= raid_disks || disk->rdev) {
@@ -194,9 +194,9 @@ out:
 	return NULL;
 }
 
-static int linear_run (mddev_t *mddev)
+static int linear_run (struct mddev *mddev)
 {
-	linear_conf_t *conf;
+	struct linear_conf *conf;
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
@@ -213,7 +213,7 @@ static int linear_run (mddev_t *mddev)
 	return md_integrity_register(mddev);
 }
 
-static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
+static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 {
 	/* Adding a drive to a linear array allows the array to grow.
 	 * It is permitted if the new drive has a matching superblock
@@ -223,7 +223,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 	 * The current one is never freed until the array is stopped.
 	 * This avoids races.
 	 */
-	linear_conf_t *newconf, *oldconf;
+	struct linear_conf *newconf, *oldconf;
 
 	if (rdev->saved_raid_disk != mddev->raid_disks)
 		return -EINVAL;
@@ -245,9 +245,9 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 	return 0;
 }
 
-static int linear_stop (mddev_t *mddev)
+static int linear_stop (struct mddev *mddev)
 {
-	linear_conf_t *conf = mddev->private;
+	struct linear_conf *conf = mddev->private;
 
 	/*
 	 * We do not require rcu protection here since
@@ -264,9 +264,9 @@ static int linear_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static int linear_make_request (struct mddev *mddev, struct bio *bio)
 {
-	dev_info_t *tmp_dev;
+	struct dev_info *tmp_dev;
 	sector_t start_sector;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
@@ -323,14 +323,14 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 	return 1;
 }
 
-static void linear_status (struct seq_file *seq, mddev_t *mddev)
+static void linear_status (struct seq_file *seq, struct mddev *mddev)
 {
 
 	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
 }
 
 
-static struct mdk_personality linear_personality =
+static struct md_personality linear_personality =
 {
 	.name		= "linear",
 	.level		= LEVEL_LINEAR,
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index 2f2da05b2ce9..b685ddd7d7f7 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -2,20 +2,14 @@
 #define _LINEAR_H
 
 struct dev_info {
-	mdk_rdev_t	*rdev;
+	struct md_rdev	*rdev;
 	sector_t	end_sector;
 };
 
-typedef struct dev_info dev_info_t;
-
-struct linear_private_data
+struct linear_conf
 {
 	struct rcu_head		rcu;
 	sector_t		array_sectors;
-	dev_info_t		disks[0];
+	struct dev_info		disks[0];
 };
-
-
-typedef struct linear_private_data linear_conf_t;
-
 #endif
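The renamed struct linear_conf keeps the old zero-length-array trick: disks[0] is the pre-C99 spelling of a flexible array member, and linear_conf() in linear.c sizes a single allocation as sizeof(*conf) + raid_disks * sizeof(struct dev_info). A userspace sketch of the same layout, with stand-in types and C99 "[]" in place of the kernel's "[0]":

    #include <stdlib.h>

    struct dev_info_demo {
            void               *rdev;
            unsigned long long  end_sector;
    };

    struct linear_conf_demo {
            unsigned long long   array_sectors;
            struct dev_info_demo disks[];   /* flexible array member */
    };

    /* Header and per-disk tail come from one zeroed allocation,
     * mirroring linear_conf()'s kzalloc() call. */
    static struct linear_conf_demo *alloc_conf(int raid_disks)
    {
            return calloc(1, sizeof(struct linear_conf_demo) +
                             raid_disks * sizeof(struct dev_info_demo));
    }
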
diff --git a/drivers/md/md.c b/drivers/md/md.c index 5c95ccb59500..266e82ebaf11 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -54,9 +54,6 @@ | |||
54 | #include "md.h" | 54 | #include "md.h" |
55 | #include "bitmap.h" | 55 | #include "bitmap.h" |
56 | 56 | ||
57 | #define DEBUG 0 | ||
58 | #define dprintk(x...) ((void)(DEBUG && printk(x))) | ||
59 | |||
60 | #ifndef MODULE | 57 | #ifndef MODULE |
61 | static void autostart_arrays(int part); | 58 | static void autostart_arrays(int part); |
62 | #endif | 59 | #endif |
@@ -98,13 +95,13 @@ static struct workqueue_struct *md_misc_wq; | |||
98 | 95 | ||
99 | static int sysctl_speed_limit_min = 1000; | 96 | static int sysctl_speed_limit_min = 1000; |
100 | static int sysctl_speed_limit_max = 200000; | 97 | static int sysctl_speed_limit_max = 200000; |
101 | static inline int speed_min(mddev_t *mddev) | 98 | static inline int speed_min(struct mddev *mddev) |
102 | { | 99 | { |
103 | return mddev->sync_speed_min ? | 100 | return mddev->sync_speed_min ? |
104 | mddev->sync_speed_min : sysctl_speed_limit_min; | 101 | mddev->sync_speed_min : sysctl_speed_limit_min; |
105 | } | 102 | } |
106 | 103 | ||
107 | static inline int speed_max(mddev_t *mddev) | 104 | static inline int speed_max(struct mddev *mddev) |
108 | { | 105 | { |
109 | return mddev->sync_speed_max ? | 106 | return mddev->sync_speed_max ? |
110 | mddev->sync_speed_max : sysctl_speed_limit_max; | 107 | mddev->sync_speed_max : sysctl_speed_limit_max; |
@@ -160,7 +157,7 @@ static int start_readonly; | |||
160 | 157 | ||
161 | static void mddev_bio_destructor(struct bio *bio) | 158 | static void mddev_bio_destructor(struct bio *bio) |
162 | { | 159 | { |
163 | mddev_t *mddev, **mddevp; | 160 | struct mddev *mddev, **mddevp; |
164 | 161 | ||
165 | mddevp = (void*)bio; | 162 | mddevp = (void*)bio; |
166 | mddev = mddevp[-1]; | 163 | mddev = mddevp[-1]; |
@@ -169,10 +166,10 @@ static void mddev_bio_destructor(struct bio *bio) | |||
169 | } | 166 | } |
170 | 167 | ||
171 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 168 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
172 | mddev_t *mddev) | 169 | struct mddev *mddev) |
173 | { | 170 | { |
174 | struct bio *b; | 171 | struct bio *b; |
175 | mddev_t **mddevp; | 172 | struct mddev **mddevp; |
176 | 173 | ||
177 | if (!mddev || !mddev->bio_set) | 174 | if (!mddev || !mddev->bio_set) |
178 | return bio_alloc(gfp_mask, nr_iovecs); | 175 | return bio_alloc(gfp_mask, nr_iovecs); |
@@ -189,10 +186,10 @@ struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | |||
189 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); | 186 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); |
190 | 187 | ||
191 | struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, | 188 | struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, |
192 | mddev_t *mddev) | 189 | struct mddev *mddev) |
193 | { | 190 | { |
194 | struct bio *b; | 191 | struct bio *b; |
195 | mddev_t **mddevp; | 192 | struct mddev **mddevp; |
196 | 193 | ||
197 | if (!mddev || !mddev->bio_set) | 194 | if (!mddev || !mddev->bio_set) |
198 | return bio_clone(bio, gfp_mask); | 195 | return bio_clone(bio, gfp_mask); |
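The mddevp[-1] indexing in these two allocators and in mddev_bio_destructor() relies on how the bio_set is created: it reserves front padding for exactly one pointer, and the owning mddev is stashed there at allocation time. A sketch of the round trip, assuming the bio_set was created with that front_pad (md.c sizes it as sizeof(struct mddev *)):

    /* creation: reserve one pointer of front padding per bio */
    mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(struct mddev *));

    /* allocation: record the owner just in front of the bio */
    b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
    mddevp = (void *)b;
    mddevp[-1] = mddev;
    b->bi_destructor = mddev_bio_destructor;

    /* destructor: step back one pointer to recover the owner, then
     * return the bio to the set it came from */
    mddev = mddevp[-1];
    bio_free(b, mddev->bio_set);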
@@ -281,7 +278,7 @@ EXPORT_SYMBOL_GPL(md_trim_bio); | |||
281 | */ | 278 | */ |
282 | static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); | 279 | static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); |
283 | static atomic_t md_event_count; | 280 | static atomic_t md_event_count; |
284 | void md_new_event(mddev_t *mddev) | 281 | void md_new_event(struct mddev *mddev) |
285 | { | 282 | { |
286 | atomic_inc(&md_event_count); | 283 | atomic_inc(&md_event_count); |
287 | wake_up(&md_event_waiters); | 284 | wake_up(&md_event_waiters); |
@@ -291,7 +288,7 @@ EXPORT_SYMBOL_GPL(md_new_event); | |||
291 | /* Alternate version that can be called from interrupts | 288 | /* Alternate version that can be called from interrupts |
292 | * when calling sysfs_notify isn't needed. | 289 | * when calling sysfs_notify isn't needed. |
293 | */ | 290 | */ |
294 | static void md_new_event_inintr(mddev_t *mddev) | 291 | static void md_new_event_inintr(struct mddev *mddev) |
295 | { | 292 | { |
296 | atomic_inc(&md_event_count); | 293 | atomic_inc(&md_event_count); |
297 | wake_up(&md_event_waiters); | 294 | wake_up(&md_event_waiters); |
@@ -312,19 +309,19 @@ static DEFINE_SPINLOCK(all_mddevs_lock); | |||
312 | * Any code which breaks out of this loop while owning | 309 | * Any code which breaks out of this loop while owning |
313 | * a reference to the current mddev must mddev_put it. | 310 | * a reference to the current mddev must mddev_put it. |
314 | */ | 311 | */ |
315 | #define for_each_mddev(mddev,tmp) \ | 312 | #define for_each_mddev(_mddev,_tmp) \ |
316 | \ | 313 | \ |
317 | for (({ spin_lock(&all_mddevs_lock); \ | 314 | for (({ spin_lock(&all_mddevs_lock); \ |
318 | tmp = all_mddevs.next; \ | 315 | _tmp = all_mddevs.next; \ |
319 | mddev = NULL;}); \ | 316 | _mddev = NULL;}); \ |
320 | ({ if (tmp != &all_mddevs) \ | 317 | ({ if (_tmp != &all_mddevs) \ |
321 | mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ | 318 | mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\ |
322 | spin_unlock(&all_mddevs_lock); \ | 319 | spin_unlock(&all_mddevs_lock); \ |
323 | if (mddev) mddev_put(mddev); \ | 320 | if (_mddev) mddev_put(_mddev); \ |
324 | mddev = list_entry(tmp, mddev_t, all_mddevs); \ | 321 | _mddev = list_entry(_tmp, struct mddev, all_mddevs); \ |
325 | tmp != &all_mddevs;}); \ | 322 | _tmp != &all_mddevs;}); \ |
326 | ({ spin_lock(&all_mddevs_lock); \ | 323 | ({ spin_lock(&all_mddevs_lock); \ |
327 | tmp = tmp->next;}) \ | 324 | _tmp = _tmp->next;}) \ |
328 | ) | 325 | ) |
329 | 326 | ||
330 | 327 | ||
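The underscore prefixes in the reworked macro are not cosmetic. The body now spells out the type name struct mddev, and the preprocessor substitutes every occurrence of a parameter token in the body, including the one inside the type name. A minimal illustration of what the old parameter name would have done:

    #define broken_iter(mddev, tmp) \
            list_entry(tmp, struct mddev, all_mddevs)

    /* broken_iter(m, t) expands to:
     *
     *         list_entry(t, struct m, all_mddevs)
     *
     * because the "mddev" inside "struct mddev" is itself a parameter
     * token.  Renaming the parameters to _mddev/_tmp keeps the type
     * name intact after expansion. */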
@@ -338,7 +335,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock); | |||
338 | static int md_make_request(struct request_queue *q, struct bio *bio) | 335 | static int md_make_request(struct request_queue *q, struct bio *bio) |
339 | { | 336 | { |
340 | const int rw = bio_data_dir(bio); | 337 | const int rw = bio_data_dir(bio); |
341 | mddev_t *mddev = q->queuedata; | 338 | struct mddev *mddev = q->queuedata; |
342 | int rv; | 339 | int rv; |
343 | int cpu; | 340 | int cpu; |
344 | unsigned int sectors; | 341 | unsigned int sectors; |
@@ -390,7 +387,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
390 | * Once ->stop is called and completes, the module will be completely | 387 | * Once ->stop is called and completes, the module will be completely |
391 | * unused. | 388 | * unused. |
392 | */ | 389 | */ |
393 | void mddev_suspend(mddev_t *mddev) | 390 | void mddev_suspend(struct mddev *mddev) |
394 | { | 391 | { |
395 | BUG_ON(mddev->suspended); | 392 | BUG_ON(mddev->suspended); |
396 | mddev->suspended = 1; | 393 | mddev->suspended = 1; |
@@ -400,7 +397,7 @@ void mddev_suspend(mddev_t *mddev) | |||
400 | } | 397 | } |
401 | EXPORT_SYMBOL_GPL(mddev_suspend); | 398 | EXPORT_SYMBOL_GPL(mddev_suspend); |
402 | 399 | ||
403 | void mddev_resume(mddev_t *mddev) | 400 | void mddev_resume(struct mddev *mddev) |
404 | { | 401 | { |
405 | mddev->suspended = 0; | 402 | mddev->suspended = 0; |
406 | wake_up(&mddev->sb_wait); | 403 | wake_up(&mddev->sb_wait); |
@@ -411,7 +408,7 @@ void mddev_resume(mddev_t *mddev) | |||
411 | } | 408 | } |
412 | EXPORT_SYMBOL_GPL(mddev_resume); | 409 | EXPORT_SYMBOL_GPL(mddev_resume); |
413 | 410 | ||
414 | int mddev_congested(mddev_t *mddev, int bits) | 411 | int mddev_congested(struct mddev *mddev, int bits) |
415 | { | 412 | { |
416 | return mddev->suspended; | 413 | return mddev->suspended; |
417 | } | 414 | } |
@@ -423,8 +420,8 @@ EXPORT_SYMBOL(mddev_congested); | |||
423 | 420 | ||
424 | static void md_end_flush(struct bio *bio, int err) | 421 | static void md_end_flush(struct bio *bio, int err) |
425 | { | 422 | { |
426 | mdk_rdev_t *rdev = bio->bi_private; | 423 | struct md_rdev *rdev = bio->bi_private; |
427 | mddev_t *mddev = rdev->mddev; | 424 | struct mddev *mddev = rdev->mddev; |
428 | 425 | ||
429 | rdev_dec_pending(rdev, mddev); | 426 | rdev_dec_pending(rdev, mddev); |
430 | 427 | ||
@@ -439,8 +436,8 @@ static void md_submit_flush_data(struct work_struct *ws); | |||
439 | 436 | ||
440 | static void submit_flushes(struct work_struct *ws) | 437 | static void submit_flushes(struct work_struct *ws) |
441 | { | 438 | { |
442 | mddev_t *mddev = container_of(ws, mddev_t, flush_work); | 439 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); |
443 | mdk_rdev_t *rdev; | 440 | struct md_rdev *rdev; |
444 | 441 | ||
445 | INIT_WORK(&mddev->flush_work, md_submit_flush_data); | 442 | INIT_WORK(&mddev->flush_work, md_submit_flush_data); |
446 | atomic_set(&mddev->flush_pending, 1); | 443 | atomic_set(&mddev->flush_pending, 1); |
@@ -472,7 +469,7 @@ static void submit_flushes(struct work_struct *ws) | |||
472 | 469 | ||
473 | static void md_submit_flush_data(struct work_struct *ws) | 470 | static void md_submit_flush_data(struct work_struct *ws) |
474 | { | 471 | { |
475 | mddev_t *mddev = container_of(ws, mddev_t, flush_work); | 472 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); |
476 | struct bio *bio = mddev->flush_bio; | 473 | struct bio *bio = mddev->flush_bio; |
477 | 474 | ||
478 | if (bio->bi_size == 0) | 475 | if (bio->bi_size == 0) |
@@ -488,7 +485,7 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
488 | wake_up(&mddev->sb_wait); | 485 | wake_up(&mddev->sb_wait); |
489 | } | 486 | } |
490 | 487 | ||
491 | void md_flush_request(mddev_t *mddev, struct bio *bio) | 488 | void md_flush_request(struct mddev *mddev, struct bio *bio) |
492 | { | 489 | { |
493 | spin_lock_irq(&mddev->write_lock); | 490 | spin_lock_irq(&mddev->write_lock); |
494 | wait_event_lock_irq(mddev->sb_wait, | 491 | wait_event_lock_irq(mddev->sb_wait, |
@@ -512,7 +509,7 @@ EXPORT_SYMBOL(md_flush_request); | |||
512 | */ | 509 | */ |
513 | struct md_plug_cb { | 510 | struct md_plug_cb { |
514 | struct blk_plug_cb cb; | 511 | struct blk_plug_cb cb; |
515 | mddev_t *mddev; | 512 | struct mddev *mddev; |
516 | }; | 513 | }; |
517 | 514 | ||
518 | static void plugger_unplug(struct blk_plug_cb *cb) | 515 | static void plugger_unplug(struct blk_plug_cb *cb) |
@@ -526,7 +523,7 @@ static void plugger_unplug(struct blk_plug_cb *cb) | |||
526 | /* Check that an unplug wakeup will come shortly. | 523 | /* Check that an unplug wakeup will come shortly. |
527 | * If not, wakeup the md thread immediately | 524 | * If not, wakeup the md thread immediately |
528 | */ | 525 | */ |
529 | int mddev_check_plugged(mddev_t *mddev) | 526 | int mddev_check_plugged(struct mddev *mddev) |
530 | { | 527 | { |
531 | struct blk_plug *plug = current->plug; | 528 | struct blk_plug *plug = current->plug; |
532 | struct md_plug_cb *mdcb; | 529 | struct md_plug_cb *mdcb; |
@@ -558,7 +555,7 @@ int mddev_check_plugged(mddev_t *mddev) | |||
558 | } | 555 | } |
559 | EXPORT_SYMBOL_GPL(mddev_check_plugged); | 556 | EXPORT_SYMBOL_GPL(mddev_check_plugged); |
560 | 557 | ||
561 | static inline mddev_t *mddev_get(mddev_t *mddev) | 558 | static inline struct mddev *mddev_get(struct mddev *mddev) |
562 | { | 559 | { |
563 | atomic_inc(&mddev->active); | 560 | atomic_inc(&mddev->active); |
564 | return mddev; | 561 | return mddev; |
@@ -566,7 +563,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev) | |||
566 | 563 | ||
567 | static void mddev_delayed_delete(struct work_struct *ws); | 564 | static void mddev_delayed_delete(struct work_struct *ws); |
568 | 565 | ||
569 | static void mddev_put(mddev_t *mddev) | 566 | static void mddev_put(struct mddev *mddev) |
570 | { | 567 | { |
571 | struct bio_set *bs = NULL; | 568 | struct bio_set *bs = NULL; |
572 | 569 | ||
@@ -595,7 +592,7 @@ static void mddev_put(mddev_t *mddev) | |||
595 | bioset_free(bs); | 592 | bioset_free(bs); |
596 | } | 593 | } |
597 | 594 | ||
598 | void mddev_init(mddev_t *mddev) | 595 | void mddev_init(struct mddev *mddev) |
599 | { | 596 | { |
600 | mutex_init(&mddev->open_mutex); | 597 | mutex_init(&mddev->open_mutex); |
601 | mutex_init(&mddev->reconfig_mutex); | 598 | mutex_init(&mddev->reconfig_mutex); |
@@ -618,9 +615,9 @@ void mddev_init(mddev_t *mddev) | |||
618 | } | 615 | } |
619 | EXPORT_SYMBOL_GPL(mddev_init); | 616 | EXPORT_SYMBOL_GPL(mddev_init); |
620 | 617 | ||
621 | static mddev_t * mddev_find(dev_t unit) | 618 | static struct mddev * mddev_find(dev_t unit) |
622 | { | 619 | { |
623 | mddev_t *mddev, *new = NULL; | 620 | struct mddev *mddev, *new = NULL; |
624 | 621 | ||
625 | if (unit && MAJOR(unit) != MD_MAJOR) | 622 | if (unit && MAJOR(unit) != MD_MAJOR) |
626 | unit &= ~((1<<MdpMinorShift)-1); | 623 | unit &= ~((1<<MdpMinorShift)-1); |
@@ -692,24 +689,24 @@ static mddev_t * mddev_find(dev_t unit) | |||
692 | goto retry; | 689 | goto retry; |
693 | } | 690 | } |
694 | 691 | ||
695 | static inline int mddev_lock(mddev_t * mddev) | 692 | static inline int mddev_lock(struct mddev * mddev) |
696 | { | 693 | { |
697 | return mutex_lock_interruptible(&mddev->reconfig_mutex); | 694 | return mutex_lock_interruptible(&mddev->reconfig_mutex); |
698 | } | 695 | } |
699 | 696 | ||
700 | static inline int mddev_is_locked(mddev_t *mddev) | 697 | static inline int mddev_is_locked(struct mddev *mddev) |
701 | { | 698 | { |
702 | return mutex_is_locked(&mddev->reconfig_mutex); | 699 | return mutex_is_locked(&mddev->reconfig_mutex); |
703 | } | 700 | } |
704 | 701 | ||
705 | static inline int mddev_trylock(mddev_t * mddev) | 702 | static inline int mddev_trylock(struct mddev * mddev) |
706 | { | 703 | { |
707 | return mutex_trylock(&mddev->reconfig_mutex); | 704 | return mutex_trylock(&mddev->reconfig_mutex); |
708 | } | 705 | } |
709 | 706 | ||
710 | static struct attribute_group md_redundancy_group; | 707 | static struct attribute_group md_redundancy_group; |
711 | 708 | ||
712 | static void mddev_unlock(mddev_t * mddev) | 709 | static void mddev_unlock(struct mddev * mddev) |
713 | { | 710 | { |
714 | if (mddev->to_remove) { | 711 | if (mddev->to_remove) { |
715 | /* These cannot be removed under reconfig_mutex as | 712 | /* These cannot be removed under reconfig_mutex as |
@@ -744,17 +741,17 @@ static void mddev_unlock(mddev_t * mddev) | |||
744 | } else | 741 | } else |
745 | mutex_unlock(&mddev->reconfig_mutex); | 742 | mutex_unlock(&mddev->reconfig_mutex); |
746 | 743 | ||
747 | /* was we've dropped the mutex we need a spinlock to | 744 | /* As we've dropped the mutex we need a spinlock to |
748 | * make sur the thread doesn't disappear | 745 | * make sure the thread doesn't disappear |
749 | */ | 746 | */ |
750 | spin_lock(&pers_lock); | 747 | spin_lock(&pers_lock); |
751 | md_wakeup_thread(mddev->thread); | 748 | md_wakeup_thread(mddev->thread); |
752 | spin_unlock(&pers_lock); | 749 | spin_unlock(&pers_lock); |
753 | } | 750 | } |
754 | 751 | ||
755 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | 752 | static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr) |
756 | { | 753 | { |
757 | mdk_rdev_t *rdev; | 754 | struct md_rdev *rdev; |
758 | 755 | ||
759 | list_for_each_entry(rdev, &mddev->disks, same_set) | 756 | list_for_each_entry(rdev, &mddev->disks, same_set) |
760 | if (rdev->desc_nr == nr) | 757 | if (rdev->desc_nr == nr) |
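Together, the mddev_lock()/mddev_unlock() pair above defines the pattern the sysfs store handlers later in this file follow; a minimal usage sketch (mddev_lock() is interruptible, so callers must handle a signal aborting the attempt):

    if (mddev_lock(mddev))          /* non-zero: interrupted by a signal */
            return -EINTR;
    /* ... reconfigure the array under reconfig_mutex ... */
    mddev_unlock(mddev);            /* processes any deferred sysfs
                                     * removals and wakes the md thread */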
@@ -763,9 +760,9 @@ static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | |||
763 | return NULL; | 760 | return NULL; |
764 | } | 761 | } |
765 | 762 | ||
766 | static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) | 763 | static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) |
767 | { | 764 | { |
768 | mdk_rdev_t *rdev; | 765 | struct md_rdev *rdev; |
769 | 766 | ||
770 | list_for_each_entry(rdev, &mddev->disks, same_set) | 767 | list_for_each_entry(rdev, &mddev->disks, same_set) |
771 | if (rdev->bdev->bd_dev == dev) | 768 | if (rdev->bdev->bd_dev == dev) |
@@ -774,9 +771,9 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) | |||
774 | return NULL; | 771 | return NULL; |
775 | } | 772 | } |
776 | 773 | ||
777 | static struct mdk_personality *find_pers(int level, char *clevel) | 774 | static struct md_personality *find_pers(int level, char *clevel) |
778 | { | 775 | { |
779 | struct mdk_personality *pers; | 776 | struct md_personality *pers; |
780 | list_for_each_entry(pers, &pers_list, list) { | 777 | list_for_each_entry(pers, &pers_list, list) { |
781 | if (level != LEVEL_NONE && pers->level == level) | 778 | if (level != LEVEL_NONE && pers->level == level) |
782 | return pers; | 779 | return pers; |
@@ -787,13 +784,13 @@ static struct mdk_personality *find_pers(int level, char *clevel) | |||
787 | } | 784 | } |
788 | 785 | ||
789 | /* return the offset of the super block in 512byte sectors */ | 786 | /* return the offset of the super block in 512byte sectors */ |
790 | static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev) | 787 | static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) |
791 | { | 788 | { |
792 | sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; | 789 | sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; |
793 | return MD_NEW_SIZE_SECTORS(num_sectors); | 790 | return MD_NEW_SIZE_SECTORS(num_sectors); |
794 | } | 791 | } |
795 | 792 | ||
796 | static int alloc_disk_sb(mdk_rdev_t * rdev) | 793 | static int alloc_disk_sb(struct md_rdev * rdev) |
797 | { | 794 | { |
798 | if (rdev->sb_page) | 795 | if (rdev->sb_page) |
799 | MD_BUG(); | 796 | MD_BUG(); |
@@ -807,7 +804,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev) | |||
807 | return 0; | 804 | return 0; |
808 | } | 805 | } |
809 | 806 | ||
810 | static void free_disk_sb(mdk_rdev_t * rdev) | 807 | static void free_disk_sb(struct md_rdev * rdev) |
811 | { | 808 | { |
812 | if (rdev->sb_page) { | 809 | if (rdev->sb_page) { |
813 | put_page(rdev->sb_page); | 810 | put_page(rdev->sb_page); |
@@ -825,8 +822,8 @@ static void free_disk_sb(mdk_rdev_t * rdev) | |||
825 | 822 | ||
826 | static void super_written(struct bio *bio, int error) | 823 | static void super_written(struct bio *bio, int error) |
827 | { | 824 | { |
828 | mdk_rdev_t *rdev = bio->bi_private; | 825 | struct md_rdev *rdev = bio->bi_private; |
829 | mddev_t *mddev = rdev->mddev; | 826 | struct mddev *mddev = rdev->mddev; |
830 | 827 | ||
831 | if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { | 828 | if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { |
832 | printk("md: super_written gets error=%d, uptodate=%d\n", | 829 | printk("md: super_written gets error=%d, uptodate=%d\n", |
@@ -840,7 +837,7 @@ static void super_written(struct bio *bio, int error) | |||
840 | bio_put(bio); | 837 | bio_put(bio); |
841 | } | 838 | } |
842 | 839 | ||
843 | void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | 840 | void md_super_write(struct mddev *mddev, struct md_rdev *rdev, |
844 | sector_t sector, int size, struct page *page) | 841 | sector_t sector, int size, struct page *page) |
845 | { | 842 | { |
846 | /* write first size bytes of page to sector of rdev | 843 | /* write first size bytes of page to sector of rdev |
@@ -861,7 +858,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | |||
861 | submit_bio(WRITE_FLUSH_FUA, bio); | 858 | submit_bio(WRITE_FLUSH_FUA, bio); |
862 | } | 859 | } |
863 | 860 | ||
864 | void md_super_wait(mddev_t *mddev) | 861 | void md_super_wait(struct mddev *mddev) |
865 | { | 862 | { |
866 | /* wait for all superblock writes that were scheduled to complete */ | 863 | /* wait for all superblock writes that were scheduled to complete */ |
867 | DEFINE_WAIT(wq); | 864 | DEFINE_WAIT(wq); |
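The superblock write path around md_super_wait() is a counter-and-waitqueue handshake. A hedged sketch of the flow (reconstructed: pending_writes is the in-flight count, sb_wait the waitqueue; the real md_super_wait() open-codes the wait loop with the DEFINE_WAIT visible above):

    /* md_super_write(): account the bio, submit with flush+FUA */
    atomic_inc(&mddev->pending_writes);
    submit_bio(WRITE_FLUSH_FUA, bio);

    /* super_written() (completion): drop the count, wake any waiter */
    if (atomic_dec_and_test(&mddev->pending_writes))
            wake_up(&mddev->sb_wait);

    /* md_super_wait(): equivalent of */
    wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes) == 0);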
@@ -879,7 +876,7 @@ static void bi_complete(struct bio *bio, int error) | |||
879 | complete((struct completion*)bio->bi_private); | 876 | complete((struct completion*)bio->bi_private); |
880 | } | 877 | } |
881 | 878 | ||
882 | int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, | 879 | int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
883 | struct page *page, int rw, bool metadata_op) | 880 | struct page *page, int rw, bool metadata_op) |
884 | { | 881 | { |
885 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); | 882 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); |
@@ -907,7 +904,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, | |||
907 | } | 904 | } |
908 | EXPORT_SYMBOL_GPL(sync_page_io); | 905 | EXPORT_SYMBOL_GPL(sync_page_io); |
909 | 906 | ||
910 | static int read_disk_sb(mdk_rdev_t * rdev, int size) | 907 | static int read_disk_sb(struct md_rdev * rdev, int size) |
911 | { | 908 | { |
912 | char b[BDEVNAME_SIZE]; | 909 | char b[BDEVNAME_SIZE]; |
913 | if (!rdev->sb_page) { | 910 | if (!rdev->sb_page) { |
@@ -1014,7 +1011,7 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) | |||
1014 | * We rely on user-space to write the initial superblock, and support | 1011 | * We rely on user-space to write the initial superblock, and support |
1015 | * reading and updating of superblocks. | 1012 | * reading and updating of superblocks. |
1016 | * Interface methods are: | 1013 | * Interface methods are: |
1017 | * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) | 1014 | * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) |
1018 | * loads and validates a superblock on dev. | 1015 | * loads and validates a superblock on dev. |
1019 | * if refdev != NULL, compare superblocks on both devices | 1016 | * if refdev != NULL, compare superblocks on both devices |
1020 | * Return: | 1017 | * Return: |
@@ -1024,13 +1021,13 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) | |||
1024 | * -EINVAL superblock incompatible or invalid | 1021 | * -EINVAL superblock incompatible or invalid |
1025 | * -othererror e.g. -EIO | 1022 | * -othererror e.g. -EIO |
1026 | * | 1023 | * |
1027 | * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) | 1024 | * int validate_super(struct mddev *mddev, struct md_rdev *dev) |
1028 | * Verify that dev is acceptable into mddev. | 1025 | * Verify that dev is acceptable into mddev. |
1029 | * The first time, mddev->raid_disks will be 0, and data from | 1026 | * The first time, mddev->raid_disks will be 0, and data from |
1030 | * dev should be merged in. Subsequent calls check that dev | 1027 | * dev should be merged in. Subsequent calls check that dev |
1031 | * is new enough. Return 0 or -EINVAL | 1028 | * is new enough. Return 0 or -EINVAL |
1032 | * | 1029 | * |
1033 | * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) | 1030 | * void sync_super(struct mddev *mddev, struct md_rdev *dev) |
1034 | * Update the superblock for rdev with data in mddev | 1031 | * Update the superblock for rdev with data in mddev |
1035 | * This does not write to disc. | 1032 | * This does not write to disc. |
1036 | * | 1033 | * |
@@ -1039,11 +1036,11 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) | |||
1039 | struct super_type { | 1036 | struct super_type { |
1040 | char *name; | 1037 | char *name; |
1041 | struct module *owner; | 1038 | struct module *owner; |
1042 | int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, | 1039 | int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, |
1043 | int minor_version); | 1040 | int minor_version); |
1044 | int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); | 1041 | int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev); |
1045 | void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); | 1042 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); |
1046 | unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev, | 1043 | unsigned long long (*rdev_size_change)(struct md_rdev *rdev, |
1047 | sector_t num_sectors); | 1044 | sector_t num_sectors); |
1048 | }; | 1045 | }; |
1049 | 1046 | ||
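The ops table declared above is instantiated once per on-disk format. A sketch of the initializer, reconstructed from the handler names appearing in the hunks below (the real array is super_types[] in md.c):

    static struct super_type super_types[] = {
            [0] = {
                    .name             = "0.90.0",
                    .owner            = THIS_MODULE,
                    .load_super       = super_90_load,
                    .validate_super   = super_90_validate,
                    .sync_super       = super_90_sync,
                    .rdev_size_change = super_90_rdev_size_change,
            },
            [1] = {
                    .name             = "md-1",
                    .owner            = THIS_MODULE,
                    .load_super       = super_1_load,
                    .validate_super   = super_1_validate,
                    .sync_super       = super_1_sync,
                    .rdev_size_change = super_1_rdev_size_change,
            },
    };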
@@ -1055,7 +1052,7 @@ struct super_type { | |||
1055 | * has a bitmap. Otherwise, it returns 0. | 1052 | * has a bitmap. Otherwise, it returns 0. |
1056 | * | 1053 | * |
1057 | */ | 1054 | */ |
1058 | int md_check_no_bitmap(mddev_t *mddev) | 1055 | int md_check_no_bitmap(struct mddev *mddev) |
1059 | { | 1056 | { |
1060 | if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) | 1057 | if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) |
1061 | return 0; | 1058 | return 0; |
@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(md_check_no_bitmap); | |||
1068 | /* | 1065 | /* |
1069 | * load_super for 0.90.0 | 1066 | * load_super for 0.90.0 |
1070 | */ | 1067 | */ |
1071 | static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | 1068 | static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) |
1072 | { | 1069 | { |
1073 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 1070 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
1074 | mdp_super_t *sb; | 1071 | mdp_super_t *sb; |
@@ -1163,7 +1160,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version | |||
1163 | /* | 1160 | /* |
1164 | * validate_super for 0.90.0 | 1161 | * validate_super for 0.90.0 |
1165 | */ | 1162 | */ |
1166 | static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) | 1163 | static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) |
1167 | { | 1164 | { |
1168 | mdp_disk_t *desc; | 1165 | mdp_disk_t *desc; |
1169 | mdp_super_t *sb = page_address(rdev->sb_page); | 1166 | mdp_super_t *sb = page_address(rdev->sb_page); |
@@ -1275,10 +1272,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1275 | /* | 1272 | /* |
1276 | * sync_super for 0.90.0 | 1273 | * sync_super for 0.90.0 |
1277 | */ | 1274 | */ |
1278 | static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 1275 | static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) |
1279 | { | 1276 | { |
1280 | mdp_super_t *sb; | 1277 | mdp_super_t *sb; |
1281 | mdk_rdev_t *rdev2; | 1278 | struct md_rdev *rdev2; |
1282 | int next_spare = mddev->raid_disks; | 1279 | int next_spare = mddev->raid_disks; |
1283 | 1280 | ||
1284 | 1281 | ||
@@ -1419,7 +1416,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1419 | * rdev_size_change for 0.90.0 | 1416 | * rdev_size_change for 0.90.0 |
1420 | */ | 1417 | */ |
1421 | static unsigned long long | 1418 | static unsigned long long |
1422 | super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | 1419 | super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) |
1423 | { | 1420 | { |
1424 | if (num_sectors && num_sectors < rdev->mddev->dev_sectors) | 1421 | if (num_sectors && num_sectors < rdev->mddev->dev_sectors) |
1425 | return 0; /* component must fit device */ | 1422 | return 0; /* component must fit device */ |
@@ -1469,7 +1466,7 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb) | |||
1469 | 1466 | ||
1470 | static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, | 1467 | static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, |
1471 | int acknowledged); | 1468 | int acknowledged); |
1472 | static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | 1469 | static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) |
1473 | { | 1470 | { |
1474 | struct mdp_superblock_1 *sb; | 1471 | struct mdp_superblock_1 *sb; |
1475 | int ret; | 1472 | int ret; |
@@ -1625,7 +1622,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1625 | return ret; | 1622 | return ret; |
1626 | } | 1623 | } |
1627 | 1624 | ||
1628 | static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | 1625 | static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) |
1629 | { | 1626 | { |
1630 | struct mdp_superblock_1 *sb = page_address(rdev->sb_page); | 1627 | struct mdp_superblock_1 *sb = page_address(rdev->sb_page); |
1631 | __u64 ev1 = le64_to_cpu(sb->events); | 1628 | __u64 ev1 = le64_to_cpu(sb->events); |
@@ -1726,10 +1723,10 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1726 | return 0; | 1723 | return 0; |
1727 | } | 1724 | } |
1728 | 1725 | ||
1729 | static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 1726 | static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) |
1730 | { | 1727 | { |
1731 | struct mdp_superblock_1 *sb; | 1728 | struct mdp_superblock_1 *sb; |
1732 | mdk_rdev_t *rdev2; | 1729 | struct md_rdev *rdev2; |
1733 | int max_dev, i; | 1730 | int max_dev, i; |
1734 | /* make rdev->sb match mddev and rdev data. */ | 1731 | /* make rdev->sb match mddev and rdev data. */ |
1735 | 1732 | ||
@@ -1851,7 +1848,7 @@ retry: | |||
1851 | } | 1848 | } |
1852 | 1849 | ||
1853 | static unsigned long long | 1850 | static unsigned long long |
1854 | super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | 1851 | super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) |
1855 | { | 1852 | { |
1856 | struct mdp_superblock_1 *sb; | 1853 | struct mdp_superblock_1 *sb; |
1857 | sector_t max_sectors; | 1854 | sector_t max_sectors; |
@@ -1905,7 +1902,7 @@ static struct super_type super_types[] = { | |||
1905 | }, | 1902 | }, |
1906 | }; | 1903 | }; |
1907 | 1904 | ||
1908 | static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev) | 1905 | static void sync_super(struct mddev *mddev, struct md_rdev *rdev) |
1909 | { | 1906 | { |
1910 | if (mddev->sync_super) { | 1907 | if (mddev->sync_super) { |
1911 | mddev->sync_super(mddev, rdev); | 1908 | mddev->sync_super(mddev, rdev); |
@@ -1917,9 +1914,9 @@ static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1917 | super_types[mddev->major_version].sync_super(mddev, rdev); | 1914 | super_types[mddev->major_version].sync_super(mddev, rdev); |
1918 | } | 1915 | } |
1919 | 1916 | ||
1920 | static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) | 1917 | static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) |
1921 | { | 1918 | { |
1922 | mdk_rdev_t *rdev, *rdev2; | 1919 | struct md_rdev *rdev, *rdev2; |
1923 | 1920 | ||
1924 | rcu_read_lock(); | 1921 | rcu_read_lock(); |
1925 | rdev_for_each_rcu(rdev, mddev1) | 1922 | rdev_for_each_rcu(rdev, mddev1) |
@@ -1942,9 +1939,9 @@ static LIST_HEAD(pending_raid_disks); | |||
1942 | * from the array. It only succeeds if all working and active component devices | 1939 | * from the array. It only succeeds if all working and active component devices |
1943 | * are integrity capable with matching profiles. | 1940 | * are integrity capable with matching profiles. |
1944 | */ | 1941 | */ |
1945 | int md_integrity_register(mddev_t *mddev) | 1942 | int md_integrity_register(struct mddev *mddev) |
1946 | { | 1943 | { |
1947 | mdk_rdev_t *rdev, *reference = NULL; | 1944 | struct md_rdev *rdev, *reference = NULL; |
1948 | 1945 | ||
1949 | if (list_empty(&mddev->disks)) | 1946 | if (list_empty(&mddev->disks)) |
1950 | return 0; /* nothing to do */ | 1947 | return 0; /* nothing to do */ |
@@ -1989,7 +1986,7 @@ int md_integrity_register(mddev_t *mddev) | |||
1989 | EXPORT_SYMBOL(md_integrity_register); | 1986 | EXPORT_SYMBOL(md_integrity_register); |
1990 | 1987 | ||
1991 | /* Disable data integrity if non-capable/non-matching disk is being added */ | 1988 | /* Disable data integrity if non-capable/non-matching disk is being added */ |
1992 | void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | 1989 | void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) |
1993 | { | 1990 | { |
1994 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); | 1991 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); |
1995 | struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); | 1992 | struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); |
@@ -2006,7 +2003,7 @@ void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | |||
2006 | } | 2003 | } |
2007 | EXPORT_SYMBOL(md_integrity_add_rdev); | 2004 | EXPORT_SYMBOL(md_integrity_add_rdev); |
2008 | 2005 | ||
2009 | static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | 2006 | static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev) |
2010 | { | 2007 | { |
2011 | char b[BDEVNAME_SIZE]; | 2008 | char b[BDEVNAME_SIZE]; |
2012 | struct kobject *ko; | 2009 | struct kobject *ko; |
@@ -2086,12 +2083,12 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | |||
2086 | 2083 | ||
2087 | static void md_delayed_delete(struct work_struct *ws) | 2084 | static void md_delayed_delete(struct work_struct *ws) |
2088 | { | 2085 | { |
2089 | mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work); | 2086 | struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); |
2090 | kobject_del(&rdev->kobj); | 2087 | kobject_del(&rdev->kobj); |
2091 | kobject_put(&rdev->kobj); | 2088 | kobject_put(&rdev->kobj); |
2092 | } | 2089 | } |
2093 | 2090 | ||
2094 | static void unbind_rdev_from_array(mdk_rdev_t * rdev) | 2091 | static void unbind_rdev_from_array(struct md_rdev * rdev) |
2095 | { | 2092 | { |
2096 | char b[BDEVNAME_SIZE]; | 2093 | char b[BDEVNAME_SIZE]; |
2097 | if (!rdev->mddev) { | 2094 | if (!rdev->mddev) { |
@@ -2123,14 +2120,14 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) | |||
2123 | * otherwise reused by a RAID array (or any other kernel | 2120 | * otherwise reused by a RAID array (or any other kernel |
2124 | * subsystem), by bd_claiming the device. | 2121 | * subsystem), by bd_claiming the device. |
2125 | */ | 2122 | */ |
2126 | static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) | 2123 | static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) |
2127 | { | 2124 | { |
2128 | int err = 0; | 2125 | int err = 0; |
2129 | struct block_device *bdev; | 2126 | struct block_device *bdev; |
2130 | char b[BDEVNAME_SIZE]; | 2127 | char b[BDEVNAME_SIZE]; |
2131 | 2128 | ||
2132 | bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, | 2129 | bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
2133 | shared ? (mdk_rdev_t *)lock_rdev : rdev); | 2130 | shared ? (struct md_rdev *)lock_rdev : rdev); |
2134 | if (IS_ERR(bdev)) { | 2131 | if (IS_ERR(bdev)) { |
2135 | printk(KERN_ERR "md: could not open %s.\n", | 2132 | printk(KERN_ERR "md: could not open %s.\n", |
2136 | __bdevname(dev, b)); | 2133 | __bdevname(dev, b)); |
@@ -2140,7 +2137,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) | |||
2140 | return err; | 2137 | return err; |
2141 | } | 2138 | } |
2142 | 2139 | ||
2143 | static void unlock_rdev(mdk_rdev_t *rdev) | 2140 | static void unlock_rdev(struct md_rdev *rdev) |
2144 | { | 2141 | { |
2145 | struct block_device *bdev = rdev->bdev; | 2142 | struct block_device *bdev = rdev->bdev; |
2146 | rdev->bdev = NULL; | 2143 | rdev->bdev = NULL; |
@@ -2151,7 +2148,7 @@ static void unlock_rdev(mdk_rdev_t *rdev) | |||
2151 | 2148 | ||
2152 | void md_autodetect_dev(dev_t dev); | 2149 | void md_autodetect_dev(dev_t dev); |
2153 | 2150 | ||
2154 | static void export_rdev(mdk_rdev_t * rdev) | 2151 | static void export_rdev(struct md_rdev * rdev) |
2155 | { | 2152 | { |
2156 | char b[BDEVNAME_SIZE]; | 2153 | char b[BDEVNAME_SIZE]; |
2157 | printk(KERN_INFO "md: export_rdev(%s)\n", | 2154 | printk(KERN_INFO "md: export_rdev(%s)\n", |
@@ -2167,15 +2164,15 @@ static void export_rdev(mdk_rdev_t * rdev) | |||
2167 | kobject_put(&rdev->kobj); | 2164 | kobject_put(&rdev->kobj); |
2168 | } | 2165 | } |
2169 | 2166 | ||
2170 | static void kick_rdev_from_array(mdk_rdev_t * rdev) | 2167 | static void kick_rdev_from_array(struct md_rdev * rdev) |
2171 | { | 2168 | { |
2172 | unbind_rdev_from_array(rdev); | 2169 | unbind_rdev_from_array(rdev); |
2173 | export_rdev(rdev); | 2170 | export_rdev(rdev); |
2174 | } | 2171 | } |
2175 | 2172 | ||
2176 | static void export_array(mddev_t *mddev) | 2173 | static void export_array(struct mddev *mddev) |
2177 | { | 2174 | { |
2178 | mdk_rdev_t *rdev, *tmp; | 2175 | struct md_rdev *rdev, *tmp; |
2179 | 2176 | ||
2180 | rdev_for_each(rdev, tmp, mddev) { | 2177 | rdev_for_each(rdev, tmp, mddev) { |
2181 | if (!rdev->mddev) { | 2178 | if (!rdev->mddev) { |
@@ -2271,7 +2268,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb) | |||
2271 | ); | 2268 | ); |
2272 | } | 2269 | } |
2273 | 2270 | ||
2274 | static void print_rdev(mdk_rdev_t *rdev, int major_version) | 2271 | static void print_rdev(struct md_rdev *rdev, int major_version) |
2275 | { | 2272 | { |
2276 | char b[BDEVNAME_SIZE]; | 2273 | char b[BDEVNAME_SIZE]; |
2277 | printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", | 2274 | printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", |
@@ -2295,8 +2292,8 @@ static void print_rdev(mdk_rdev_t *rdev, int major_version) | |||
2295 | static void md_print_devices(void) | 2292 | static void md_print_devices(void) |
2296 | { | 2293 | { |
2297 | struct list_head *tmp; | 2294 | struct list_head *tmp; |
2298 | mdk_rdev_t *rdev; | 2295 | struct md_rdev *rdev; |
2299 | mddev_t *mddev; | 2296 | struct mddev *mddev; |
2300 | char b[BDEVNAME_SIZE]; | 2297 | char b[BDEVNAME_SIZE]; |
2301 | 2298 | ||
2302 | printk("\n"); | 2299 | printk("\n"); |
@@ -2321,7 +2318,7 @@ static void md_print_devices(void) | |||
2321 | } | 2318 | } |
2322 | 2319 | ||
2323 | 2320 | ||
2324 | static void sync_sbs(mddev_t * mddev, int nospares) | 2321 | static void sync_sbs(struct mddev * mddev, int nospares) |
2325 | { | 2322 | { |
2326 | /* Update each superblock (in-memory image), but | 2323 | /* Update each superblock (in-memory image), but |
2327 | * if we are allowed to, skip spares which already | 2324 | * if we are allowed to, skip spares which already |
@@ -2329,7 +2326,7 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
2329 | * (which would mean they aren't being marked as dirty | 2326 | * (which would mean they aren't being marked as dirty |
2330 | * with the rest of the array) | 2327 | * with the rest of the array) |
2331 | */ | 2328 | */ |
2332 | mdk_rdev_t *rdev; | 2329 | struct md_rdev *rdev; |
2333 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2330 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2334 | if (rdev->sb_events == mddev->events || | 2331 | if (rdev->sb_events == mddev->events || |
2335 | (nospares && | 2332 | (nospares && |
@@ -2344,9 +2341,9 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
2344 | } | 2341 | } |
2345 | } | 2342 | } |
2346 | 2343 | ||
2347 | static void md_update_sb(mddev_t * mddev, int force_change) | 2344 | static void md_update_sb(struct mddev * mddev, int force_change) |
2348 | { | 2345 | { |
2349 | mdk_rdev_t *rdev; | 2346 | struct md_rdev *rdev; |
2350 | int sync_req; | 2347 | int sync_req; |
2351 | int nospares = 0; | 2348 | int nospares = 0; |
2352 | int any_badblocks_changed = 0; | 2349 | int any_badblocks_changed = 0; |
@@ -2442,27 +2439,24 @@ repeat: | |||
2442 | sync_sbs(mddev, nospares); | 2439 | sync_sbs(mddev, nospares); |
2443 | spin_unlock_irq(&mddev->write_lock); | 2440 | spin_unlock_irq(&mddev->write_lock); |
2444 | 2441 | ||
2445 | dprintk(KERN_INFO | 2442 | pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", |
2446 | "md: updating %s RAID superblock on device (in sync %d)\n", | 2443 | mdname(mddev), mddev->in_sync); |
2447 | mdname(mddev),mddev->in_sync); | ||
2448 | 2444 | ||
2449 | bitmap_update_sb(mddev->bitmap); | 2445 | bitmap_update_sb(mddev->bitmap); |
2450 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2446 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2451 | char b[BDEVNAME_SIZE]; | 2447 | char b[BDEVNAME_SIZE]; |
2452 | dprintk(KERN_INFO "md: "); | 2448 | |
2453 | if (rdev->sb_loaded != 1) | 2449 | if (rdev->sb_loaded != 1) |
2454 | continue; /* no noise on spare devices */ | 2450 | continue; /* no noise on spare devices */ |
2455 | if (test_bit(Faulty, &rdev->flags)) | ||
2456 | dprintk("(skipping faulty "); | ||
2457 | 2451 | ||
2458 | dprintk("%s ", bdevname(rdev->bdev,b)); | 2452 | if (!test_bit(Faulty, &rdev->flags) && |
2459 | if (!test_bit(Faulty, &rdev->flags)) { | 2453 | rdev->saved_raid_disk == -1) { |
2460 | md_super_write(mddev,rdev, | 2454 | md_super_write(mddev,rdev, |
2461 | rdev->sb_start, rdev->sb_size, | 2455 | rdev->sb_start, rdev->sb_size, |
2462 | rdev->sb_page); | 2456 | rdev->sb_page); |
2463 | dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", | 2457 | pr_debug("md: (write) %s's sb offset: %llu\n", |
2464 | bdevname(rdev->bdev,b), | 2458 | bdevname(rdev->bdev, b), |
2465 | (unsigned long long)rdev->sb_start); | 2459 | (unsigned long long)rdev->sb_start); |
2466 | rdev->sb_events = mddev->events; | 2460 | rdev->sb_events = mddev->events; |
2467 | if (rdev->badblocks.size) { | 2461 | if (rdev->badblocks.size) { |
2468 | md_super_write(mddev, rdev, | 2462 | md_super_write(mddev, rdev, |
@@ -2472,8 +2466,12 @@ repeat: | |||
2472 | rdev->badblocks.size = 0; | 2466 | rdev->badblocks.size = 0; |
2473 | } | 2467 | } |
2474 | 2468 | ||
2475 | } else | 2469 | } else if (test_bit(Faulty, &rdev->flags)) |
2476 | dprintk(")\n"); | 2470 | pr_debug("md: %s (skipping faulty)\n", |
2471 | bdevname(rdev->bdev, b)); | ||
2472 | else | ||
2473 | pr_debug("(skipping incremental s/r "); | ||
2474 | |||
2477 | if (mddev->level == LEVEL_MULTIPATH) | 2475 | if (mddev->level == LEVEL_MULTIPATH) |
2478 | /* only need to write one superblock... */ | 2476 | /* only need to write one superblock... */ |
2479 | break; | 2477 | break; |
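Besides the typedef churn, this hunk converts the driver-private dprintk() to pr_debug(), which compiles away unless DEBUG is defined and, with CONFIG_DYNAMIC_DEBUG, can be flipped on per call site at runtime. A usage sketch (the control-file path assumes debugfs is mounted at /sys/kernel/debug):

    pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
             mdname(mddev), mddev->in_sync);

    /* enable all pr_debug sites in md.c at runtime:
     *
     *   echo 'file md.c +p' > /sys/kernel/debug/dynamic_debug/control
     */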
@@ -2527,12 +2525,12 @@ static int cmd_match(const char *cmd, const char *str) | |||
2527 | 2525 | ||
2528 | struct rdev_sysfs_entry { | 2526 | struct rdev_sysfs_entry { |
2529 | struct attribute attr; | 2527 | struct attribute attr; |
2530 | ssize_t (*show)(mdk_rdev_t *, char *); | 2528 | ssize_t (*show)(struct md_rdev *, char *); |
2531 | ssize_t (*store)(mdk_rdev_t *, const char *, size_t); | 2529 | ssize_t (*store)(struct md_rdev *, const char *, size_t); |
2532 | }; | 2530 | }; |
2533 | 2531 | ||
2534 | static ssize_t | 2532 | static ssize_t |
2535 | state_show(mdk_rdev_t *rdev, char *page) | 2533 | state_show(struct md_rdev *rdev, char *page) |
2536 | { | 2534 | { |
2537 | char *sep = ""; | 2535 | char *sep = ""; |
2538 | size_t len = 0; | 2536 | size_t len = 0; |
@@ -2568,7 +2566,7 @@ state_show(mdk_rdev_t *rdev, char *page) | |||
2568 | } | 2566 | } |
2569 | 2567 | ||
2570 | static ssize_t | 2568 | static ssize_t |
2571 | state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2569 | state_store(struct md_rdev *rdev, const char *buf, size_t len) |
2572 | { | 2570 | { |
2573 | /* can write | 2571 | /* can write |
2574 | * faulty - simulates an error | 2572 | * faulty - simulates an error |
@@ -2592,7 +2590,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2592 | if (rdev->raid_disk >= 0) | 2590 | if (rdev->raid_disk >= 0) |
2593 | err = -EBUSY; | 2591 | err = -EBUSY; |
2594 | else { | 2592 | else { |
2595 | mddev_t *mddev = rdev->mddev; | 2593 | struct mddev *mddev = rdev->mddev; |
2596 | kick_rdev_from_array(rdev); | 2594 | kick_rdev_from_array(rdev); |
2597 | if (mddev->pers) | 2595 | if (mddev->pers) |
2598 | md_update_sb(mddev, 1); | 2596 | md_update_sb(mddev, 1); |
@@ -2641,13 +2639,13 @@ static struct rdev_sysfs_entry rdev_state = | |||
2641 | __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); | 2639 | __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); |
2642 | 2640 | ||
2643 | static ssize_t | 2641 | static ssize_t |
2644 | errors_show(mdk_rdev_t *rdev, char *page) | 2642 | errors_show(struct md_rdev *rdev, char *page) |
2645 | { | 2643 | { |
2646 | return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); | 2644 | return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); |
2647 | } | 2645 | } |
2648 | 2646 | ||
2649 | static ssize_t | 2647 | static ssize_t |
2650 | errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2648 | errors_store(struct md_rdev *rdev, const char *buf, size_t len) |
2651 | { | 2649 | { |
2652 | char *e; | 2650 | char *e; |
2653 | unsigned long n = simple_strtoul(buf, &e, 10); | 2651 | unsigned long n = simple_strtoul(buf, &e, 10); |
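Each rdev attribute is one show/store pair wired up with __ATTR(), and the store side uses the parse-and-validate idiom begun above: accept the value only if the whole buffer, minus an optional trailing newline, was consumed. A sketch of the complete pattern (the body matches how errors_store() finishes in md.c):

    static ssize_t
    errors_store(struct md_rdev *rdev, const char *buf, size_t len)
    {
            char *e;
            unsigned long n = simple_strtoul(buf, &e, 10);

            /* reject trailing garbage: e must point at NUL or '\n' */
            if (*buf && (*e == 0 || *e == '\n')) {
                    atomic_set(&rdev->corrected_errors, n);
                    return len;
            }
            return -EINVAL;
    }
    static struct rdev_sysfs_entry rdev_errors =
    __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);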
@@ -2661,7 +2659,7 @@ static struct rdev_sysfs_entry rdev_errors = | |||
2661 | __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); | 2659 | __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); |
2662 | 2660 | ||
2663 | static ssize_t | 2661 | static ssize_t |
2664 | slot_show(mdk_rdev_t *rdev, char *page) | 2662 | slot_show(struct md_rdev *rdev, char *page) |
2665 | { | 2663 | { |
2666 | if (rdev->raid_disk < 0) | 2664 | if (rdev->raid_disk < 0) |
2667 | return sprintf(page, "none\n"); | 2665 | return sprintf(page, "none\n"); |
@@ -2670,7 +2668,7 @@ slot_show(mdk_rdev_t *rdev, char *page) | |||
2670 | } | 2668 | } |
2671 | 2669 | ||
2672 | static ssize_t | 2670 | static ssize_t |
2673 | slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2671 | slot_store(struct md_rdev *rdev, const char *buf, size_t len) |
2674 | { | 2672 | { |
2675 | char *e; | 2673 | char *e; |
2676 | int err; | 2674 | int err; |
@@ -2701,7 +2699,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2701 | set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); | 2699 | set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); |
2702 | md_wakeup_thread(rdev->mddev->thread); | 2700 | md_wakeup_thread(rdev->mddev->thread); |
2703 | } else if (rdev->mddev->pers) { | 2701 | } else if (rdev->mddev->pers) { |
2704 | mdk_rdev_t *rdev2; | 2702 | struct md_rdev *rdev2; |
2705 | /* Activating a spare .. or possibly reactivating | 2703 | /* Activating a spare .. or possibly reactivating |
2706 | * if we ever get bitmaps working here. | 2704 | * if we ever get bitmaps working here. |
2707 | */ | 2705 | */ |
@@ -2728,6 +2726,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2728 | rdev->saved_raid_disk = slot; | 2726 | rdev->saved_raid_disk = slot; |
2729 | else | 2727 | else |
2730 | rdev->saved_raid_disk = -1; | 2728 | rdev->saved_raid_disk = -1; |
2729 | clear_bit(In_sync, &rdev->flags); | ||
2731 | err = rdev->mddev->pers-> | 2730 | err = rdev->mddev->pers-> |
2732 | hot_add_disk(rdev->mddev, rdev); | 2731 | hot_add_disk(rdev->mddev, rdev); |
2733 | if (err) { | 2732 | if (err) { |
@@ -2757,13 +2756,13 @@ static struct rdev_sysfs_entry rdev_slot = | |||
2757 | __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); | 2756 | __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); |
2758 | 2757 | ||
2759 | static ssize_t | 2758 | static ssize_t |
2760 | offset_show(mdk_rdev_t *rdev, char *page) | 2759 | offset_show(struct md_rdev *rdev, char *page) |
2761 | { | 2760 | { |
2762 | return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); | 2761 | return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); |
2763 | } | 2762 | } |
2764 | 2763 | ||
2765 | static ssize_t | 2764 | static ssize_t |
2766 | offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2765 | offset_store(struct md_rdev *rdev, const char *buf, size_t len) |
2767 | { | 2766 | { |
2768 | char *e; | 2767 | char *e; |
2769 | unsigned long long offset = simple_strtoull(buf, &e, 10); | 2768 | unsigned long long offset = simple_strtoull(buf, &e, 10); |
@@ -2783,7 +2782,7 @@ static struct rdev_sysfs_entry rdev_offset = | |||
2783 | __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); | 2782 | __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); |
2784 | 2783 | ||
2785 | static ssize_t | 2784 | static ssize_t |
2786 | rdev_size_show(mdk_rdev_t *rdev, char *page) | 2785 | rdev_size_show(struct md_rdev *rdev, char *page) |
2787 | { | 2786 | { |
2788 | return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); | 2787 | return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); |
2789 | } | 2788 | } |
@@ -2818,9 +2817,9 @@ static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) | |||
2818 | } | 2817 | } |
2819 | 2818 | ||
2820 | static ssize_t | 2819 | static ssize_t |
2821 | rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2820 | rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) |
2822 | { | 2821 | { |
2823 | mddev_t *my_mddev = rdev->mddev; | 2822 | struct mddev *my_mddev = rdev->mddev; |
2824 | sector_t oldsectors = rdev->sectors; | 2823 | sector_t oldsectors = rdev->sectors; |
2825 | sector_t sectors; | 2824 | sector_t sectors; |
2826 | 2825 | ||
@@ -2846,13 +2845,13 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2846 | * a deadlock. We have already changed rdev->sectors, and if | 2845 | * a deadlock. We have already changed rdev->sectors, and if |
2847 | * we have to change it back, we will have the lock again. | 2846 | * we have to change it back, we will have the lock again. |
2848 | */ | 2847 | */ |
2849 | mddev_t *mddev; | 2848 | struct mddev *mddev; |
2850 | int overlap = 0; | 2849 | int overlap = 0; |
2851 | struct list_head *tmp; | 2850 | struct list_head *tmp; |
2852 | 2851 | ||
2853 | mddev_unlock(my_mddev); | 2852 | mddev_unlock(my_mddev); |
2854 | for_each_mddev(mddev, tmp) { | 2853 | for_each_mddev(mddev, tmp) { |
2855 | mdk_rdev_t *rdev2; | 2854 | struct md_rdev *rdev2; |
2856 | 2855 | ||
2857 | mddev_lock(mddev); | 2856 | mddev_lock(mddev); |
2858 | list_for_each_entry(rdev2, &mddev->disks, same_set) | 2857 | list_for_each_entry(rdev2, &mddev->disks, same_set) |
@@ -2889,7 +2888,7 @@ static struct rdev_sysfs_entry rdev_size = | |||
2889 | __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); | 2888 | __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); |
2890 | 2889 | ||
2891 | 2890 | ||
2892 | static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) | 2891 | static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) |
2893 | { | 2892 | { |
2894 | unsigned long long recovery_start = rdev->recovery_offset; | 2893 | unsigned long long recovery_start = rdev->recovery_offset; |
2895 | 2894 | ||
@@ -2900,7 +2899,7 @@ static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) | |||
2900 | return sprintf(page, "%llu\n", recovery_start); | 2899 | return sprintf(page, "%llu\n", recovery_start); |
2901 | } | 2900 | } |
2902 | 2901 | ||
2903 | static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2902 | static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) |
2904 | { | 2903 | { |
2905 | unsigned long long recovery_start; | 2904 | unsigned long long recovery_start; |
2906 | 2905 | ||
@@ -2930,11 +2929,11 @@ badblocks_show(struct badblocks *bb, char *page, int unack); | |||
2930 | static ssize_t | 2929 | static ssize_t |
2931 | badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); | 2930 | badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); |
2932 | 2931 | ||
2933 | static ssize_t bb_show(mdk_rdev_t *rdev, char *page) | 2932 | static ssize_t bb_show(struct md_rdev *rdev, char *page) |
2934 | { | 2933 | { |
2935 | return badblocks_show(&rdev->badblocks, page, 0); | 2934 | return badblocks_show(&rdev->badblocks, page, 0); |
2936 | } | 2935 | } |
2937 | static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len) | 2936 | static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) |
2938 | { | 2937 | { |
2939 | int rv = badblocks_store(&rdev->badblocks, page, len, 0); | 2938 | int rv = badblocks_store(&rdev->badblocks, page, len, 0); |
2940 | /* Maybe that ack was all we needed */ | 2939 | /* Maybe that ack was all we needed */ |
@@ -2946,11 +2945,11 @@ static struct rdev_sysfs_entry rdev_bad_blocks = | |||
2946 | __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); | 2945 | __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); |
2947 | 2946 | ||
2948 | 2947 | ||
2949 | static ssize_t ubb_show(mdk_rdev_t *rdev, char *page) | 2948 | static ssize_t ubb_show(struct md_rdev *rdev, char *page) |
2950 | { | 2949 | { |
2951 | return badblocks_show(&rdev->badblocks, page, 1); | 2950 | return badblocks_show(&rdev->badblocks, page, 1); |
2952 | } | 2951 | } |
2953 | static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len) | 2952 | static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) |
2954 | { | 2953 | { |
2955 | return badblocks_store(&rdev->badblocks, page, len, 1); | 2954 | return badblocks_store(&rdev->badblocks, page, len, 1); |
2956 | } | 2955 | } |
@@ -2972,8 +2971,8 @@ static ssize_t | |||
2972 | rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 2971 | rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
2973 | { | 2972 | { |
2974 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2973 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2975 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2974 | struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); |
2976 | mddev_t *mddev = rdev->mddev; | 2975 | struct mddev *mddev = rdev->mddev; |
2977 | ssize_t rv; | 2976 | ssize_t rv; |
2978 | 2977 | ||
2979 | if (!entry->show) | 2978 | if (!entry->show) |
@@ -2995,9 +2994,9 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2995 | const char *page, size_t length) | 2994 | const char *page, size_t length) |
2996 | { | 2995 | { |
2997 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2996 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2998 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2997 | struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); |
2999 | ssize_t rv; | 2998 | ssize_t rv; |
3000 | mddev_t *mddev = rdev->mddev; | 2999 | struct mddev *mddev = rdev->mddev; |
3001 | 3000 | ||
3002 | if (!entry->store) | 3001 | if (!entry->store) |
3003 | return -EIO; | 3002 | return -EIO; |
@@ -3016,7 +3015,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3016 | 3015 | ||
3017 | static void rdev_free(struct kobject *ko) | 3016 | static void rdev_free(struct kobject *ko) |
3018 | { | 3017 | { |
3019 | mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); | 3018 | struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); |
3020 | kfree(rdev); | 3019 | kfree(rdev); |
3021 | } | 3020 | } |
3022 | static const struct sysfs_ops rdev_sysfs_ops = { | 3021 | static const struct sysfs_ops rdev_sysfs_ops = { |
@@ -3029,7 +3028,7 @@ static struct kobj_type rdev_ktype = { | |||
3029 | .default_attrs = rdev_default_attrs, | 3028 | .default_attrs = rdev_default_attrs, |
3030 | }; | 3029 | }; |
3031 | 3030 | ||
3032 | int md_rdev_init(mdk_rdev_t *rdev) | 3031 | int md_rdev_init(struct md_rdev *rdev) |
3033 | { | 3032 | { |
3034 | rdev->desc_nr = -1; | 3033 | rdev->desc_nr = -1; |
3035 | rdev->saved_raid_disk = -1; | 3034 | rdev->saved_raid_disk = -1; |
@@ -3072,11 +3071,11 @@ EXPORT_SYMBOL_GPL(md_rdev_init); | |||
3072 | * | 3071 | * |
3073 | * a faulty rdev _never_ has rdev->sb set. | 3072 | * a faulty rdev _never_ has rdev->sb set. |
3074 | */ | 3073 | */ |
3075 | static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) | 3074 | static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) |
3076 | { | 3075 | { |
3077 | char b[BDEVNAME_SIZE]; | 3076 | char b[BDEVNAME_SIZE]; |
3078 | int err; | 3077 | int err; |
3079 | mdk_rdev_t *rdev; | 3078 | struct md_rdev *rdev; |
3080 | sector_t size; | 3079 | sector_t size; |
3081 | 3080 | ||
3082 | rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); | 3081 | rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); |
@@ -3145,10 +3144,10 @@ abort_free: | |||
3145 | */ | 3144 | */ |
3146 | 3145 | ||
3147 | 3146 | ||
3148 | static void analyze_sbs(mddev_t * mddev) | 3147 | static void analyze_sbs(struct mddev * mddev) |
3149 | { | 3148 | { |
3150 | int i; | 3149 | int i; |
3151 | mdk_rdev_t *rdev, *freshest, *tmp; | 3150 | struct md_rdev *rdev, *freshest, *tmp; |
3152 | char b[BDEVNAME_SIZE]; | 3151 | char b[BDEVNAME_SIZE]; |
3153 | 3152 | ||
3154 | freshest = NULL; | 3153 | freshest = NULL; |
@@ -3248,13 +3247,13 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) | |||
3248 | static void md_safemode_timeout(unsigned long data); | 3247 | static void md_safemode_timeout(unsigned long data); |
3249 | 3248 | ||
3250 | static ssize_t | 3249 | static ssize_t |
3251 | safe_delay_show(mddev_t *mddev, char *page) | 3250 | safe_delay_show(struct mddev *mddev, char *page) |
3252 | { | 3251 | { |
3253 | int msec = (mddev->safemode_delay*1000)/HZ; | 3252 | int msec = (mddev->safemode_delay*1000)/HZ; |
3254 | return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); | 3253 | return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); |
3255 | } | 3254 | } |
3256 | static ssize_t | 3255 | static ssize_t |
3257 | safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) | 3256 | safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) |
3258 | { | 3257 | { |
3259 | unsigned long msec; | 3258 | unsigned long msec; |
3260 | 3259 | ||
@@ -3276,9 +3275,9 @@ static struct md_sysfs_entry md_safe_delay = | |||
3276 | __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); | 3275 | __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); |
3277 | 3276 | ||
3278 | static ssize_t | 3277 | static ssize_t |
3279 | level_show(mddev_t *mddev, char *page) | 3278 | level_show(struct mddev *mddev, char *page) |
3280 | { | 3279 | { |
3281 | struct mdk_personality *p = mddev->pers; | 3280 | struct md_personality *p = mddev->pers; |
3282 | if (p) | 3281 | if (p) |
3283 | return sprintf(page, "%s\n", p->name); | 3282 | return sprintf(page, "%s\n", p->name); |
3284 | else if (mddev->clevel[0]) | 3283 | else if (mddev->clevel[0]) |
@@ -3290,14 +3289,14 @@ level_show(mddev_t *mddev, char *page) | |||
3290 | } | 3289 | } |
3291 | 3290 | ||
3292 | static ssize_t | 3291 | static ssize_t |
3293 | level_store(mddev_t *mddev, const char *buf, size_t len) | 3292 | level_store(struct mddev *mddev, const char *buf, size_t len) |
3294 | { | 3293 | { |
3295 | char clevel[16]; | 3294 | char clevel[16]; |
3296 | ssize_t rv = len; | 3295 | ssize_t rv = len; |
3297 | struct mdk_personality *pers; | 3296 | struct md_personality *pers; |
3298 | long level; | 3297 | long level; |
3299 | void *priv; | 3298 | void *priv; |
3300 | mdk_rdev_t *rdev; | 3299 | struct md_rdev *rdev; |
3301 | 3300 | ||
3302 | if (mddev->pers == NULL) { | 3301 | if (mddev->pers == NULL) { |
3303 | if (len == 0) | 3302 | if (len == 0) |
@@ -3471,7 +3470,7 @@ __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); | |||
3471 | 3470 | ||
3472 | 3471 | ||
3473 | static ssize_t | 3472 | static ssize_t |
3474 | layout_show(mddev_t *mddev, char *page) | 3473 | layout_show(struct mddev *mddev, char *page) |
3475 | { | 3474 | { |
3476 | /* just a number, not meaningful for all levels */ | 3475 | /* just a number, not meaningful for all levels */ |
3477 | if (mddev->reshape_position != MaxSector && | 3476 | if (mddev->reshape_position != MaxSector && |
@@ -3482,7 +3481,7 @@ layout_show(mddev_t *mddev, char *page) | |||
3482 | } | 3481 | } |
3483 | 3482 | ||
3484 | static ssize_t | 3483 | static ssize_t |
3485 | layout_store(mddev_t *mddev, const char *buf, size_t len) | 3484 | layout_store(struct mddev *mddev, const char *buf, size_t len) |
3486 | { | 3485 | { |
3487 | char *e; | 3486 | char *e; |
3488 | unsigned long n = simple_strtoul(buf, &e, 10); | 3487 | unsigned long n = simple_strtoul(buf, &e, 10); |
@@ -3512,7 +3511,7 @@ __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); | |||
3512 | 3511 | ||
3513 | 3512 | ||
3514 | static ssize_t | 3513 | static ssize_t |
3515 | raid_disks_show(mddev_t *mddev, char *page) | 3514 | raid_disks_show(struct mddev *mddev, char *page) |
3516 | { | 3515 | { |
3517 | if (mddev->raid_disks == 0) | 3516 | if (mddev->raid_disks == 0) |
3518 | return 0; | 3517 | return 0; |
@@ -3523,10 +3522,10 @@ raid_disks_show(mddev_t *mddev, char *page) | |||
3523 | return sprintf(page, "%d\n", mddev->raid_disks); | 3522 | return sprintf(page, "%d\n", mddev->raid_disks); |
3524 | } | 3523 | } |
3525 | 3524 | ||
3526 | static int update_raid_disks(mddev_t *mddev, int raid_disks); | 3525 | static int update_raid_disks(struct mddev *mddev, int raid_disks); |
3527 | 3526 | ||
3528 | static ssize_t | 3527 | static ssize_t |
3529 | raid_disks_store(mddev_t *mddev, const char *buf, size_t len) | 3528 | raid_disks_store(struct mddev *mddev, const char *buf, size_t len) |
3530 | { | 3529 | { |
3531 | char *e; | 3530 | char *e; |
3532 | int rv = 0; | 3531 | int rv = 0; |
@@ -3549,7 +3548,7 @@ static struct md_sysfs_entry md_raid_disks = | |||
3549 | __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); | 3548 | __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); |
3550 | 3549 | ||
3551 | static ssize_t | 3550 | static ssize_t |
3552 | chunk_size_show(mddev_t *mddev, char *page) | 3551 | chunk_size_show(struct mddev *mddev, char *page) |
3553 | { | 3552 | { |
3554 | if (mddev->reshape_position != MaxSector && | 3553 | if (mddev->reshape_position != MaxSector && |
3555 | mddev->chunk_sectors != mddev->new_chunk_sectors) | 3554 | mddev->chunk_sectors != mddev->new_chunk_sectors) |
@@ -3560,7 +3559,7 @@ chunk_size_show(mddev_t *mddev, char *page) | |||
3560 | } | 3559 | } |
3561 | 3560 | ||
3562 | static ssize_t | 3561 | static ssize_t |
3563 | chunk_size_store(mddev_t *mddev, const char *buf, size_t len) | 3562 | chunk_size_store(struct mddev *mddev, const char *buf, size_t len) |
3564 | { | 3563 | { |
3565 | char *e; | 3564 | char *e; |
3566 | unsigned long n = simple_strtoul(buf, &e, 10); | 3565 | unsigned long n = simple_strtoul(buf, &e, 10); |
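Nearly all of the *_store handlers validate numeric input the same way this one does: simple_strtoul() hands back an end pointer, and anything other than an empty tail or a single trailing newline (sysfs writes usually carry one) is rejected. A self-contained userspace analogue of the check:

	#include <errno.h>
	#include <stdlib.h>

	static int parse_ulong(const char *buf, unsigned long *res)
	{
		char *e;
		unsigned long n = strtoul(buf, &e, 10);

		/* reject empty input and trailing junk; tolerate "123\n" */
		if (buf == e || (*e && *e != '\n'))
			return -EINVAL;
		*res = n;
		return 0;
	}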
@@ -3589,7 +3588,7 @@ static struct md_sysfs_entry md_chunk_size = | |||
3589 | __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); | 3588 | __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); |
3590 | 3589 | ||
3591 | static ssize_t | 3590 | static ssize_t |
3592 | resync_start_show(mddev_t *mddev, char *page) | 3591 | resync_start_show(struct mddev *mddev, char *page) |
3593 | { | 3592 | { |
3594 | if (mddev->recovery_cp == MaxSector) | 3593 | if (mddev->recovery_cp == MaxSector) |
3595 | return sprintf(page, "none\n"); | 3594 | return sprintf(page, "none\n"); |
@@ -3597,7 +3596,7 @@ resync_start_show(mddev_t *mddev, char *page) | |||
3597 | } | 3596 | } |
3598 | 3597 | ||
3599 | static ssize_t | 3598 | static ssize_t |
3600 | resync_start_store(mddev_t *mddev, const char *buf, size_t len) | 3599 | resync_start_store(struct mddev *mddev, const char *buf, size_t len) |
3601 | { | 3600 | { |
3602 | char *e; | 3601 | char *e; |
3603 | unsigned long long n = simple_strtoull(buf, &e, 10); | 3602 | unsigned long long n = simple_strtoull(buf, &e, 10); |
@@ -3667,7 +3666,7 @@ static int match_word(const char *word, char **list) | |||
3667 | } | 3666 | } |
3668 | 3667 | ||
3669 | static ssize_t | 3668 | static ssize_t |
3670 | array_state_show(mddev_t *mddev, char *page) | 3669 | array_state_show(struct mddev *mddev, char *page) |
3671 | { | 3670 | { |
3672 | enum array_state st = inactive; | 3671 | enum array_state st = inactive; |
3673 | 3672 | ||
@@ -3700,13 +3699,13 @@ array_state_show(mddev_t *mddev, char *page) | |||
3700 | return sprintf(page, "%s\n", array_states[st]); | 3699 | return sprintf(page, "%s\n", array_states[st]); |
3701 | } | 3700 | } |
3702 | 3701 | ||
3703 | static int do_md_stop(mddev_t * mddev, int ro, int is_open); | 3702 | static int do_md_stop(struct mddev * mddev, int ro, int is_open); |
3704 | static int md_set_readonly(mddev_t * mddev, int is_open); | 3703 | static int md_set_readonly(struct mddev * mddev, int is_open); |
3705 | static int do_md_run(mddev_t * mddev); | 3704 | static int do_md_run(struct mddev * mddev); |
3706 | static int restart_array(mddev_t *mddev); | 3705 | static int restart_array(struct mddev *mddev); |
3707 | 3706 | ||
3708 | static ssize_t | 3707 | static ssize_t |
3709 | array_state_store(mddev_t *mddev, const char *buf, size_t len) | 3708 | array_state_store(struct mddev *mddev, const char *buf, size_t len) |
3710 | { | 3709 | { |
3711 | int err = -EINVAL; | 3710 | int err = -EINVAL; |
3712 | enum array_state st = match_word(buf, array_states); | 3711 | enum array_state st = match_word(buf, array_states); |
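array_state_store() maps the written word onto enum array_state with match_word(), which scans a NULL-terminated string table and returns the index, with the terminator's index meaning "no match". A userspace sketch of the behaviour, assuming newline-terminated sysfs input (the kernel helper delegates the comparison to cmd_match()):

	#include <string.h>

	static int match_word(const char *word, const char * const *list)
	{
		size_t n = strcspn(word, "\n");	/* compare up to the newline */
		int i;

		for (i = 0; list[i]; i++)
			if (strlen(list[i]) == n && !strncmp(word, list[i], n))
				break;
		return i;	/* index of the NULL terminator == not found */
	}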
@@ -3800,13 +3799,13 @@ static struct md_sysfs_entry md_array_state = | |||
3800 | __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); | 3799 | __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); |
3801 | 3800 | ||
3802 | static ssize_t | 3801 | static ssize_t |
3803 | max_corrected_read_errors_show(mddev_t *mddev, char *page) { | 3802 | max_corrected_read_errors_show(struct mddev *mddev, char *page) { |
3804 | return sprintf(page, "%d\n", | 3803 | return sprintf(page, "%d\n", |
3805 | atomic_read(&mddev->max_corr_read_errors)); | 3804 | atomic_read(&mddev->max_corr_read_errors)); |
3806 | } | 3805 | } |
3807 | 3806 | ||
3808 | static ssize_t | 3807 | static ssize_t |
3809 | max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len) | 3808 | max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) |
3810 | { | 3809 | { |
3811 | char *e; | 3810 | char *e; |
3812 | unsigned long n = simple_strtoul(buf, &e, 10); | 3811 | unsigned long n = simple_strtoul(buf, &e, 10); |
@@ -3823,13 +3822,13 @@ __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, | |||
3823 | max_corrected_read_errors_store); | 3822 | max_corrected_read_errors_store); |
3824 | 3823 | ||
3825 | static ssize_t | 3824 | static ssize_t |
3826 | null_show(mddev_t *mddev, char *page) | 3825 | null_show(struct mddev *mddev, char *page) |
3827 | { | 3826 | { |
3828 | return -EINVAL; | 3827 | return -EINVAL; |
3829 | } | 3828 | } |
3830 | 3829 | ||
3831 | static ssize_t | 3830 | static ssize_t |
3832 | new_dev_store(mddev_t *mddev, const char *buf, size_t len) | 3831 | new_dev_store(struct mddev *mddev, const char *buf, size_t len) |
3833 | { | 3832 | { |
3834 | /* buf must be %d:%d\n? giving major and minor numbers */ | 3833 | /* buf must be %d:%d\n? giving major and minor numbers */ |
3835 | /* The new device is added to the array. | 3834 | /* The new device is added to the array. |
@@ -3842,7 +3841,7 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len) | |||
3842 | int major = simple_strtoul(buf, &e, 10); | 3841 | int major = simple_strtoul(buf, &e, 10); |
3843 | int minor; | 3842 | int minor; |
3844 | dev_t dev; | 3843 | dev_t dev; |
3845 | mdk_rdev_t *rdev; | 3844 | struct md_rdev *rdev; |
3846 | int err; | 3845 | int err; |
3847 | 3846 | ||
3848 | if (!*buf || *e != ':' || !e[1] || e[1] == '\n') | 3847 | if (!*buf || *e != ':' || !e[1] || e[1] == '\n') |
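new_dev_store() parses "major:minor", and like add_new_disk() below it guards against overflow by round-tripping through MKDEV(): if either number did not fit its field in dev_t, converting back with MAJOR()/MINOR() will not reproduce it. The parse and check, condensed into a helper (name hypothetical):

	static int parse_devnum(const char *buf, dev_t *devp)
	{
		char *e;
		int major = simple_strtoul(buf, &e, 10), minor;
		dev_t dev;

		if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
			return -EINVAL;
		minor = simple_strtoul(e + 1, &e, 10);
		if (*e && *e != '\n')
			return -EINVAL;
		dev = MKDEV(major, minor);
		/* MKDEV() silently truncates, so round-trip to detect overflow */
		if (major != MAJOR(dev) || minor != MINOR(dev))
			return -EOVERFLOW;
		*devp = dev;
		return 0;
	}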
@@ -3860,8 +3859,9 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len) | |||
3860 | rdev = md_import_device(dev, mddev->major_version, | 3859 | rdev = md_import_device(dev, mddev->major_version, |
3861 | mddev->minor_version); | 3860 | mddev->minor_version); |
3862 | if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { | 3861 | if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { |
3863 | mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, | 3862 | struct md_rdev *rdev0 |
3864 | mdk_rdev_t, same_set); | 3863 | = list_entry(mddev->disks.next, |
3864 | struct md_rdev, same_set); | ||
3865 | err = super_types[mddev->major_version] | 3865 | err = super_types[mddev->major_version] |
3866 | .load_super(rdev, rdev0, mddev->minor_version); | 3866 | .load_super(rdev, rdev0, mddev->minor_version); |
3867 | if (err < 0) | 3867 | if (err < 0) |
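This hunk and its twin in add_new_disk() fetch the first rdev with list_entry(), which is container_of() applied to an intrusive list: subtract the offset of the same_set member to step from the embedded list_head back to the enclosing struct. The arithmetic, in a self-contained sketch:

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct md_rdev {
		struct list_head same_set;	/* links this rdev on mddev->disks */
		int desc_nr;
	};

	/* given the head of a disks list, recover the first md_rdev on it */
	static struct md_rdev *first_rdev(struct list_head *disks)
	{
		return list_entry(disks->next, struct md_rdev, same_set);
	}

This is also why the typedef removal is safe here: list_entry() takes the type name as a macro argument, so mdk_rdev_t simply becomes struct md_rdev in place.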
@@ -3885,7 +3885,7 @@ static struct md_sysfs_entry md_new_device = | |||
3885 | __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); | 3885 | __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); |
3886 | 3886 | ||
3887 | static ssize_t | 3887 | static ssize_t |
3888 | bitmap_store(mddev_t *mddev, const char *buf, size_t len) | 3888 | bitmap_store(struct mddev *mddev, const char *buf, size_t len) |
3889 | { | 3889 | { |
3890 | char *end; | 3890 | char *end; |
3891 | unsigned long chunk, end_chunk; | 3891 | unsigned long chunk, end_chunk; |
@@ -3914,16 +3914,16 @@ static struct md_sysfs_entry md_bitmap = | |||
3914 | __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); | 3914 | __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); |
3915 | 3915 | ||
3916 | static ssize_t | 3916 | static ssize_t |
3917 | size_show(mddev_t *mddev, char *page) | 3917 | size_show(struct mddev *mddev, char *page) |
3918 | { | 3918 | { |
3919 | return sprintf(page, "%llu\n", | 3919 | return sprintf(page, "%llu\n", |
3920 | (unsigned long long)mddev->dev_sectors / 2); | 3920 | (unsigned long long)mddev->dev_sectors / 2); |
3921 | } | 3921 | } |
3922 | 3922 | ||
3923 | static int update_size(mddev_t *mddev, sector_t num_sectors); | 3923 | static int update_size(struct mddev *mddev, sector_t num_sectors); |
3924 | 3924 | ||
3925 | static ssize_t | 3925 | static ssize_t |
3926 | size_store(mddev_t *mddev, const char *buf, size_t len) | 3926 | size_store(struct mddev *mddev, const char *buf, size_t len) |
3927 | { | 3927 | { |
3928 | /* If array is inactive, we can reduce the component size, but | 3928 | /* If array is inactive, we can reduce the component size, but |
3929 | * not increase it (except from 0). | 3929 | * not increase it (except from 0). |
@@ -3958,7 +3958,7 @@ __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); | |||
3958 | * or N.M for internally known formats | 3958 | * or N.M for internally known formats |
3959 | */ | 3959 | */ |
3960 | static ssize_t | 3960 | static ssize_t |
3961 | metadata_show(mddev_t *mddev, char *page) | 3961 | metadata_show(struct mddev *mddev, char *page) |
3962 | { | 3962 | { |
3963 | if (mddev->persistent) | 3963 | if (mddev->persistent) |
3964 | return sprintf(page, "%d.%d\n", | 3964 | return sprintf(page, "%d.%d\n", |
@@ -3970,7 +3970,7 @@ metadata_show(mddev_t *mddev, char *page) | |||
3970 | } | 3970 | } |
3971 | 3971 | ||
3972 | static ssize_t | 3972 | static ssize_t |
3973 | metadata_store(mddev_t *mddev, const char *buf, size_t len) | 3973 | metadata_store(struct mddev *mddev, const char *buf, size_t len) |
3974 | { | 3974 | { |
3975 | int major, minor; | 3975 | int major, minor; |
3976 | char *e; | 3976 | char *e; |
@@ -4024,7 +4024,7 @@ static struct md_sysfs_entry md_metadata = | |||
4024 | __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); | 4024 | __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); |
4025 | 4025 | ||
4026 | static ssize_t | 4026 | static ssize_t |
4027 | action_show(mddev_t *mddev, char *page) | 4027 | action_show(struct mddev *mddev, char *page) |
4028 | { | 4028 | { |
4029 | char *type = "idle"; | 4029 | char *type = "idle"; |
4030 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) | 4030 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) |
@@ -4046,10 +4046,10 @@ action_show(mddev_t *mddev, char *page) | |||
4046 | return sprintf(page, "%s\n", type); | 4046 | return sprintf(page, "%s\n", type); |
4047 | } | 4047 | } |
4048 | 4048 | ||
4049 | static void reap_sync_thread(mddev_t *mddev); | 4049 | static void reap_sync_thread(struct mddev *mddev); |
4050 | 4050 | ||
4051 | static ssize_t | 4051 | static ssize_t |
4052 | action_store(mddev_t *mddev, const char *page, size_t len) | 4052 | action_store(struct mddev *mddev, const char *page, size_t len) |
4053 | { | 4053 | { |
4054 | if (!mddev->pers || !mddev->pers->sync_request) | 4054 | if (!mddev->pers || !mddev->pers->sync_request) |
4055 | return -EINVAL; | 4055 | return -EINVAL; |
@@ -4095,7 +4095,7 @@ action_store(mddev_t *mddev, const char *page, size_t len) | |||
4095 | } | 4095 | } |
4096 | 4096 | ||
4097 | static ssize_t | 4097 | static ssize_t |
4098 | mismatch_cnt_show(mddev_t *mddev, char *page) | 4098 | mismatch_cnt_show(struct mddev *mddev, char *page) |
4099 | { | 4099 | { |
4100 | return sprintf(page, "%llu\n", | 4100 | return sprintf(page, "%llu\n", |
4101 | (unsigned long long) mddev->resync_mismatches); | 4101 | (unsigned long long) mddev->resync_mismatches); |
@@ -4108,14 +4108,14 @@ __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); | |||
4108 | static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); | 4108 | static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); |
4109 | 4109 | ||
4110 | static ssize_t | 4110 | static ssize_t |
4111 | sync_min_show(mddev_t *mddev, char *page) | 4111 | sync_min_show(struct mddev *mddev, char *page) |
4112 | { | 4112 | { |
4113 | return sprintf(page, "%d (%s)\n", speed_min(mddev), | 4113 | return sprintf(page, "%d (%s)\n", speed_min(mddev), |
4114 | mddev->sync_speed_min ? "local": "system"); | 4114 | mddev->sync_speed_min ? "local": "system"); |
4115 | } | 4115 | } |
4116 | 4116 | ||
4117 | static ssize_t | 4117 | static ssize_t |
4118 | sync_min_store(mddev_t *mddev, const char *buf, size_t len) | 4118 | sync_min_store(struct mddev *mddev, const char *buf, size_t len) |
4119 | { | 4119 | { |
4120 | int min; | 4120 | int min; |
4121 | char *e; | 4121 | char *e; |
@@ -4134,14 +4134,14 @@ static struct md_sysfs_entry md_sync_min = | |||
4134 | __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); | 4134 | __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); |
4135 | 4135 | ||
4136 | static ssize_t | 4136 | static ssize_t |
4137 | sync_max_show(mddev_t *mddev, char *page) | 4137 | sync_max_show(struct mddev *mddev, char *page) |
4138 | { | 4138 | { |
4139 | return sprintf(page, "%d (%s)\n", speed_max(mddev), | 4139 | return sprintf(page, "%d (%s)\n", speed_max(mddev), |
4140 | mddev->sync_speed_max ? "local": "system"); | 4140 | mddev->sync_speed_max ? "local": "system"); |
4141 | } | 4141 | } |
4142 | 4142 | ||
4143 | static ssize_t | 4143 | static ssize_t |
4144 | sync_max_store(mddev_t *mddev, const char *buf, size_t len) | 4144 | sync_max_store(struct mddev *mddev, const char *buf, size_t len) |
4145 | { | 4145 | { |
4146 | int max; | 4146 | int max; |
4147 | char *e; | 4147 | char *e; |
@@ -4160,20 +4160,20 @@ static struct md_sysfs_entry md_sync_max = | |||
4160 | __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); | 4160 | __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); |
4161 | 4161 | ||
4162 | static ssize_t | 4162 | static ssize_t |
4163 | degraded_show(mddev_t *mddev, char *page) | 4163 | degraded_show(struct mddev *mddev, char *page) |
4164 | { | 4164 | { |
4165 | return sprintf(page, "%d\n", mddev->degraded); | 4165 | return sprintf(page, "%d\n", mddev->degraded); |
4166 | } | 4166 | } |
4167 | static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); | 4167 | static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); |
4168 | 4168 | ||
4169 | static ssize_t | 4169 | static ssize_t |
4170 | sync_force_parallel_show(mddev_t *mddev, char *page) | 4170 | sync_force_parallel_show(struct mddev *mddev, char *page) |
4171 | { | 4171 | { |
4172 | return sprintf(page, "%d\n", mddev->parallel_resync); | 4172 | return sprintf(page, "%d\n", mddev->parallel_resync); |
4173 | } | 4173 | } |
4174 | 4174 | ||
4175 | static ssize_t | 4175 | static ssize_t |
4176 | sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) | 4176 | sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) |
4177 | { | 4177 | { |
4178 | long n; | 4178 | long n; |
4179 | 4179 | ||
@@ -4197,7 +4197,7 @@ __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, | |||
4197 | sync_force_parallel_show, sync_force_parallel_store); | 4197 | sync_force_parallel_show, sync_force_parallel_store); |
4198 | 4198 | ||
4199 | static ssize_t | 4199 | static ssize_t |
4200 | sync_speed_show(mddev_t *mddev, char *page) | 4200 | sync_speed_show(struct mddev *mddev, char *page) |
4201 | { | 4201 | { |
4202 | unsigned long resync, dt, db; | 4202 | unsigned long resync, dt, db; |
4203 | if (mddev->curr_resync == 0) | 4203 | if (mddev->curr_resync == 0) |
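sync_speed_show() reports recent resync throughput: sectors completed since the last resync mark, divided by the elapsed seconds, halved to convert 512-byte sectors into KiB/s. The elided body runs roughly like this (a sketch, not a verbatim quote):

	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt)
		dt++;				/* avoid dividing by zero */
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2);	/* sectors/2 == KiB */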
@@ -4212,7 +4212,7 @@ sync_speed_show(mddev_t *mddev, char *page) | |||
4212 | static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); | 4212 | static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); |
4213 | 4213 | ||
4214 | static ssize_t | 4214 | static ssize_t |
4215 | sync_completed_show(mddev_t *mddev, char *page) | 4215 | sync_completed_show(struct mddev *mddev, char *page) |
4216 | { | 4216 | { |
4217 | unsigned long long max_sectors, resync; | 4217 | unsigned long long max_sectors, resync; |
4218 | 4218 | ||
@@ -4231,13 +4231,13 @@ sync_completed_show(mddev_t *mddev, char *page) | |||
4231 | static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); | 4231 | static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); |
4232 | 4232 | ||
4233 | static ssize_t | 4233 | static ssize_t |
4234 | min_sync_show(mddev_t *mddev, char *page) | 4234 | min_sync_show(struct mddev *mddev, char *page) |
4235 | { | 4235 | { |
4236 | return sprintf(page, "%llu\n", | 4236 | return sprintf(page, "%llu\n", |
4237 | (unsigned long long)mddev->resync_min); | 4237 | (unsigned long long)mddev->resync_min); |
4238 | } | 4238 | } |
4239 | static ssize_t | 4239 | static ssize_t |
4240 | min_sync_store(mddev_t *mddev, const char *buf, size_t len) | 4240 | min_sync_store(struct mddev *mddev, const char *buf, size_t len) |
4241 | { | 4241 | { |
4242 | unsigned long long min; | 4242 | unsigned long long min; |
4243 | if (strict_strtoull(buf, 10, &min)) | 4243 | if (strict_strtoull(buf, 10, &min)) |
@@ -4262,7 +4262,7 @@ static struct md_sysfs_entry md_min_sync = | |||
4262 | __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); | 4262 | __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); |
4263 | 4263 | ||
4264 | static ssize_t | 4264 | static ssize_t |
4265 | max_sync_show(mddev_t *mddev, char *page) | 4265 | max_sync_show(struct mddev *mddev, char *page) |
4266 | { | 4266 | { |
4267 | if (mddev->resync_max == MaxSector) | 4267 | if (mddev->resync_max == MaxSector) |
4268 | return sprintf(page, "max\n"); | 4268 | return sprintf(page, "max\n"); |
@@ -4271,7 +4271,7 @@ max_sync_show(mddev_t *mddev, char *page) | |||
4271 | (unsigned long long)mddev->resync_max); | 4271 | (unsigned long long)mddev->resync_max); |
4272 | } | 4272 | } |
4273 | static ssize_t | 4273 | static ssize_t |
4274 | max_sync_store(mddev_t *mddev, const char *buf, size_t len) | 4274 | max_sync_store(struct mddev *mddev, const char *buf, size_t len) |
4275 | { | 4275 | { |
4276 | if (strncmp(buf, "max", 3) == 0) | 4276 | if (strncmp(buf, "max", 3) == 0) |
4277 | mddev->resync_max = MaxSector; | 4277 | mddev->resync_max = MaxSector; |
@@ -4302,13 +4302,13 @@ static struct md_sysfs_entry md_max_sync = | |||
4302 | __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); | 4302 | __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); |
4303 | 4303 | ||
4304 | static ssize_t | 4304 | static ssize_t |
4305 | suspend_lo_show(mddev_t *mddev, char *page) | 4305 | suspend_lo_show(struct mddev *mddev, char *page) |
4306 | { | 4306 | { |
4307 | return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); | 4307 | return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); |
4308 | } | 4308 | } |
4309 | 4309 | ||
4310 | static ssize_t | 4310 | static ssize_t |
4311 | suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) | 4311 | suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) |
4312 | { | 4312 | { |
4313 | char *e; | 4313 | char *e; |
4314 | unsigned long long new = simple_strtoull(buf, &e, 10); | 4314 | unsigned long long new = simple_strtoull(buf, &e, 10); |
@@ -4336,13 +4336,13 @@ __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); | |||
4336 | 4336 | ||
4337 | 4337 | ||
4338 | static ssize_t | 4338 | static ssize_t |
4339 | suspend_hi_show(mddev_t *mddev, char *page) | 4339 | suspend_hi_show(struct mddev *mddev, char *page) |
4340 | { | 4340 | { |
4341 | return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); | 4341 | return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); |
4342 | } | 4342 | } |
4343 | 4343 | ||
4344 | static ssize_t | 4344 | static ssize_t |
4345 | suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) | 4345 | suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) |
4346 | { | 4346 | { |
4347 | char *e; | 4347 | char *e; |
4348 | unsigned long long new = simple_strtoull(buf, &e, 10); | 4348 | unsigned long long new = simple_strtoull(buf, &e, 10); |
@@ -4369,7 +4369,7 @@ static struct md_sysfs_entry md_suspend_hi = | |||
4369 | __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); | 4369 | __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); |
4370 | 4370 | ||
4371 | static ssize_t | 4371 | static ssize_t |
4372 | reshape_position_show(mddev_t *mddev, char *page) | 4372 | reshape_position_show(struct mddev *mddev, char *page) |
4373 | { | 4373 | { |
4374 | if (mddev->reshape_position != MaxSector) | 4374 | if (mddev->reshape_position != MaxSector) |
4375 | return sprintf(page, "%llu\n", | 4375 | return sprintf(page, "%llu\n", |
@@ -4379,7 +4379,7 @@ reshape_position_show(mddev_t *mddev, char *page) | |||
4379 | } | 4379 | } |
4380 | 4380 | ||
4381 | static ssize_t | 4381 | static ssize_t |
4382 | reshape_position_store(mddev_t *mddev, const char *buf, size_t len) | 4382 | reshape_position_store(struct mddev *mddev, const char *buf, size_t len) |
4383 | { | 4383 | { |
4384 | char *e; | 4384 | char *e; |
4385 | unsigned long long new = simple_strtoull(buf, &e, 10); | 4385 | unsigned long long new = simple_strtoull(buf, &e, 10); |
@@ -4400,7 +4400,7 @@ __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, | |||
4400 | reshape_position_store); | 4400 | reshape_position_store); |
4401 | 4401 | ||
4402 | static ssize_t | 4402 | static ssize_t |
4403 | array_size_show(mddev_t *mddev, char *page) | 4403 | array_size_show(struct mddev *mddev, char *page) |
4404 | { | 4404 | { |
4405 | if (mddev->external_size) | 4405 | if (mddev->external_size) |
4406 | return sprintf(page, "%llu\n", | 4406 | return sprintf(page, "%llu\n", |
@@ -4410,7 +4410,7 @@ array_size_show(mddev_t *mddev, char *page) | |||
4410 | } | 4410 | } |
4411 | 4411 | ||
4412 | static ssize_t | 4412 | static ssize_t |
4413 | array_size_store(mddev_t *mddev, const char *buf, size_t len) | 4413 | array_size_store(struct mddev *mddev, const char *buf, size_t len) |
4414 | { | 4414 | { |
4415 | sector_t sectors; | 4415 | sector_t sectors; |
4416 | 4416 | ||
@@ -4485,7 +4485,7 @@ static ssize_t | |||
4485 | md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 4485 | md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
4486 | { | 4486 | { |
4487 | struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); | 4487 | struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); |
4488 | mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); | 4488 | struct mddev *mddev = container_of(kobj, struct mddev, kobj); |
4489 | ssize_t rv; | 4489 | ssize_t rv; |
4490 | 4490 | ||
4491 | if (!entry->show) | 4491 | if (!entry->show) |
@@ -4503,7 +4503,7 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, | |||
4503 | const char *page, size_t length) | 4503 | const char *page, size_t length) |
4504 | { | 4504 | { |
4505 | struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); | 4505 | struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); |
4506 | mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); | 4506 | struct mddev *mddev = container_of(kobj, struct mddev, kobj); |
4507 | ssize_t rv; | 4507 | ssize_t rv; |
4508 | 4508 | ||
4509 | if (!entry->store) | 4509 | if (!entry->store) |
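md_attr_show()/md_attr_store() are the glue between the generic kobject layer and the typed handlers above: one container_of() recovers the md_sysfs_entry from the struct attribute, a second recovers the mddev from the kobject embedded in it. The show path, condensed (the real function also takes a reference and the mddev lock around the callback):

	static ssize_t
	md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
	{
		struct md_sysfs_entry *entry =
			container_of(attr, struct md_sysfs_entry, attr);
		struct mddev *mddev = container_of(kobj, struct mddev, kobj);

		if (!entry->show)
			return -EIO;
		return entry->show(mddev, page);
	}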
@@ -4522,7 +4522,7 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, | |||
4522 | 4522 | ||
4523 | static void md_free(struct kobject *ko) | 4523 | static void md_free(struct kobject *ko) |
4524 | { | 4524 | { |
4525 | mddev_t *mddev = container_of(ko, mddev_t, kobj); | 4525 | struct mddev *mddev = container_of(ko, struct mddev, kobj); |
4526 | 4526 | ||
4527 | if (mddev->sysfs_state) | 4527 | if (mddev->sysfs_state) |
4528 | sysfs_put(mddev->sysfs_state); | 4528 | sysfs_put(mddev->sysfs_state); |
@@ -4551,7 +4551,7 @@ int mdp_major = 0; | |||
4551 | 4551 | ||
4552 | static void mddev_delayed_delete(struct work_struct *ws) | 4552 | static void mddev_delayed_delete(struct work_struct *ws) |
4553 | { | 4553 | { |
4554 | mddev_t *mddev = container_of(ws, mddev_t, del_work); | 4554 | struct mddev *mddev = container_of(ws, struct mddev, del_work); |
4555 | 4555 | ||
4556 | sysfs_remove_group(&mddev->kobj, &md_bitmap_group); | 4556 | sysfs_remove_group(&mddev->kobj, &md_bitmap_group); |
4557 | kobject_del(&mddev->kobj); | 4557 | kobject_del(&mddev->kobj); |
@@ -4561,7 +4561,7 @@ static void mddev_delayed_delete(struct work_struct *ws) | |||
4561 | static int md_alloc(dev_t dev, char *name) | 4561 | static int md_alloc(dev_t dev, char *name) |
4562 | { | 4562 | { |
4563 | static DEFINE_MUTEX(disks_mutex); | 4563 | static DEFINE_MUTEX(disks_mutex); |
4564 | mddev_t *mddev = mddev_find(dev); | 4564 | struct mddev *mddev = mddev_find(dev); |
4565 | struct gendisk *disk; | 4565 | struct gendisk *disk; |
4566 | int partitioned; | 4566 | int partitioned; |
4567 | int shift; | 4567 | int shift; |
@@ -4588,7 +4588,7 @@ static int md_alloc(dev_t dev, char *name) | |||
4588 | if (name) { | 4588 | if (name) { |
4589 | /* Need to ensure that 'name' is not a duplicate. | 4589 | /* Need to ensure that 'name' is not a duplicate. |
4590 | */ | 4590 | */ |
4591 | mddev_t *mddev2; | 4591 | struct mddev *mddev2; |
4592 | spin_lock(&all_mddevs_lock); | 4592 | spin_lock(&all_mddevs_lock); |
4593 | 4593 | ||
4594 | list_for_each_entry(mddev2, &all_mddevs, all_mddevs) | 4594 | list_for_each_entry(mddev2, &all_mddevs, all_mddevs) |
@@ -4689,7 +4689,7 @@ static int add_named_array(const char *val, struct kernel_param *kp) | |||
4689 | 4689 | ||
4690 | static void md_safemode_timeout(unsigned long data) | 4690 | static void md_safemode_timeout(unsigned long data) |
4691 | { | 4691 | { |
4692 | mddev_t *mddev = (mddev_t *) data; | 4692 | struct mddev *mddev = (struct mddev *) data; |
4693 | 4693 | ||
4694 | if (!atomic_read(&mddev->writes_pending)) { | 4694 | if (!atomic_read(&mddev->writes_pending)) { |
4695 | mddev->safemode = 1; | 4695 | mddev->safemode = 1; |
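The cast in md_safemode_timeout() is the classic timer convention of this era: the callback receives an opaque unsigned long, so the mddev pointer is smuggled through it at setup time. A sketch of the wiring (mirroring how md arms its safemode timer):

	setup_timer(&mddev->safemode_timer, md_safemode_timeout,
		    (unsigned long)mddev);
	/* re-arm: fire safemode_delay jiffies from now */
	mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);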
@@ -4701,11 +4701,11 @@ static void md_safemode_timeout(unsigned long data) | |||
4701 | 4701 | ||
4702 | static int start_dirty_degraded; | 4702 | static int start_dirty_degraded; |
4703 | 4703 | ||
4704 | int md_run(mddev_t *mddev) | 4704 | int md_run(struct mddev *mddev) |
4705 | { | 4705 | { |
4706 | int err; | 4706 | int err; |
4707 | mdk_rdev_t *rdev; | 4707 | struct md_rdev *rdev; |
4708 | struct mdk_personality *pers; | 4708 | struct md_personality *pers; |
4709 | 4709 | ||
4710 | if (list_empty(&mddev->disks)) | 4710 | if (list_empty(&mddev->disks)) |
4711 | /* cannot run an array with no devices.. */ | 4711 | /* cannot run an array with no devices.. */ |
@@ -4769,7 +4769,7 @@ int md_run(mddev_t *mddev) | |||
4769 | 4769 | ||
4770 | if (mddev->bio_set == NULL) | 4770 | if (mddev->bio_set == NULL) |
4771 | mddev->bio_set = bioset_create(BIO_POOL_SIZE, | 4771 | mddev->bio_set = bioset_create(BIO_POOL_SIZE, |
4772 | sizeof(mddev_t *)); | 4772 | sizeof(struct mddev *)); |
4773 | 4773 | ||
4774 | spin_lock(&pers_lock); | 4774 | spin_lock(&pers_lock); |
4775 | pers = find_pers(mddev->level, mddev->clevel); | 4775 | pers = find_pers(mddev->level, mddev->clevel); |
@@ -4804,7 +4804,7 @@ int md_run(mddev_t *mddev) | |||
4804 | * configuration. | 4804 | * configuration. |
4805 | */ | 4805 | */ |
4806 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 4806 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
4807 | mdk_rdev_t *rdev2; | 4807 | struct md_rdev *rdev2; |
4808 | int warned = 0; | 4808 | int warned = 0; |
4809 | 4809 | ||
4810 | list_for_each_entry(rdev, &mddev->disks, same_set) | 4810 | list_for_each_entry(rdev, &mddev->disks, same_set) |
@@ -4903,7 +4903,7 @@ int md_run(mddev_t *mddev) | |||
4903 | } | 4903 | } |
4904 | EXPORT_SYMBOL_GPL(md_run); | 4904 | EXPORT_SYMBOL_GPL(md_run); |
4905 | 4905 | ||
4906 | static int do_md_run(mddev_t *mddev) | 4906 | static int do_md_run(struct mddev *mddev) |
4907 | { | 4907 | { |
4908 | int err; | 4908 | int err; |
4909 | 4909 | ||
@@ -4927,7 +4927,7 @@ out: | |||
4927 | return err; | 4927 | return err; |
4928 | } | 4928 | } |
4929 | 4929 | ||
4930 | static int restart_array(mddev_t *mddev) | 4930 | static int restart_array(struct mddev *mddev) |
4931 | { | 4931 | { |
4932 | struct gendisk *disk = mddev->gendisk; | 4932 | struct gendisk *disk = mddev->gendisk; |
4933 | 4933 | ||
@@ -4977,7 +4977,7 @@ void restore_bitmap_write_access(struct file *file) | |||
4977 | spin_unlock(&inode->i_lock); | 4977 | spin_unlock(&inode->i_lock); |
4978 | } | 4978 | } |
4979 | 4979 | ||
4980 | static void md_clean(mddev_t *mddev) | 4980 | static void md_clean(struct mddev *mddev) |
4981 | { | 4981 | { |
4982 | mddev->array_sectors = 0; | 4982 | mddev->array_sectors = 0; |
4983 | mddev->external_size = 0; | 4983 | mddev->external_size = 0; |
@@ -5020,7 +5020,7 @@ static void md_clean(mddev_t *mddev) | |||
5020 | mddev->bitmap_info.max_write_behind = 0; | 5020 | mddev->bitmap_info.max_write_behind = 0; |
5021 | } | 5021 | } |
5022 | 5022 | ||
5023 | static void __md_stop_writes(mddev_t *mddev) | 5023 | static void __md_stop_writes(struct mddev *mddev) |
5024 | { | 5024 | { |
5025 | if (mddev->sync_thread) { | 5025 | if (mddev->sync_thread) { |
5026 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 5026 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
@@ -5040,7 +5040,7 @@ static void __md_stop_writes(mddev_t *mddev) | |||
5040 | } | 5040 | } |
5041 | } | 5041 | } |
5042 | 5042 | ||
5043 | void md_stop_writes(mddev_t *mddev) | 5043 | void md_stop_writes(struct mddev *mddev) |
5044 | { | 5044 | { |
5045 | mddev_lock(mddev); | 5045 | mddev_lock(mddev); |
5046 | __md_stop_writes(mddev); | 5046 | __md_stop_writes(mddev); |
@@ -5048,7 +5048,7 @@ void md_stop_writes(mddev_t *mddev) | |||
5048 | } | 5048 | } |
5049 | EXPORT_SYMBOL_GPL(md_stop_writes); | 5049 | EXPORT_SYMBOL_GPL(md_stop_writes); |
5050 | 5050 | ||
5051 | void md_stop(mddev_t *mddev) | 5051 | void md_stop(struct mddev *mddev) |
5052 | { | 5052 | { |
5053 | mddev->ready = 0; | 5053 | mddev->ready = 0; |
5054 | mddev->pers->stop(mddev); | 5054 | mddev->pers->stop(mddev); |
@@ -5060,7 +5060,7 @@ void md_stop(mddev_t *mddev) | |||
5060 | } | 5060 | } |
5061 | EXPORT_SYMBOL_GPL(md_stop); | 5061 | EXPORT_SYMBOL_GPL(md_stop); |
5062 | 5062 | ||
5063 | static int md_set_readonly(mddev_t *mddev, int is_open) | 5063 | static int md_set_readonly(struct mddev *mddev, int is_open) |
5064 | { | 5064 | { |
5065 | int err = 0; | 5065 | int err = 0; |
5066 | mutex_lock(&mddev->open_mutex); | 5066 | mutex_lock(&mddev->open_mutex); |
@@ -5090,10 +5090,10 @@ out: | |||
5090 | * 0 - completely stop and dis-assemble array | 5090 | * 0 - completely stop and dis-assemble array |
5091 | * 2 - stop but do not disassemble array | 5091 | * 2 - stop but do not disassemble array |
5092 | */ | 5092 | */ |
5093 | static int do_md_stop(mddev_t * mddev, int mode, int is_open) | 5093 | static int do_md_stop(struct mddev * mddev, int mode, int is_open) |
5094 | { | 5094 | { |
5095 | struct gendisk *disk = mddev->gendisk; | 5095 | struct gendisk *disk = mddev->gendisk; |
5096 | mdk_rdev_t *rdev; | 5096 | struct md_rdev *rdev; |
5097 | 5097 | ||
5098 | mutex_lock(&mddev->open_mutex); | 5098 | mutex_lock(&mddev->open_mutex); |
5099 | if (atomic_read(&mddev->openers) > is_open || | 5099 | if (atomic_read(&mddev->openers) > is_open || |
@@ -5156,9 +5156,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
5156 | } | 5156 | } |
5157 | 5157 | ||
5158 | #ifndef MODULE | 5158 | #ifndef MODULE |
5159 | static void autorun_array(mddev_t *mddev) | 5159 | static void autorun_array(struct mddev *mddev) |
5160 | { | 5160 | { |
5161 | mdk_rdev_t *rdev; | 5161 | struct md_rdev *rdev; |
5162 | int err; | 5162 | int err; |
5163 | 5163 | ||
5164 | if (list_empty(&mddev->disks)) | 5164 | if (list_empty(&mddev->disks)) |
@@ -5193,8 +5193,8 @@ static void autorun_array(mddev_t *mddev) | |||
5193 | */ | 5193 | */ |
5194 | static void autorun_devices(int part) | 5194 | static void autorun_devices(int part) |
5195 | { | 5195 | { |
5196 | mdk_rdev_t *rdev0, *rdev, *tmp; | 5196 | struct md_rdev *rdev0, *rdev, *tmp; |
5197 | mddev_t *mddev; | 5197 | struct mddev *mddev; |
5198 | char b[BDEVNAME_SIZE]; | 5198 | char b[BDEVNAME_SIZE]; |
5199 | 5199 | ||
5200 | printk(KERN_INFO "md: autorun ...\n"); | 5200 | printk(KERN_INFO "md: autorun ...\n"); |
@@ -5203,7 +5203,7 @@ static void autorun_devices(int part) | |||
5203 | dev_t dev; | 5203 | dev_t dev; |
5204 | LIST_HEAD(candidates); | 5204 | LIST_HEAD(candidates); |
5205 | rdev0 = list_entry(pending_raid_disks.next, | 5205 | rdev0 = list_entry(pending_raid_disks.next, |
5206 | mdk_rdev_t, same_set); | 5206 | struct md_rdev, same_set); |
5207 | 5207 | ||
5208 | printk(KERN_INFO "md: considering %s ...\n", | 5208 | printk(KERN_INFO "md: considering %s ...\n", |
5209 | bdevname(rdev0->bdev,b)); | 5209 | bdevname(rdev0->bdev,b)); |
@@ -5289,11 +5289,11 @@ static int get_version(void __user * arg) | |||
5289 | return 0; | 5289 | return 0; |
5290 | } | 5290 | } |
5291 | 5291 | ||
5292 | static int get_array_info(mddev_t * mddev, void __user * arg) | 5292 | static int get_array_info(struct mddev * mddev, void __user * arg) |
5293 | { | 5293 | { |
5294 | mdu_array_info_t info; | 5294 | mdu_array_info_t info; |
5295 | int nr,working,insync,failed,spare; | 5295 | int nr,working,insync,failed,spare; |
5296 | mdk_rdev_t *rdev; | 5296 | struct md_rdev *rdev; |
5297 | 5297 | ||
5298 | nr=working=insync=failed=spare=0; | 5298 | nr=working=insync=failed=spare=0; |
5299 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 5299 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
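get_array_info() walks mddev->disks with list_for_each_entry(), naming the embedded member (same_set) so the macro hands back each containing struct md_rdev directly. Simplified, the tally loop is:

	struct md_rdev *rdev;
	int nr = 0, working = 0, failed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else
			working++;	/* further split into insync/spare */
	}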
@@ -5342,7 +5342,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) | |||
5342 | return 0; | 5342 | return 0; |
5343 | } | 5343 | } |
5344 | 5344 | ||
5345 | static int get_bitmap_file(mddev_t * mddev, void __user * arg) | 5345 | static int get_bitmap_file(struct mddev * mddev, void __user * arg) |
5346 | { | 5346 | { |
5347 | mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ | 5347 | mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ |
5348 | char *ptr, *buf = NULL; | 5348 | char *ptr, *buf = NULL; |
@@ -5382,10 +5382,10 @@ out: | |||
5382 | return err; | 5382 | return err; |
5383 | } | 5383 | } |
5384 | 5384 | ||
5385 | static int get_disk_info(mddev_t * mddev, void __user * arg) | 5385 | static int get_disk_info(struct mddev * mddev, void __user * arg) |
5386 | { | 5386 | { |
5387 | mdu_disk_info_t info; | 5387 | mdu_disk_info_t info; |
5388 | mdk_rdev_t *rdev; | 5388 | struct md_rdev *rdev; |
5389 | 5389 | ||
5390 | if (copy_from_user(&info, arg, sizeof(info))) | 5390 | if (copy_from_user(&info, arg, sizeof(info))) |
5391 | return -EFAULT; | 5391 | return -EFAULT; |
@@ -5416,10 +5416,10 @@ static int get_disk_info(mddev_t * mddev, void __user * arg) | |||
5416 | return 0; | 5416 | return 0; |
5417 | } | 5417 | } |
5418 | 5418 | ||
5419 | static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | 5419 | static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) |
5420 | { | 5420 | { |
5421 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 5421 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
5422 | mdk_rdev_t *rdev; | 5422 | struct md_rdev *rdev; |
5423 | dev_t dev = MKDEV(info->major,info->minor); | 5423 | dev_t dev = MKDEV(info->major,info->minor); |
5424 | 5424 | ||
5425 | if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) | 5425 | if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) |
@@ -5436,8 +5436,9 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | |||
5436 | return PTR_ERR(rdev); | 5436 | return PTR_ERR(rdev); |
5437 | } | 5437 | } |
5438 | if (!list_empty(&mddev->disks)) { | 5438 | if (!list_empty(&mddev->disks)) { |
5439 | mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, | 5439 | struct md_rdev *rdev0 |
5440 | mdk_rdev_t, same_set); | 5440 | = list_entry(mddev->disks.next, |
5441 | struct md_rdev, same_set); | ||
5441 | err = super_types[mddev->major_version] | 5442 | err = super_types[mddev->major_version] |
5442 | .load_super(rdev, rdev0, mddev->minor_version); | 5443 | .load_super(rdev, rdev0, mddev->minor_version); |
5443 | if (err < 0) { | 5444 | if (err < 0) { |
@@ -5587,10 +5588,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | |||
5587 | return 0; | 5588 | return 0; |
5588 | } | 5589 | } |
5589 | 5590 | ||
5590 | static int hot_remove_disk(mddev_t * mddev, dev_t dev) | 5591 | static int hot_remove_disk(struct mddev * mddev, dev_t dev) |
5591 | { | 5592 | { |
5592 | char b[BDEVNAME_SIZE]; | 5593 | char b[BDEVNAME_SIZE]; |
5593 | mdk_rdev_t *rdev; | 5594 | struct md_rdev *rdev; |
5594 | 5595 | ||
5595 | rdev = find_rdev(mddev, dev); | 5596 | rdev = find_rdev(mddev, dev); |
5596 | if (!rdev) | 5597 | if (!rdev) |
@@ -5610,11 +5611,11 @@ busy: | |||
5610 | return -EBUSY; | 5611 | return -EBUSY; |
5611 | } | 5612 | } |
5612 | 5613 | ||
5613 | static int hot_add_disk(mddev_t * mddev, dev_t dev) | 5614 | static int hot_add_disk(struct mddev * mddev, dev_t dev) |
5614 | { | 5615 | { |
5615 | char b[BDEVNAME_SIZE]; | 5616 | char b[BDEVNAME_SIZE]; |
5616 | int err; | 5617 | int err; |
5617 | mdk_rdev_t *rdev; | 5618 | struct md_rdev *rdev; |
5618 | 5619 | ||
5619 | if (!mddev->pers) | 5620 | if (!mddev->pers) |
5620 | return -ENODEV; | 5621 | return -ENODEV; |
@@ -5684,7 +5685,7 @@ abort_export: | |||
5684 | return err; | 5685 | return err; |
5685 | } | 5686 | } |
5686 | 5687 | ||
5687 | static int set_bitmap_file(mddev_t *mddev, int fd) | 5688 | static int set_bitmap_file(struct mddev *mddev, int fd) |
5688 | { | 5689 | { |
5689 | int err; | 5690 | int err; |
5690 | 5691 | ||
@@ -5757,7 +5758,7 @@ static int set_bitmap_file(mddev_t *mddev, int fd) | |||
5757 | * The minor and patch _version numbers are also kept incase the | 5758 | * The minor and patch _version numbers are also kept incase the |
5758 | * super_block handler wishes to interpret them. | 5759 | * super_block handler wishes to interpret them. |
5759 | */ | 5760 | */ |
5760 | static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) | 5761 | static int set_array_info(struct mddev * mddev, mdu_array_info_t *info) |
5761 | { | 5762 | { |
5762 | 5763 | ||
5763 | if (info->raid_disks == 0) { | 5764 | if (info->raid_disks == 0) { |
@@ -5827,7 +5828,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) | |||
5827 | return 0; | 5828 | return 0; |
5828 | } | 5829 | } |
5829 | 5830 | ||
5830 | void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) | 5831 | void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) |
5831 | { | 5832 | { |
5832 | WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); | 5833 | WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); |
5833 | 5834 | ||
@@ -5838,9 +5839,9 @@ void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) | |||
5838 | } | 5839 | } |
5839 | EXPORT_SYMBOL(md_set_array_sectors); | 5840 | EXPORT_SYMBOL(md_set_array_sectors); |
5840 | 5841 | ||
5841 | static int update_size(mddev_t *mddev, sector_t num_sectors) | 5842 | static int update_size(struct mddev *mddev, sector_t num_sectors) |
5842 | { | 5843 | { |
5843 | mdk_rdev_t *rdev; | 5844 | struct md_rdev *rdev; |
5844 | int rv; | 5845 | int rv; |
5845 | int fit = (num_sectors == 0); | 5846 | int fit = (num_sectors == 0); |
5846 | 5847 | ||
@@ -5876,7 +5877,7 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) | |||
5876 | return rv; | 5877 | return rv; |
5877 | } | 5878 | } |
5878 | 5879 | ||
5879 | static int update_raid_disks(mddev_t *mddev, int raid_disks) | 5880 | static int update_raid_disks(struct mddev *mddev, int raid_disks) |
5880 | { | 5881 | { |
5881 | int rv; | 5882 | int rv; |
5882 | /* change the number of raid disks */ | 5883 | /* change the number of raid disks */ |
@@ -5904,7 +5905,7 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks) | |||
5904 | * Any differences that cannot be handled will cause an error. | 5905 | * Any differences that cannot be handled will cause an error. |
5905 | * Normally, only one change can be managed at a time. | 5906 | * Normally, only one change can be managed at a time. |
5906 | */ | 5907 | */ |
5907 | static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) | 5908 | static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) |
5908 | { | 5909 | { |
5909 | int rv = 0; | 5910 | int rv = 0; |
5910 | int cnt = 0; | 5911 | int cnt = 0; |
@@ -5997,9 +5998,9 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) | |||
5997 | return rv; | 5998 | return rv; |
5998 | } | 5999 | } |
5999 | 6000 | ||
6000 | static int set_disk_faulty(mddev_t *mddev, dev_t dev) | 6001 | static int set_disk_faulty(struct mddev *mddev, dev_t dev) |
6001 | { | 6002 | { |
6002 | mdk_rdev_t *rdev; | 6003 | struct md_rdev *rdev; |
6003 | 6004 | ||
6004 | if (mddev->pers == NULL) | 6005 | if (mddev->pers == NULL) |
6005 | return -ENODEV; | 6006 | return -ENODEV; |
@@ -6022,7 +6023,7 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev) | |||
6022 | */ | 6023 | */ |
6023 | static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 6024 | static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
6024 | { | 6025 | { |
6025 | mddev_t *mddev = bdev->bd_disk->private_data; | 6026 | struct mddev *mddev = bdev->bd_disk->private_data; |
6026 | 6027 | ||
6027 | geo->heads = 2; | 6028 | geo->heads = 2; |
6028 | geo->sectors = 4; | 6029 | geo->sectors = 4; |
@@ -6035,7 +6036,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, | |||
6035 | { | 6036 | { |
6036 | int err = 0; | 6037 | int err = 0; |
6037 | void __user *argp = (void __user *)arg; | 6038 | void __user *argp = (void __user *)arg; |
6038 | mddev_t *mddev = NULL; | 6039 | struct mddev *mddev = NULL; |
6039 | int ro; | 6040 | int ro; |
6040 | 6041 | ||
6041 | if (!capable(CAP_SYS_ADMIN)) | 6042 | if (!capable(CAP_SYS_ADMIN)) |
@@ -6298,7 +6299,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
6298 | * Succeed if we can lock the mddev, which confirms that | 6299 | * Succeed if we can lock the mddev, which confirms that |
6299 | * it isn't being stopped right now. | 6300 | * it isn't being stopped right now. |
6300 | */ | 6301 | */ |
6301 | mddev_t *mddev = mddev_find(bdev->bd_dev); | 6302 | struct mddev *mddev = mddev_find(bdev->bd_dev); |
6302 | int err; | 6303 | int err; |
6303 | 6304 | ||
6304 | if (mddev->gendisk != bdev->bd_disk) { | 6305 | if (mddev->gendisk != bdev->bd_disk) { |
@@ -6327,7 +6328,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
6327 | 6328 | ||
6328 | static int md_release(struct gendisk *disk, fmode_t mode) | 6329 | static int md_release(struct gendisk *disk, fmode_t mode) |
6329 | { | 6330 | { |
6330 | mddev_t *mddev = disk->private_data; | 6331 | struct mddev *mddev = disk->private_data; |
6331 | 6332 | ||
6332 | BUG_ON(!mddev); | 6333 | BUG_ON(!mddev); |
6333 | atomic_dec(&mddev->openers); | 6334 | atomic_dec(&mddev->openers); |
@@ -6338,14 +6339,14 @@ static int md_release(struct gendisk *disk, fmode_t mode) | |||
6338 | 6339 | ||
6339 | static int md_media_changed(struct gendisk *disk) | 6340 | static int md_media_changed(struct gendisk *disk) |
6340 | { | 6341 | { |
6341 | mddev_t *mddev = disk->private_data; | 6342 | struct mddev *mddev = disk->private_data; |
6342 | 6343 | ||
6343 | return mddev->changed; | 6344 | return mddev->changed; |
6344 | } | 6345 | } |
6345 | 6346 | ||
6346 | static int md_revalidate(struct gendisk *disk) | 6347 | static int md_revalidate(struct gendisk *disk) |
6347 | { | 6348 | { |
6348 | mddev_t *mddev = disk->private_data; | 6349 | struct mddev *mddev = disk->private_data; |
6349 | 6350 | ||
6350 | mddev->changed = 0; | 6351 | mddev->changed = 0; |
6351 | return 0; | 6352 | return 0; |
@@ -6366,7 +6367,7 @@ static const struct block_device_operations md_fops = | |||
6366 | 6367 | ||
6367 | static int md_thread(void * arg) | 6368 | static int md_thread(void * arg) |
6368 | { | 6369 | { |
6369 | mdk_thread_t *thread = arg; | 6370 | struct md_thread *thread = arg; |
6370 | 6371 | ||
6371 | /* | 6372 | /* |
6372 | * md_thread is a 'system-thread', it's priority should be very | 6373 | * md_thread is a 'system-thread', it's priority should be very |
@@ -6405,21 +6406,21 @@ static int md_thread(void * arg) | |||
6405 | return 0; | 6406 | return 0; |
6406 | } | 6407 | } |
6407 | 6408 | ||
6408 | void md_wakeup_thread(mdk_thread_t *thread) | 6409 | void md_wakeup_thread(struct md_thread *thread) |
6409 | { | 6410 | { |
6410 | if (thread) { | 6411 | if (thread) { |
6411 | dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); | 6412 | pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); |
6412 | set_bit(THREAD_WAKEUP, &thread->flags); | 6413 | set_bit(THREAD_WAKEUP, &thread->flags); |
6413 | wake_up(&thread->wqueue); | 6414 | wake_up(&thread->wqueue); |
6414 | } | 6415 | } |
6415 | } | 6416 | } |
6416 | 6417 | ||
6417 | mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, | 6418 | struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev, |
6418 | const char *name) | 6419 | const char *name) |
6419 | { | 6420 | { |
6420 | mdk_thread_t *thread; | 6421 | struct md_thread *thread; |
6421 | 6422 | ||
6422 | thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); | 6423 | thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); |
6423 | if (!thread) | 6424 | if (!thread) |
6424 | return NULL; | 6425 | return NULL; |
6425 | 6426 | ||
@@ -6439,12 +6440,12 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, | |||
6439 | return thread; | 6440 | return thread; |
6440 | } | 6441 | } |
6441 | 6442 | ||
6442 | void md_unregister_thread(mdk_thread_t **threadp) | 6443 | void md_unregister_thread(struct md_thread **threadp) |
6443 | { | 6444 | { |
6444 | mdk_thread_t *thread = *threadp; | 6445 | struct md_thread *thread = *threadp; |
6445 | if (!thread) | 6446 | if (!thread) |
6446 | return; | 6447 | return; |
6447 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); | 6448 | pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); |
6448 | /* Locking ensures that mddev_unlock does not wake_up a | 6449 | /* Locking ensures that mddev_unlock does not wake_up a |
6449 | * non-existent thread | 6450 | * non-existent thread |
6450 | */ | 6451 | */ |
@@ -6456,7 +6457,7 @@ void md_unregister_thread(mdk_thread_t **threadp) | |||
6456 | kfree(thread); | 6457 | kfree(thread); |
6457 | } | 6458 | } |
6458 | 6459 | ||
6459 | void md_error(mddev_t *mddev, mdk_rdev_t *rdev) | 6460 | void md_error(struct mddev *mddev, struct md_rdev *rdev) |
6460 | { | 6461 | { |
6461 | if (!mddev) { | 6462 | if (!mddev) { |
6462 | MD_BUG(); | 6463 | MD_BUG(); |
@@ -6485,7 +6486,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
6485 | static void status_unused(struct seq_file *seq) | 6486 | static void status_unused(struct seq_file *seq) |
6486 | { | 6487 | { |
6487 | int i = 0; | 6488 | int i = 0; |
6488 | mdk_rdev_t *rdev; | 6489 | struct md_rdev *rdev; |
6489 | 6490 | ||
6490 | seq_printf(seq, "unused devices: "); | 6491 | seq_printf(seq, "unused devices: "); |
6491 | 6492 | ||
@@ -6502,7 +6503,7 @@ static void status_unused(struct seq_file *seq) | |||
6502 | } | 6503 | } |
6503 | 6504 | ||
6504 | 6505 | ||
6505 | static void status_resync(struct seq_file *seq, mddev_t * mddev) | 6506 | static void status_resync(struct seq_file *seq, struct mddev * mddev) |
6506 | { | 6507 | { |
6507 | sector_t max_sectors, resync, res; | 6508 | sector_t max_sectors, resync, res; |
6508 | unsigned long dt, db; | 6509 | unsigned long dt, db; |
@@ -6593,7 +6594,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos) | |||
6593 | { | 6594 | { |
6594 | struct list_head *tmp; | 6595 | struct list_head *tmp; |
6595 | loff_t l = *pos; | 6596 | loff_t l = *pos; |
6596 | mddev_t *mddev; | 6597 | struct mddev *mddev; |
6597 | 6598 | ||
6598 | if (l >= 0x10000) | 6599 | if (l >= 0x10000) |
6599 | return NULL; | 6600 | return NULL; |
@@ -6604,7 +6605,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos) | |||
6604 | spin_lock(&all_mddevs_lock); | 6605 | spin_lock(&all_mddevs_lock); |
6605 | list_for_each(tmp,&all_mddevs) | 6606 | list_for_each(tmp,&all_mddevs) |
6606 | if (!l--) { | 6607 | if (!l--) { |
6607 | mddev = list_entry(tmp, mddev_t, all_mddevs); | 6608 | mddev = list_entry(tmp, struct mddev, all_mddevs); |
6608 | mddev_get(mddev); | 6609 | mddev_get(mddev); |
6609 | spin_unlock(&all_mddevs_lock); | 6610 | spin_unlock(&all_mddevs_lock); |
6610 | return mddev; | 6611 | return mddev; |
@@ -6618,7 +6619,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos) | |||
6618 | static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 6619 | static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
6619 | { | 6620 | { |
6620 | struct list_head *tmp; | 6621 | struct list_head *tmp; |
6621 | mddev_t *next_mddev, *mddev = v; | 6622 | struct mddev *next_mddev, *mddev = v; |
6622 | 6623 | ||
6623 | ++*pos; | 6624 | ++*pos; |
6624 | if (v == (void*)2) | 6625 | if (v == (void*)2) |
@@ -6630,7 +6631,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
6630 | else | 6631 | else |
6631 | tmp = mddev->all_mddevs.next; | 6632 | tmp = mddev->all_mddevs.next; |
6632 | if (tmp != &all_mddevs) | 6633 | if (tmp != &all_mddevs) |
6633 | next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); | 6634 | next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); |
6634 | else { | 6635 | else { |
6635 | next_mddev = (void*)2; | 6636 | next_mddev = (void*)2; |
6636 | *pos = 0x10000; | 6637 | *pos = 0x10000; |
@@ -6645,7 +6646,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
6645 | 6646 | ||
6646 | static void md_seq_stop(struct seq_file *seq, void *v) | 6647 | static void md_seq_stop(struct seq_file *seq, void *v) |
6647 | { | 6648 | { |
6648 | mddev_t *mddev = v; | 6649 | struct mddev *mddev = v; |
6649 | 6650 | ||
6650 | if (mddev && v != (void*)1 && v != (void*)2) | 6651 | if (mddev && v != (void*)1 && v != (void*)2) |
6651 | mddev_put(mddev); | 6652 | mddev_put(mddev); |
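md_seq_start/next/stop implement the usual seq_file cursor for /proc/mdstat, with two quirks visible above: positions (void *)1 and (void *)2 are sentinel records for the "Personalities" header and the trailing "unused devices" line, and every real position pins its mddev with mddev_get() until md_seq_stop() drops it. The ops table wires up in the standard way:

	static const struct seq_operations md_seq_ops = {
		.start	= md_seq_start,
		.next	= md_seq_next,
		.stop	= md_seq_stop,
		.show	= md_seq_show,
	};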
@@ -6653,13 +6654,13 @@ static void md_seq_stop(struct seq_file *seq, void *v) | |||
6653 | 6654 | ||
6654 | static int md_seq_show(struct seq_file *seq, void *v) | 6655 | static int md_seq_show(struct seq_file *seq, void *v) |
6655 | { | 6656 | { |
6656 | mddev_t *mddev = v; | 6657 | struct mddev *mddev = v; |
6657 | sector_t sectors; | 6658 | sector_t sectors; |
6658 | mdk_rdev_t *rdev; | 6659 | struct md_rdev *rdev; |
6659 | struct bitmap *bitmap; | 6660 | struct bitmap *bitmap; |
6660 | 6661 | ||
6661 | if (v == (void*)1) { | 6662 | if (v == (void*)1) { |
6662 | struct mdk_personality *pers; | 6663 | struct md_personality *pers; |
6663 | seq_printf(seq, "Personalities : "); | 6664 | seq_printf(seq, "Personalities : "); |
6664 | spin_lock(&pers_lock); | 6665 | spin_lock(&pers_lock); |
6665 | list_for_each_entry(pers, &pers_list, list) | 6666 | list_for_each_entry(pers, &pers_list, list) |
@@ -6815,7 +6816,7 @@ static const struct file_operations md_seq_fops = { | |||
6815 | .poll = mdstat_poll, | 6816 | .poll = mdstat_poll, |
6816 | }; | 6817 | }; |
6817 | 6818 | ||
6818 | int register_md_personality(struct mdk_personality *p) | 6819 | int register_md_personality(struct md_personality *p) |
6819 | { | 6820 | { |
6820 | spin_lock(&pers_lock); | 6821 | spin_lock(&pers_lock); |
6821 | list_add_tail(&p->list, &pers_list); | 6822 | list_add_tail(&p->list, &pers_list); |
@@ -6824,7 +6825,7 @@ int register_md_personality(struct mdk_personality *p) | |||
6824 | return 0; | 6825 | return 0; |
6825 | } | 6826 | } |
6826 | 6827 | ||
6827 | int unregister_md_personality(struct mdk_personality *p) | 6828 | int unregister_md_personality(struct md_personality *p) |
6828 | { | 6829 | { |
6829 | printk(KERN_INFO "md: %s personality unregistered\n", p->name); | 6830 | printk(KERN_INFO "md: %s personality unregistered\n", p->name); |
6830 | spin_lock(&pers_lock); | 6831 | spin_lock(&pers_lock); |
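register_md_personality()/unregister_md_personality() are why the mdk_personality rename fans out across every RAID module in this merge: each personality hands in a statically initialized ops table at module load. A minimal module-side sketch (the "example" personality and its level number are invented):

	static struct md_personality example_personality =
	{
		.name		= "example",
		.level		= -7,		/* hypothetical level number */
		.owner		= THIS_MODULE,
		.make_request	= example_make_request,
		.run		= example_run,
		.stop		= example_stop,
		.status		= example_status,
	};

	static int __init example_init(void)
	{
		return register_md_personality(&example_personality);
	}

	static void __exit example_exit(void)
	{
		unregister_md_personality(&example_personality);
	}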
@@ -6833,9 +6834,9 @@ int unregister_md_personality(struct mdk_personality *p) | |||
6833 | return 0; | 6834 | return 0; |
6834 | } | 6835 | } |
6835 | 6836 | ||
6836 | static int is_mddev_idle(mddev_t *mddev, int init) | 6837 | static int is_mddev_idle(struct mddev *mddev, int init) |
6837 | { | 6838 | { |
6838 | mdk_rdev_t * rdev; | 6839 | struct md_rdev * rdev; |
6839 | int idle; | 6840 | int idle; |
6840 | int curr_events; | 6841 | int curr_events; |
6841 | 6842 | ||
@@ -6877,7 +6878,7 @@ static int is_mddev_idle(mddev_t *mddev, int init) | |||
6877 | return idle; | 6878 | return idle; |
6878 | } | 6879 | } |
6879 | 6880 | ||
6880 | void md_done_sync(mddev_t *mddev, int blocks, int ok) | 6881 | void md_done_sync(struct mddev *mddev, int blocks, int ok) |
6881 | { | 6882 | { |
6882 | /* another "blocks" (512byte) blocks have been synced */ | 6883 | /* another "blocks" (512byte) blocks have been synced */ |
6883 | atomic_sub(blocks, &mddev->recovery_active); | 6884 | atomic_sub(blocks, &mddev->recovery_active); |
@@ -6895,7 +6896,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok) | |||
6895 | * in superblock) before writing, schedule a superblock update | 6896 | * in superblock) before writing, schedule a superblock update |
6896 | * and wait for it to complete. | 6897 | * and wait for it to complete. |
6897 | */ | 6898 | */ |
6898 | void md_write_start(mddev_t *mddev, struct bio *bi) | 6899 | void md_write_start(struct mddev *mddev, struct bio *bi) |
6899 | { | 6900 | { |
6900 | int did_change = 0; | 6901 | int did_change = 0; |
6901 | if (bio_data_dir(bi) != WRITE) | 6902 | if (bio_data_dir(bi) != WRITE) |
@@ -6930,7 +6931,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
6930 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | 6931 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); |
6931 | } | 6932 | } |
6932 | 6933 | ||
6933 | void md_write_end(mddev_t *mddev) | 6934 | void md_write_end(struct mddev *mddev) |
6934 | { | 6935 | { |
6935 | if (atomic_dec_and_test(&mddev->writes_pending)) { | 6936 | if (atomic_dec_and_test(&mddev->writes_pending)) { |
6936 | if (mddev->safemode == 2) | 6937 | if (mddev->safemode == 2) |
@@ -6949,7 +6950,7 @@ void md_write_end(mddev_t *mddev) | |||
6949 | * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock | 6950 | * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock |
6950 | * is dropped, so return -EAGAIN after notifying userspace. | 6951 | * is dropped, so return -EAGAIN after notifying userspace. |
6951 | */ | 6952 | */ |
6952 | int md_allow_write(mddev_t *mddev) | 6953 | int md_allow_write(struct mddev *mddev) |
6953 | { | 6954 | { |
6954 | if (!mddev->pers) | 6955 | if (!mddev->pers) |
6955 | return 0; | 6956 | return 0; |
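[Editor's note] a hedged sketch of the calling convention the comment above describes: with external metadata md_allow_write() returns -EAGAIN once userspace has been notified, and the caller must drop the mddev lock before the transition can finish (raid5's stripe-cache resize follows this pattern):

    int err = md_allow_write(mddev);
    if (err)
            return err;     /* -EAGAIN: caller drops the mddev lock and retries */
    /* array is now marked active/dirty; deferred writes may proceed */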
@@ -6981,9 +6982,9 @@ EXPORT_SYMBOL_GPL(md_allow_write); | |||
6981 | 6982 | ||
6982 | #define SYNC_MARKS 10 | 6983 | #define SYNC_MARKS 10 |
6983 | #define SYNC_MARK_STEP (3*HZ) | 6984 | #define SYNC_MARK_STEP (3*HZ) |
6984 | void md_do_sync(mddev_t *mddev) | 6985 | void md_do_sync(struct mddev *mddev) |
6985 | { | 6986 | { |
6986 | mddev_t *mddev2; | 6987 | struct mddev *mddev2; |
6987 | unsigned int currspeed = 0, | 6988 | unsigned int currspeed = 0, |
6988 | window; | 6989 | window; |
6989 | sector_t max_sectors,j, io_sectors; | 6990 | sector_t max_sectors,j, io_sectors; |
@@ -6993,7 +6994,7 @@ void md_do_sync(mddev_t *mddev) | |||
6993 | struct list_head *tmp; | 6994 | struct list_head *tmp; |
6994 | sector_t last_check; | 6995 | sector_t last_check; |
6995 | int skipped = 0; | 6996 | int skipped = 0; |
6996 | mdk_rdev_t *rdev; | 6997 | struct md_rdev *rdev; |
6997 | char *desc; | 6998 | char *desc; |
6998 | 6999 | ||
6999 | /* just in case the thread restarts... */ | 7000 | /* just in case the thread restarts... */ |
@@ -7308,9 +7309,9 @@ void md_do_sync(mddev_t *mddev) | |||
7308 | } | 7309 | } |
7309 | EXPORT_SYMBOL_GPL(md_do_sync); | 7310 | EXPORT_SYMBOL_GPL(md_do_sync); |
7310 | 7311 | ||
7311 | static int remove_and_add_spares(mddev_t *mddev) | 7312 | static int remove_and_add_spares(struct mddev *mddev) |
7312 | { | 7313 | { |
7313 | mdk_rdev_t *rdev; | 7314 | struct md_rdev *rdev; |
7314 | int spares = 0; | 7315 | int spares = 0; |
7315 | 7316 | ||
7316 | mddev->curr_resync_completed = 0; | 7317 | mddev->curr_resync_completed = 0; |
@@ -7352,9 +7353,9 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
7352 | return spares; | 7353 | return spares; |
7353 | } | 7354 | } |
7354 | 7355 | ||
7355 | static void reap_sync_thread(mddev_t *mddev) | 7356 | static void reap_sync_thread(struct mddev *mddev) |
7356 | { | 7357 | { |
7357 | mdk_rdev_t *rdev; | 7358 | struct md_rdev *rdev; |
7358 | 7359 | ||
7359 | /* resync has finished, collect result */ | 7360 | /* resync has finished, collect result */ |
7360 | md_unregister_thread(&mddev->sync_thread); | 7361 | md_unregister_thread(&mddev->sync_thread); |
@@ -7369,15 +7370,19 @@ static void reap_sync_thread(mddev_t *mddev) | |||
7369 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | 7370 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && |
7370 | mddev->pers->finish_reshape) | 7371 | mddev->pers->finish_reshape) |
7371 | mddev->pers->finish_reshape(mddev); | 7372 | mddev->pers->finish_reshape(mddev); |
7372 | md_update_sb(mddev, 1); | ||
7373 | 7373 | ||
7374 | /* if array is no longer degraded, then any saved_raid_disk | 7374 | /* If array is no longer degraded, then any saved_raid_disk |
7375 | * information must be scrapped | 7375 | * information must be scrapped. Also if any device is now |
7376 | * In_sync we must scrap the saved_raid_disk for that device, | ||
7377 | * so that the superblock for an incrementally recovered device | ||
7378 | * gets written out. | ||
7376 | */ | 7379 | */ |
7377 | if (!mddev->degraded) | 7380 | list_for_each_entry(rdev, &mddev->disks, same_set) |
7378 | list_for_each_entry(rdev, &mddev->disks, same_set) | 7381 | if (!mddev->degraded || |
7382 | test_bit(In_sync, &rdev->flags)) | ||
7379 | rdev->saved_raid_disk = -1; | 7383 | rdev->saved_raid_disk = -1; |
7380 | 7384 | ||
7385 | md_update_sb(mddev, 1); | ||
7381 | clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 7386 | clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
7382 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 7387 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
7383 | clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 7388 | clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
@@ -7413,7 +7418,7 @@ static void reap_sync_thread(mddev_t *mddev) | |||
7413 | * 5/ If array is degraded, try to add spare devices | 7418 | * 5/ If array is degraded, try to add spare devices |
7414 | * 6/ If array has spares or is not in-sync, start a resync thread. | 7419 | * 6/ If array has spares or is not in-sync, start a resync thread. |
7415 | */ | 7420 | */ |
7416 | void md_check_recovery(mddev_t *mddev) | 7421 | void md_check_recovery(struct mddev *mddev) |
7417 | { | 7422 | { |
7418 | if (mddev->suspended) | 7423 | if (mddev->suspended) |
7419 | return; | 7424 | return; |
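[Editor's note] the checklist above runs from the per-array management thread; a personality that detects a failure only has to request a pass, roughly as follows (a common md idiom, shown here as a sketch):

    /* ask md_check_recovery() to run on the next thread wakeup */
    set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    md_wakeup_thread(mddev->thread);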
@@ -7449,7 +7454,7 @@ void md_check_recovery(mddev_t *mddev) | |||
7449 | /* Only thing we do on a ro array is remove | 7454 | /* Only thing we do on a ro array is remove |
7450 | * failed devices. | 7455 | * failed devices. |
7451 | */ | 7456 | */ |
7452 | mdk_rdev_t *rdev; | 7457 | struct md_rdev *rdev; |
7453 | list_for_each_entry(rdev, &mddev->disks, same_set) | 7458 | list_for_each_entry(rdev, &mddev->disks, same_set) |
7454 | if (rdev->raid_disk >= 0 && | 7459 | if (rdev->raid_disk >= 0 && |
7455 | !test_bit(Blocked, &rdev->flags) && | 7460 | !test_bit(Blocked, &rdev->flags) && |
@@ -7573,7 +7578,7 @@ void md_check_recovery(mddev_t *mddev) | |||
7573 | } | 7578 | } |
7574 | } | 7579 | } |
7575 | 7580 | ||
7576 | void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | 7581 | void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) |
7577 | { | 7582 | { |
7578 | sysfs_notify_dirent_safe(rdev->sysfs_state); | 7583 | sysfs_notify_dirent_safe(rdev->sysfs_state); |
7579 | wait_event_timeout(rdev->blocked_wait, | 7584 | wait_event_timeout(rdev->blocked_wait, |
@@ -7831,7 +7836,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, | |||
7831 | return rv; | 7836 | return rv; |
7832 | } | 7837 | } |
7833 | 7838 | ||
7834 | int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors, | 7839 | int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, |
7835 | int acknowledged) | 7840 | int acknowledged) |
7836 | { | 7841 | { |
7837 | int rv = md_set_badblocks(&rdev->badblocks, | 7842 | int rv = md_set_badblocks(&rdev->badblocks, |
@@ -7940,7 +7945,7 @@ out: | |||
7940 | return rv; | 7945 | return rv; |
7941 | } | 7946 | } |
7942 | 7947 | ||
7943 | int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors) | 7948 | int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors) |
7944 | { | 7949 | { |
7945 | return md_clear_badblocks(&rdev->badblocks, | 7950 | return md_clear_badblocks(&rdev->badblocks, |
7946 | s + rdev->data_offset, | 7951 | s + rdev->data_offset, |
@@ -8074,13 +8079,14 @@ static int md_notify_reboot(struct notifier_block *this, | |||
8074 | unsigned long code, void *x) | 8079 | unsigned long code, void *x) |
8075 | { | 8080 | { |
8076 | struct list_head *tmp; | 8081 | struct list_head *tmp; |
8077 | mddev_t *mddev; | 8082 | struct mddev *mddev; |
8083 | int need_delay = 0; | ||
8078 | 8084 | ||
8079 | if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { | 8085 | if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { |
8080 | 8086 | ||
8081 | printk(KERN_INFO "md: stopping all md devices.\n"); | 8087 | printk(KERN_INFO "md: stopping all md devices.\n"); |
8082 | 8088 | ||
8083 | for_each_mddev(mddev, tmp) | 8089 | for_each_mddev(mddev, tmp) { |
8084 | if (mddev_trylock(mddev)) { | 8090 | if (mddev_trylock(mddev)) { |
8085 | /* Force a switch to readonly even array | 8091 | /* Force a switch to readonly even array |
8086 | * appears to still be in use. Hence | 8092 | * appears to still be in use. Hence |
@@ -8089,13 +8095,16 @@ static int md_notify_reboot(struct notifier_block *this, | |||
8089 | md_set_readonly(mddev, 100); | 8095 | md_set_readonly(mddev, 100); |
8090 | mddev_unlock(mddev); | 8096 | mddev_unlock(mddev); |
8091 | } | 8097 | } |
8098 | need_delay = 1; | ||
8099 | } | ||
8092 | /* | 8100 | /* |
8093 | * certain more exotic SCSI devices are known to be | 8101 | * certain more exotic SCSI devices are known to be |
8094 | * volatile wrt too early system reboots. While the | 8102 | * volatile wrt too early system reboots. While the |
8095 | * right place to handle this issue is the given | 8103 | * right place to handle this issue is the given |
8096 | * driver, we do want to have a safe RAID driver ... | 8104 | * driver, we do want to have a safe RAID driver ... |
8097 | */ | 8105 | */ |
8098 | mdelay(1000*1); | 8106 | if (need_delay) |
8107 | mdelay(1000*1); | ||
8099 | } | 8108 | } |
8100 | return NOTIFY_DONE; | 8109 | return NOTIFY_DONE; |
8101 | } | 8110 | } |
@@ -8108,7 +8117,7 @@ static struct notifier_block md_notifier = { | |||
8108 | 8117 | ||
8109 | static void md_geninit(void) | 8118 | static void md_geninit(void) |
8110 | { | 8119 | { |
8111 | dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); | 8120 | pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); |
8112 | 8121 | ||
8113 | proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); | 8122 | proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); |
8114 | } | 8123 | } |
@@ -8183,7 +8192,7 @@ void md_autodetect_dev(dev_t dev) | |||
8183 | 8192 | ||
8184 | static void autostart_arrays(int part) | 8193 | static void autostart_arrays(int part) |
8185 | { | 8194 | { |
8186 | mdk_rdev_t *rdev; | 8195 | struct md_rdev *rdev; |
8187 | struct detected_devices_node *node_detected_dev; | 8196 | struct detected_devices_node *node_detected_dev; |
8188 | dev_t dev; | 8197 | dev_t dev; |
8189 | int i_scanned, i_passed; | 8198 | int i_scanned, i_passed; |
@@ -8223,7 +8232,7 @@ static void autostart_arrays(int part) | |||
8223 | 8232 | ||
8224 | static __exit void md_exit(void) | 8233 | static __exit void md_exit(void) |
8225 | { | 8234 | { |
8226 | mddev_t *mddev; | 8235 | struct mddev *mddev; |
8227 | struct list_head *tmp; | 8236 | struct list_head *tmp; |
8228 | 8237 | ||
8229 | blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); | 8238 | blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 0a309dc29b45..51c1d91557e0 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | md_k.h : kernel internal structure of the Linux MD driver | 2 | md.h : kernel internal structure of the Linux MD driver |
3 | Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman | 3 | Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify | 5 | This program is free software; you can redistribute it and/or modify |
@@ -26,9 +26,6 @@ | |||
26 | 26 | ||
27 | #define MaxSector (~(sector_t)0) | 27 | #define MaxSector (~(sector_t)0) |
28 | 28 | ||
29 | typedef struct mddev_s mddev_t; | ||
30 | typedef struct mdk_rdev_s mdk_rdev_t; | ||
31 | |||
32 | /* Bad block numbers are stored sorted in a single page. | 29 | /* Bad block numbers are stored sorted in a single page. |
33 | * 64bits is used for each block or extent. | 30 | * 64bits is used for each block or extent. |
34 | * 54 bits are sector number, 9 bits are extent size, | 31 | * 54 bits are sector number, 9 bits are extent size, |
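[Editor's note] a userspace sketch of that 64-bit packing — sector in bits 9..62, extent length minus one in bits 0..8, and (per md.h's BB_* macros) an 'acknowledged' flag in bit 63; the helper names here are illustrative:

    #include <stdint.h>

    static inline uint64_t bb_make(uint64_t sector, unsigned len, int ack)
    {
            /* len is 1..512 sectors, stored as len-1 in the low 9 bits */
            return (sector << 9) | (uint64_t)(len - 1) | ((uint64_t)(!!ack) << 63);
    }
    static inline uint64_t bb_sector(uint64_t e) { return (e >> 9) & ((1ULL << 54) - 1); }
    static inline unsigned bb_len(uint64_t e)    { return (unsigned)(e & 0x1ff) + 1; }
    static inline int      bb_ack(uint64_t e)    { return (int)(e >> 63); }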
@@ -39,12 +36,11 @@ typedef struct mdk_rdev_s mdk_rdev_t; | |||
39 | /* | 36 | /* |
40 | * MD's 'extended' device | 37 | * MD's 'extended' device |
41 | */ | 38 | */ |
42 | struct mdk_rdev_s | 39 | struct md_rdev { |
43 | { | ||
44 | struct list_head same_set; /* RAID devices within the same set */ | 40 | struct list_head same_set; /* RAID devices within the same set */ |
45 | 41 | ||
46 | sector_t sectors; /* Device size (in 512bytes sectors) */ | 42 | sector_t sectors; /* Device size (in 512bytes sectors) */ |
47 | mddev_t *mddev; /* RAID array if running */ | 43 | struct mddev *mddev; /* RAID array if running */ |
48 | int last_events; /* IO event timestamp */ | 44 | int last_events; /* IO event timestamp */ |
49 | 45 | ||
50 | /* | 46 | /* |
@@ -168,7 +164,7 @@ struct mdk_rdev_s | |||
168 | 164 | ||
169 | extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, | 165 | extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, |
170 | sector_t *first_bad, int *bad_sectors); | 166 | sector_t *first_bad, int *bad_sectors); |
171 | static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors, | 167 | static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, |
172 | sector_t *first_bad, int *bad_sectors) | 168 | sector_t *first_bad, int *bad_sectors) |
173 | { | 169 | { |
174 | if (unlikely(rdev->badblocks.count)) { | 170 | if (unlikely(rdev->badblocks.count)) { |
@@ -181,15 +177,14 @@ static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors, | |||
181 | } | 177 | } |
182 | return 0; | 178 | return 0; |
183 | } | 179 | } |
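[Editor's note] a sketch of how a read path consults this helper before issuing I/O, in the style of raid1's read balancing (the surrounding mirror-selection loop and variable names are hypothetical):

    sector_t first_bad;
    int bad_sectors;

    if (is_badblock(rdev, this_sector, sectors, &first_bad, &bad_sectors)) {
            if (first_bad <= this_sector)
                    continue;               /* region starts bad: try the next mirror */
            sectors = first_bad - this_sector;  /* read only up to the bad range */
    }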
184 | extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors, | 180 | extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, |
185 | int acknowledged); | 181 | int acknowledged); |
186 | extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors); | 182 | extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors); |
187 | extern void md_ack_all_badblocks(struct badblocks *bb); | 183 | extern void md_ack_all_badblocks(struct badblocks *bb); |
188 | 184 | ||
189 | struct mddev_s | 185 | struct mddev { |
190 | { | ||
191 | void *private; | 186 | void *private; |
192 | struct mdk_personality *pers; | 187 | struct md_personality *pers; |
193 | dev_t unit; | 188 | dev_t unit; |
194 | int md_minor; | 189 | int md_minor; |
195 | struct list_head disks; | 190 | struct list_head disks; |
@@ -256,8 +251,8 @@ struct mddev_s | |||
256 | atomic_t plug_cnt; /* If device is expecting | 251 | atomic_t plug_cnt; /* If device is expecting |
257 | * more bios soon. | 252 | * more bios soon. |
258 | */ | 253 | */ |
259 | struct mdk_thread_s *thread; /* management thread */ | 254 | struct md_thread *thread; /* management thread */ |
260 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ | 255 | struct md_thread *sync_thread; /* doing resync or reconstruct */ |
261 | sector_t curr_resync; /* last block scheduled */ | 256 | sector_t curr_resync; /* last block scheduled */ |
262 | /* As resync requests can complete out of order, we cannot easily track | 257 | /* As resync requests can complete out of order, we cannot easily track |
263 | * how much resync has been completed. So we occasionally pause until | 258 | * how much resync has been completed. So we occasionally pause until |
@@ -402,11 +397,11 @@ struct mddev_s | |||
402 | atomic_t flush_pending; | 397 | atomic_t flush_pending; |
403 | struct work_struct flush_work; | 398 | struct work_struct flush_work; |
404 | struct work_struct event_work; /* used by dm to report failure event */ | 399 | struct work_struct event_work; /* used by dm to report failure event */ |
405 | void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); | 400 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); |
406 | }; | 401 | }; |
407 | 402 | ||
408 | 403 | ||
409 | static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) | 404 | static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) |
410 | { | 405 | { |
411 | int faulty = test_bit(Faulty, &rdev->flags); | 406 | int faulty = test_bit(Faulty, &rdev->flags); |
412 | if (atomic_dec_and_test(&rdev->nr_pending) && faulty) | 407 | if (atomic_dec_and_test(&rdev->nr_pending) && faulty) |
@@ -418,35 +413,35 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect | |||
418 | atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); | 413 | atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); |
419 | } | 414 | } |
420 | 415 | ||
421 | struct mdk_personality | 416 | struct md_personality |
422 | { | 417 | { |
423 | char *name; | 418 | char *name; |
424 | int level; | 419 | int level; |
425 | struct list_head list; | 420 | struct list_head list; |
426 | struct module *owner; | 421 | struct module *owner; |
427 | int (*make_request)(mddev_t *mddev, struct bio *bio); | 422 | int (*make_request)(struct mddev *mddev, struct bio *bio); |
428 | int (*run)(mddev_t *mddev); | 423 | int (*run)(struct mddev *mddev); |
429 | int (*stop)(mddev_t *mddev); | 424 | int (*stop)(struct mddev *mddev); |
430 | void (*status)(struct seq_file *seq, mddev_t *mddev); | 425 | void (*status)(struct seq_file *seq, struct mddev *mddev); |
431 | /* error_handler must set ->faulty and clear ->in_sync | 426 | /* error_handler must set ->faulty and clear ->in_sync |
432 | * if appropriate, and should abort recovery if needed | 427 | * if appropriate, and should abort recovery if needed |
433 | */ | 428 | */ |
434 | void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); | 429 | void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev); |
435 | int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); | 430 | int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev); |
436 | int (*hot_remove_disk) (mddev_t *mddev, int number); | 431 | int (*hot_remove_disk) (struct mddev *mddev, int number); |
437 | int (*spare_active) (mddev_t *mddev); | 432 | int (*spare_active) (struct mddev *mddev); |
438 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); | 433 | sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster); |
439 | int (*resize) (mddev_t *mddev, sector_t sectors); | 434 | int (*resize) (struct mddev *mddev, sector_t sectors); |
440 | sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks); | 435 | sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); |
441 | int (*check_reshape) (mddev_t *mddev); | 436 | int (*check_reshape) (struct mddev *mddev); |
442 | int (*start_reshape) (mddev_t *mddev); | 437 | int (*start_reshape) (struct mddev *mddev); |
443 | void (*finish_reshape) (mddev_t *mddev); | 438 | void (*finish_reshape) (struct mddev *mddev); |
444 | /* quiesce moves between quiescence states | 439 | /* quiesce moves between quiescence states |
445 | * 0 - fully active | 440 | * 0 - fully active |
446 | * 1 - no new requests allowed | 441 | * 1 - no new requests allowed |
447 | * others - reserved | 442 | * others - reserved |
448 | */ | 443 | */ |
449 | void (*quiesce) (mddev_t *mddev, int state); | 444 | void (*quiesce) (struct mddev *mddev, int state); |
450 | /* takeover is used to transition an array from one | 445 | /* takeover is used to transition an array from one |
451 | * personality to another. The new personality must be able | 446 | * personality to another. The new personality must be able |
452 | * to handle the data in the current layout. | 447 | * to handle the data in the current layout. |
@@ -456,14 +451,14 @@ struct mdk_personality | |||
456 | * This needs to be installed and then ->run used to activate the | 451 | * This needs to be installed and then ->run used to activate the |
457 | * array. | 452 | * array. |
458 | */ | 453 | */ |
459 | void *(*takeover) (mddev_t *mddev); | 454 | void *(*takeover) (struct mddev *mddev); |
460 | }; | 455 | }; |
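[Editor's note] with the typedef gone, a personality is declared and registered against struct md_personality directly. A minimal sketch (fields abridged and names hypothetical; the multipath table later in this patch is a real instance):

    static struct md_personality example_personality =
    {
            .name           = "example",
            .level          = -1,           /* hypothetical level number */
            .owner          = THIS_MODULE,
            .make_request   = example_make_request,
            .run            = example_run,
            .stop           = example_stop,
            .status         = example_status,
    };

    static int __init example_init(void)
    {
            return register_md_personality(&example_personality);
    }

    static void __exit example_exit(void)
    {
            unregister_md_personality(&example_personality);
    }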
461 | 456 | ||
462 | 457 | ||
463 | struct md_sysfs_entry { | 458 | struct md_sysfs_entry { |
464 | struct attribute attr; | 459 | struct attribute attr; |
465 | ssize_t (*show)(mddev_t *, char *); | 460 | ssize_t (*show)(struct mddev *, char *); |
466 | ssize_t (*store)(mddev_t *, const char *, size_t); | 461 | ssize_t (*store)(struct mddev *, const char *, size_t); |
467 | }; | 462 | }; |
468 | extern struct attribute_group md_bitmap_group; | 463 | extern struct attribute_group md_bitmap_group; |
469 | 464 | ||
@@ -479,19 +474,19 @@ static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd) | |||
479 | sysfs_notify_dirent(sd); | 474 | sysfs_notify_dirent(sd); |
480 | } | 475 | } |
481 | 476 | ||
482 | static inline char * mdname (mddev_t * mddev) | 477 | static inline char * mdname (struct mddev * mddev) |
483 | { | 478 | { |
484 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; | 479 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; |
485 | } | 480 | } |
486 | 481 | ||
487 | static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev) | 482 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) |
488 | { | 483 | { |
489 | char nm[20]; | 484 | char nm[20]; |
490 | sprintf(nm, "rd%d", rdev->raid_disk); | 485 | sprintf(nm, "rd%d", rdev->raid_disk); |
491 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); | 486 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); |
492 | } | 487 | } |
493 | 488 | ||
494 | static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev) | 489 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) |
495 | { | 490 | { |
496 | char nm[20]; | 491 | char nm[20]; |
497 | sprintf(nm, "rd%d", rdev->raid_disk); | 492 | sprintf(nm, "rd%d", rdev->raid_disk); |
@@ -514,14 +509,14 @@ static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev) | |||
514 | #define rdev_for_each_rcu(rdev, mddev) \ | 509 | #define rdev_for_each_rcu(rdev, mddev) \ |
515 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) | 510 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) |
516 | 511 | ||
517 | typedef struct mdk_thread_s { | 512 | struct md_thread { |
518 | void (*run) (mddev_t *mddev); | 513 | void (*run) (struct mddev *mddev); |
519 | mddev_t *mddev; | 514 | struct mddev *mddev; |
520 | wait_queue_head_t wqueue; | 515 | wait_queue_head_t wqueue; |
521 | unsigned long flags; | 516 | unsigned long flags; |
522 | struct task_struct *tsk; | 517 | struct task_struct *tsk; |
523 | unsigned long timeout; | 518 | unsigned long timeout; |
524 | } mdk_thread_t; | 519 | }; |
525 | 520 | ||
526 | #define THREAD_WAKEUP 0 | 521 | #define THREAD_WAKEUP 0 |
527 | 522 | ||
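[Editor's note] a sketch of the renamed thread type in use, following the extern declarations further down (error handling trimmed; multipath_run()/multipath_stop() in this patch do essentially this):

    static void exampled(struct mddev *mddev)
    {
            md_check_recovery(mddev);       /* housekeeping on every wakeup */
            /* ... drain the personality's retry list ... */
    }

    /* in ->run(): */
    mddev->thread = md_register_thread(exampled, mddev, NULL);
    if (!mddev->thread)
            return -ENOMEM;

    /* in ->stop(): double pointer, so the field is cleared as well */
    md_unregister_thread(&mddev->thread);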
@@ -556,48 +551,50 @@ static inline void safe_put_page(struct page *p) | |||
556 | if (p) put_page(p); | 551 | if (p) put_page(p); |
557 | } | 552 | } |
558 | 553 | ||
559 | extern int register_md_personality(struct mdk_personality *p); | 554 | extern int register_md_personality(struct md_personality *p); |
560 | extern int unregister_md_personality(struct mdk_personality *p); | 555 | extern int unregister_md_personality(struct md_personality *p); |
561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), | 556 | extern struct md_thread *md_register_thread( |
562 | mddev_t *mddev, const char *name); | 557 | void (*run)(struct mddev *mddev), |
563 | extern void md_unregister_thread(mdk_thread_t **threadp); | 558 | struct mddev *mddev, |
564 | extern void md_wakeup_thread(mdk_thread_t *thread); | 559 | const char *name); |
565 | extern void md_check_recovery(mddev_t *mddev); | 560 | extern void md_unregister_thread(struct md_thread **threadp); |
566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | 561 | extern void md_wakeup_thread(struct md_thread *thread); |
567 | extern void md_write_end(mddev_t *mddev); | 562 | extern void md_check_recovery(struct mddev *mddev); |
568 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); | 563 | extern void md_write_start(struct mddev *mddev, struct bio *bi); |
569 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); | 564 | extern void md_write_end(struct mddev *mddev); |
570 | 565 | extern void md_done_sync(struct mddev *mddev, int blocks, int ok); | |
571 | extern int mddev_congested(mddev_t *mddev, int bits); | 566 | extern void md_error(struct mddev *mddev, struct md_rdev *rdev); |
572 | extern void md_flush_request(mddev_t *mddev, struct bio *bio); | 567 | |
573 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | 568 | extern int mddev_congested(struct mddev *mddev, int bits); |
569 | extern void md_flush_request(struct mddev *mddev, struct bio *bio); | ||
570 | extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | ||
574 | sector_t sector, int size, struct page *page); | 571 | sector_t sector, int size, struct page *page); |
575 | extern void md_super_wait(mddev_t *mddev); | 572 | extern void md_super_wait(struct mddev *mddev); |
576 | extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, | 573 | extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
577 | struct page *page, int rw, bool metadata_op); | 574 | struct page *page, int rw, bool metadata_op); |
578 | extern void md_do_sync(mddev_t *mddev); | 575 | extern void md_do_sync(struct mddev *mddev); |
579 | extern void md_new_event(mddev_t *mddev); | 576 | extern void md_new_event(struct mddev *mddev); |
580 | extern int md_allow_write(mddev_t *mddev); | 577 | extern int md_allow_write(struct mddev *mddev); |
581 | extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 578 | extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); |
582 | extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); | 579 | extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); |
583 | extern int md_check_no_bitmap(mddev_t *mddev); | 580 | extern int md_check_no_bitmap(struct mddev *mddev); |
584 | extern int md_integrity_register(mddev_t *mddev); | 581 | extern int md_integrity_register(struct mddev *mddev); |
585 | extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 582 | extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); |
586 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); | 583 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); |
587 | extern void restore_bitmap_write_access(struct file *file); | 584 | extern void restore_bitmap_write_access(struct file *file); |
588 | 585 | ||
589 | extern void mddev_init(mddev_t *mddev); | 586 | extern void mddev_init(struct mddev *mddev); |
590 | extern int md_run(mddev_t *mddev); | 587 | extern int md_run(struct mddev *mddev); |
591 | extern void md_stop(mddev_t *mddev); | 588 | extern void md_stop(struct mddev *mddev); |
592 | extern void md_stop_writes(mddev_t *mddev); | 589 | extern void md_stop_writes(struct mddev *mddev); |
593 | extern int md_rdev_init(mdk_rdev_t *rdev); | 590 | extern int md_rdev_init(struct md_rdev *rdev); |
594 | 591 | ||
595 | extern void mddev_suspend(mddev_t *mddev); | 592 | extern void mddev_suspend(struct mddev *mddev); |
596 | extern void mddev_resume(mddev_t *mddev); | 593 | extern void mddev_resume(struct mddev *mddev); |
597 | extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, | 594 | extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, |
598 | mddev_t *mddev); | 595 | struct mddev *mddev); |
599 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 596 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
600 | mddev_t *mddev); | 597 | struct mddev *mddev); |
601 | extern int mddev_check_plugged(mddev_t *mddev); | 598 | extern int mddev_check_plugged(struct mddev *mddev); |
602 | extern void md_trim_bio(struct bio *bio, int offset, int size); | 599 | extern void md_trim_bio(struct bio *bio, int offset, int size); |
603 | #endif /* _MD_MD_H */ | 600 | #endif /* _MD_MD_H */ |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d5b5fb300171..d32c785e17d4 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #define NR_RESERVED_BUFS 32 | 31 | #define NR_RESERVED_BUFS 32 |
32 | 32 | ||
33 | 33 | ||
34 | static int multipath_map (multipath_conf_t *conf) | 34 | static int multipath_map (struct mpconf *conf) |
35 | { | 35 | { |
36 | int i, disks = conf->raid_disks; | 36 | int i, disks = conf->raid_disks; |
37 | 37 | ||
@@ -42,7 +42,7 @@ static int multipath_map (multipath_conf_t *conf) | |||
42 | 42 | ||
43 | rcu_read_lock(); | 43 | rcu_read_lock(); |
44 | for (i = 0; i < disks; i++) { | 44 | for (i = 0; i < disks; i++) { |
45 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); | 45 | struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); |
46 | if (rdev && test_bit(In_sync, &rdev->flags)) { | 46 | if (rdev && test_bit(In_sync, &rdev->flags)) { |
47 | atomic_inc(&rdev->nr_pending); | 47 | atomic_inc(&rdev->nr_pending); |
48 | rcu_read_unlock(); | 48 | rcu_read_unlock(); |
@@ -58,8 +58,8 @@ static int multipath_map (multipath_conf_t *conf) | |||
58 | static void multipath_reschedule_retry (struct multipath_bh *mp_bh) | 58 | static void multipath_reschedule_retry (struct multipath_bh *mp_bh) |
59 | { | 59 | { |
60 | unsigned long flags; | 60 | unsigned long flags; |
61 | mddev_t *mddev = mp_bh->mddev; | 61 | struct mddev *mddev = mp_bh->mddev; |
62 | multipath_conf_t *conf = mddev->private; | 62 | struct mpconf *conf = mddev->private; |
63 | 63 | ||
64 | spin_lock_irqsave(&conf->device_lock, flags); | 64 | spin_lock_irqsave(&conf->device_lock, flags); |
65 | list_add(&mp_bh->retry_list, &conf->retry_list); | 65 | list_add(&mp_bh->retry_list, &conf->retry_list); |
@@ -76,7 +76,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) | |||
76 | static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) | 76 | static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) |
77 | { | 77 | { |
78 | struct bio *bio = mp_bh->master_bio; | 78 | struct bio *bio = mp_bh->master_bio; |
79 | multipath_conf_t *conf = mp_bh->mddev->private; | 79 | struct mpconf *conf = mp_bh->mddev->private; |
80 | 80 | ||
81 | bio_endio(bio, err); | 81 | bio_endio(bio, err); |
82 | mempool_free(mp_bh, conf->pool); | 82 | mempool_free(mp_bh, conf->pool); |
@@ -86,8 +86,8 @@ static void multipath_end_request(struct bio *bio, int error) | |||
86 | { | 86 | { |
87 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 87 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
88 | struct multipath_bh *mp_bh = bio->bi_private; | 88 | struct multipath_bh *mp_bh = bio->bi_private; |
89 | multipath_conf_t *conf = mp_bh->mddev->private; | 89 | struct mpconf *conf = mp_bh->mddev->private; |
90 | mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; | 90 | struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; |
91 | 91 | ||
92 | if (uptodate) | 92 | if (uptodate) |
93 | multipath_end_bh_io(mp_bh, 0); | 93 | multipath_end_bh_io(mp_bh, 0); |
@@ -106,9 +106,9 @@ static void multipath_end_request(struct bio *bio, int error) | |||
106 | rdev_dec_pending(rdev, conf->mddev); | 106 | rdev_dec_pending(rdev, conf->mddev); |
107 | } | 107 | } |
108 | 108 | ||
109 | static int multipath_make_request(mddev_t *mddev, struct bio * bio) | 109 | static int multipath_make_request(struct mddev *mddev, struct bio * bio) |
110 | { | 110 | { |
111 | multipath_conf_t *conf = mddev->private; | 111 | struct mpconf *conf = mddev->private; |
112 | struct multipath_bh * mp_bh; | 112 | struct multipath_bh * mp_bh; |
113 | struct multipath_info *multipath; | 113 | struct multipath_info *multipath; |
114 | 114 | ||
@@ -140,9 +140,9 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) | |||
140 | return 0; | 140 | return 0; |
141 | } | 141 | } |
142 | 142 | ||
143 | static void multipath_status (struct seq_file *seq, mddev_t *mddev) | 143 | static void multipath_status (struct seq_file *seq, struct mddev *mddev) |
144 | { | 144 | { |
145 | multipath_conf_t *conf = mddev->private; | 145 | struct mpconf *conf = mddev->private; |
146 | int i; | 146 | int i; |
147 | 147 | ||
148 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, | 148 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, |
@@ -156,8 +156,8 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev) | |||
156 | 156 | ||
157 | static int multipath_congested(void *data, int bits) | 157 | static int multipath_congested(void *data, int bits) |
158 | { | 158 | { |
159 | mddev_t *mddev = data; | 159 | struct mddev *mddev = data; |
160 | multipath_conf_t *conf = mddev->private; | 160 | struct mpconf *conf = mddev->private; |
161 | int i, ret = 0; | 161 | int i, ret = 0; |
162 | 162 | ||
163 | if (mddev_congested(mddev, bits)) | 163 | if (mddev_congested(mddev, bits)) |
@@ -165,7 +165,7 @@ static int multipath_congested(void *data, int bits) | |||
165 | 165 | ||
166 | rcu_read_lock(); | 166 | rcu_read_lock(); |
167 | for (i = 0; i < mddev->raid_disks ; i++) { | 167 | for (i = 0; i < mddev->raid_disks ; i++) { |
168 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); | 168 | struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); |
169 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | 169 | if (rdev && !test_bit(Faulty, &rdev->flags)) { |
170 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 170 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
171 | 171 | ||
@@ -183,9 +183,9 @@ static int multipath_congested(void *data, int bits) | |||
183 | /* | 183 | /* |
184 | * Careful, this can execute in IRQ contexts as well! | 184 | * Careful, this can execute in IRQ contexts as well! |
185 | */ | 185 | */ |
186 | static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) | 186 | static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) |
187 | { | 187 | { |
188 | multipath_conf_t *conf = mddev->private; | 188 | struct mpconf *conf = mddev->private; |
189 | char b[BDEVNAME_SIZE]; | 189 | char b[BDEVNAME_SIZE]; |
190 | 190 | ||
191 | if (conf->raid_disks - mddev->degraded <= 1) { | 191 | if (conf->raid_disks - mddev->degraded <= 1) { |
@@ -218,7 +218,7 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) | |||
218 | conf->raid_disks - mddev->degraded); | 218 | conf->raid_disks - mddev->degraded); |
219 | } | 219 | } |
220 | 220 | ||
221 | static void print_multipath_conf (multipath_conf_t *conf) | 221 | static void print_multipath_conf (struct mpconf *conf) |
222 | { | 222 | { |
223 | int i; | 223 | int i; |
224 | struct multipath_info *tmp; | 224 | struct multipath_info *tmp; |
@@ -242,9 +242,9 @@ static void print_multipath_conf (multipath_conf_t *conf) | |||
242 | } | 242 | } |
243 | 243 | ||
244 | 244 | ||
245 | static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 245 | static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
246 | { | 246 | { |
247 | multipath_conf_t *conf = mddev->private; | 247 | struct mpconf *conf = mddev->private; |
248 | struct request_queue *q; | 248 | struct request_queue *q; |
249 | int err = -EEXIST; | 249 | int err = -EEXIST; |
250 | int path; | 250 | int path; |
@@ -291,11 +291,11 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
291 | return err; | 291 | return err; |
292 | } | 292 | } |
293 | 293 | ||
294 | static int multipath_remove_disk(mddev_t *mddev, int number) | 294 | static int multipath_remove_disk(struct mddev *mddev, int number) |
295 | { | 295 | { |
296 | multipath_conf_t *conf = mddev->private; | 296 | struct mpconf *conf = mddev->private; |
297 | int err = 0; | 297 | int err = 0; |
298 | mdk_rdev_t *rdev; | 298 | struct md_rdev *rdev; |
299 | struct multipath_info *p = conf->multipaths + number; | 299 | struct multipath_info *p = conf->multipaths + number; |
300 | 300 | ||
301 | print_multipath_conf(conf); | 301 | print_multipath_conf(conf); |
@@ -335,12 +335,12 @@ abort: | |||
335 | * 3. Performs writes following reads for array synchronising. | 335 | * 3. Performs writes following reads for array synchronising. |
336 | */ | 336 | */ |
337 | 337 | ||
338 | static void multipathd (mddev_t *mddev) | 338 | static void multipathd (struct mddev *mddev) |
339 | { | 339 | { |
340 | struct multipath_bh *mp_bh; | 340 | struct multipath_bh *mp_bh; |
341 | struct bio *bio; | 341 | struct bio *bio; |
342 | unsigned long flags; | 342 | unsigned long flags; |
343 | multipath_conf_t *conf = mddev->private; | 343 | struct mpconf *conf = mddev->private; |
344 | struct list_head *head = &conf->retry_list; | 344 | struct list_head *head = &conf->retry_list; |
345 | 345 | ||
346 | md_check_recovery(mddev); | 346 | md_check_recovery(mddev); |
@@ -379,7 +379,7 @@ static void multipathd (mddev_t *mddev) | |||
379 | spin_unlock_irqrestore(&conf->device_lock, flags); | 379 | spin_unlock_irqrestore(&conf->device_lock, flags); |
380 | } | 380 | } |
381 | 381 | ||
382 | static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks) | 382 | static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
383 | { | 383 | { |
384 | WARN_ONCE(sectors || raid_disks, | 384 | WARN_ONCE(sectors || raid_disks, |
385 | "%s does not support generic reshape\n", __func__); | 385 | "%s does not support generic reshape\n", __func__); |
@@ -387,12 +387,12 @@ static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
387 | return mddev->dev_sectors; | 387 | return mddev->dev_sectors; |
388 | } | 388 | } |
389 | 389 | ||
390 | static int multipath_run (mddev_t *mddev) | 390 | static int multipath_run (struct mddev *mddev) |
391 | { | 391 | { |
392 | multipath_conf_t *conf; | 392 | struct mpconf *conf; |
393 | int disk_idx; | 393 | int disk_idx; |
394 | struct multipath_info *disk; | 394 | struct multipath_info *disk; |
395 | mdk_rdev_t *rdev; | 395 | struct md_rdev *rdev; |
396 | int working_disks; | 396 | int working_disks; |
397 | 397 | ||
398 | if (md_check_no_bitmap(mddev)) | 398 | if (md_check_no_bitmap(mddev)) |
@@ -409,7 +409,7 @@ static int multipath_run (mddev_t *mddev) | |||
409 | * should be freed in multipath_stop()] | 409 | * should be freed in multipath_stop()] |
410 | */ | 410 | */ |
411 | 411 | ||
412 | conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); | 412 | conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL); |
413 | mddev->private = conf; | 413 | mddev->private = conf; |
414 | if (!conf) { | 414 | if (!conf) { |
415 | printk(KERN_ERR | 415 | printk(KERN_ERR |
@@ -510,9 +510,9 @@ out: | |||
510 | } | 510 | } |
511 | 511 | ||
512 | 512 | ||
513 | static int multipath_stop (mddev_t *mddev) | 513 | static int multipath_stop (struct mddev *mddev) |
514 | { | 514 | { |
515 | multipath_conf_t *conf = mddev->private; | 515 | struct mpconf *conf = mddev->private; |
516 | 516 | ||
517 | md_unregister_thread(&mddev->thread); | 517 | md_unregister_thread(&mddev->thread); |
518 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 518 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
@@ -523,7 +523,7 @@ static int multipath_stop (mddev_t *mddev) | |||
523 | return 0; | 523 | return 0; |
524 | } | 524 | } |
525 | 525 | ||
526 | static struct mdk_personality multipath_personality = | 526 | static struct md_personality multipath_personality = |
527 | { | 527 | { |
528 | .name = "multipath", | 528 | .name = "multipath", |
529 | .level = LEVEL_MULTIPATH, | 529 | .level = LEVEL_MULTIPATH, |
diff --git a/drivers/md/multipath.h b/drivers/md/multipath.h index 3c5a45eb5f8a..717c60f62898 100644 --- a/drivers/md/multipath.h +++ b/drivers/md/multipath.h | |||
@@ -2,11 +2,11 @@ | |||
2 | #define _MULTIPATH_H | 2 | #define _MULTIPATH_H |
3 | 3 | ||
4 | struct multipath_info { | 4 | struct multipath_info { |
5 | mdk_rdev_t *rdev; | 5 | struct md_rdev *rdev; |
6 | }; | 6 | }; |
7 | 7 | ||
8 | struct multipath_private_data { | 8 | struct mpconf { |
9 | mddev_t *mddev; | 9 | struct mddev *mddev; |
10 | struct multipath_info *multipaths; | 10 | struct multipath_info *multipaths; |
11 | int raid_disks; | 11 | int raid_disks; |
12 | spinlock_t device_lock; | 12 | spinlock_t device_lock; |
@@ -15,8 +15,6 @@ struct multipath_private_data { | |||
15 | mempool_t *pool; | 15 | mempool_t *pool; |
16 | }; | 16 | }; |
17 | 17 | ||
18 | typedef struct multipath_private_data multipath_conf_t; | ||
19 | |||
20 | /* | 18 | /* |
21 | * this is our 'private' 'collective' MULTIPATH buffer head. | 19 | * this is our 'private' 'collective' MULTIPATH buffer head. |
22 | * it contains information about what kind of IO operations were started | 20 | * it contains information about what kind of IO operations were started |
@@ -24,7 +22,7 @@ typedef struct multipath_private_data multipath_conf_t; | |||
24 | */ | 22 | */ |
25 | 23 | ||
26 | struct multipath_bh { | 24 | struct multipath_bh { |
27 | mddev_t *mddev; | 25 | struct mddev *mddev; |
28 | struct bio *master_bio; | 26 | struct bio *master_bio; |
29 | struct bio bio; | 27 | struct bio bio; |
30 | int path; | 28 | int path; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e86bf3682e1e..0eb08a4df759 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -27,9 +27,9 @@ | |||
27 | 27 | ||
28 | static int raid0_congested(void *data, int bits) | 28 | static int raid0_congested(void *data, int bits) |
29 | { | 29 | { |
30 | mddev_t *mddev = data; | 30 | struct mddev *mddev = data; |
31 | raid0_conf_t *conf = mddev->private; | 31 | struct r0conf *conf = mddev->private; |
32 | mdk_rdev_t **devlist = conf->devlist; | 32 | struct md_rdev **devlist = conf->devlist; |
33 | int raid_disks = conf->strip_zone[0].nb_dev; | 33 | int raid_disks = conf->strip_zone[0].nb_dev; |
34 | int i, ret = 0; | 34 | int i, ret = 0; |
35 | 35 | ||
@@ -47,52 +47,53 @@ static int raid0_congested(void *data, int bits) | |||
47 | /* | 47 | /* |
48 | * inform the user of the raid configuration | 48 | * inform the user of the raid configuration |
49 | */ | 49 | */ |
50 | static void dump_zones(mddev_t *mddev) | 50 | static void dump_zones(struct mddev *mddev) |
51 | { | 51 | { |
52 | int j, k, h; | 52 | int j, k; |
53 | sector_t zone_size = 0; | 53 | sector_t zone_size = 0; |
54 | sector_t zone_start = 0; | 54 | sector_t zone_start = 0; |
55 | char b[BDEVNAME_SIZE]; | 55 | char b[BDEVNAME_SIZE]; |
56 | raid0_conf_t *conf = mddev->private; | 56 | struct r0conf *conf = mddev->private; |
57 | int raid_disks = conf->strip_zone[0].nb_dev; | 57 | int raid_disks = conf->strip_zone[0].nb_dev; |
58 | printk(KERN_INFO "******* %s configuration *********\n", | 58 | printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n", |
59 | mdname(mddev)); | 59 | mdname(mddev), |
60 | h = 0; | 60 | conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s"); |
61 | for (j = 0; j < conf->nr_strip_zones; j++) { | 61 | for (j = 0; j < conf->nr_strip_zones; j++) { |
62 | printk(KERN_INFO "zone%d=[", j); | 62 | printk(KERN_INFO "md: zone%d=[", j); |
63 | for (k = 0; k < conf->strip_zone[j].nb_dev; k++) | 63 | for (k = 0; k < conf->strip_zone[j].nb_dev; k++) |
64 | printk(KERN_CONT "%s/", | 64 | printk(KERN_CONT "%s%s", k?"/":"", |
65 | bdevname(conf->devlist[j*raid_disks | 65 | bdevname(conf->devlist[j*raid_disks |
66 | + k]->bdev, b)); | 66 | + k]->bdev, b)); |
67 | printk(KERN_CONT "]\n"); | 67 | printk(KERN_CONT "]\n"); |
68 | 68 | ||
69 | zone_size = conf->strip_zone[j].zone_end - zone_start; | 69 | zone_size = conf->strip_zone[j].zone_end - zone_start; |
70 | printk(KERN_INFO " zone offset=%llukb " | 70 | printk(KERN_INFO " zone-offset=%10lluKB, " |
71 | "device offset=%llukb size=%llukb\n", | 71 | "device-offset=%10lluKB, size=%10lluKB\n", |
72 | (unsigned long long)zone_start>>1, | 72 | (unsigned long long)zone_start>>1, |
73 | (unsigned long long)conf->strip_zone[j].dev_start>>1, | 73 | (unsigned long long)conf->strip_zone[j].dev_start>>1, |
74 | (unsigned long long)zone_size>>1); | 74 | (unsigned long long)zone_size>>1); |
75 | zone_start = conf->strip_zone[j].zone_end; | 75 | zone_start = conf->strip_zone[j].zone_end; |
76 | } | 76 | } |
77 | printk(KERN_INFO "**********************************\n\n"); | 77 | printk(KERN_INFO "\n"); |
78 | } | 78 | } |
79 | 79 | ||
80 | static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | 80 | static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) |
81 | { | 81 | { |
82 | int i, c, err; | 82 | int i, c, err; |
83 | sector_t curr_zone_end, sectors; | 83 | sector_t curr_zone_end, sectors; |
84 | mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev; | 84 | struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev; |
85 | struct strip_zone *zone; | 85 | struct strip_zone *zone; |
86 | int cnt; | 86 | int cnt; |
87 | char b[BDEVNAME_SIZE]; | 87 | char b[BDEVNAME_SIZE]; |
88 | raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL); | 88 | char b2[BDEVNAME_SIZE]; |
89 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); | ||
89 | 90 | ||
90 | if (!conf) | 91 | if (!conf) |
91 | return -ENOMEM; | 92 | return -ENOMEM; |
92 | list_for_each_entry(rdev1, &mddev->disks, same_set) { | 93 | list_for_each_entry(rdev1, &mddev->disks, same_set) { |
93 | printk(KERN_INFO "md/raid0:%s: looking at %s\n", | 94 | pr_debug("md/raid0:%s: looking at %s\n", |
94 | mdname(mddev), | 95 | mdname(mddev), |
95 | bdevname(rdev1->bdev, b)); | 96 | bdevname(rdev1->bdev, b)); |
96 | c = 0; | 97 | c = 0; |
97 | 98 | ||
98 | /* round size to chunk_size */ | 99 | /* round size to chunk_size */ |
@@ -101,16 +102,16 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
101 | rdev1->sectors = sectors * mddev->chunk_sectors; | 102 | rdev1->sectors = sectors * mddev->chunk_sectors; |
102 | 103 | ||
103 | list_for_each_entry(rdev2, &mddev->disks, same_set) { | 104 | list_for_each_entry(rdev2, &mddev->disks, same_set) { |
104 | printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)", | 105 | pr_debug("md/raid0:%s: comparing %s(%llu)" |
105 | mdname(mddev), | 106 | " with %s(%llu)\n", |
106 | bdevname(rdev1->bdev,b), | 107 | mdname(mddev), |
107 | (unsigned long long)rdev1->sectors); | 108 | bdevname(rdev1->bdev,b), |
108 | printk(KERN_CONT " with %s(%llu)\n", | 109 | (unsigned long long)rdev1->sectors, |
109 | bdevname(rdev2->bdev,b), | 110 | bdevname(rdev2->bdev,b2), |
110 | (unsigned long long)rdev2->sectors); | 111 | (unsigned long long)rdev2->sectors); |
111 | if (rdev2 == rdev1) { | 112 | if (rdev2 == rdev1) { |
112 | printk(KERN_INFO "md/raid0:%s: END\n", | 113 | pr_debug("md/raid0:%s: END\n", |
113 | mdname(mddev)); | 114 | mdname(mddev)); |
114 | break; | 115 | break; |
115 | } | 116 | } |
116 | if (rdev2->sectors == rdev1->sectors) { | 117 | if (rdev2->sectors == rdev1->sectors) { |
@@ -118,30 +119,30 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
118 | * Not unique, don't count it as a new | 119 | * Not unique, don't count it as a new |
119 | * group | 120 | * group |
120 | */ | 121 | */ |
121 | printk(KERN_INFO "md/raid0:%s: EQUAL\n", | 122 | pr_debug("md/raid0:%s: EQUAL\n", |
122 | mdname(mddev)); | 123 | mdname(mddev)); |
123 | c = 1; | 124 | c = 1; |
124 | break; | 125 | break; |
125 | } | 126 | } |
126 | printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n", | 127 | pr_debug("md/raid0:%s: NOT EQUAL\n", |
127 | mdname(mddev)); | 128 | mdname(mddev)); |
128 | } | 129 | } |
129 | if (!c) { | 130 | if (!c) { |
130 | printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n", | 131 | pr_debug("md/raid0:%s: ==> UNIQUE\n", |
131 | mdname(mddev)); | 132 | mdname(mddev)); |
132 | conf->nr_strip_zones++; | 133 | conf->nr_strip_zones++; |
133 | printk(KERN_INFO "md/raid0:%s: %d zones\n", | 134 | pr_debug("md/raid0:%s: %d zones\n", |
134 | mdname(mddev), conf->nr_strip_zones); | 135 | mdname(mddev), conf->nr_strip_zones); |
135 | } | 136 | } |
136 | } | 137 | } |
137 | printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n", | 138 | pr_debug("md/raid0:%s: FINAL %d zones\n", |
138 | mdname(mddev), conf->nr_strip_zones); | 139 | mdname(mddev), conf->nr_strip_zones); |
139 | err = -ENOMEM; | 140 | err = -ENOMEM; |
140 | conf->strip_zone = kzalloc(sizeof(struct strip_zone)* | 141 | conf->strip_zone = kzalloc(sizeof(struct strip_zone)* |
141 | conf->nr_strip_zones, GFP_KERNEL); | 142 | conf->nr_strip_zones, GFP_KERNEL); |
142 | if (!conf->strip_zone) | 143 | if (!conf->strip_zone) |
143 | goto abort; | 144 | goto abort; |
144 | conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* | 145 | conf->devlist = kzalloc(sizeof(struct md_rdev*)* |
145 | conf->nr_strip_zones*mddev->raid_disks, | 146 | conf->nr_strip_zones*mddev->raid_disks, |
146 | GFP_KERNEL); | 147 | GFP_KERNEL); |
147 | if (!conf->devlist) | 148 | if (!conf->devlist) |
@@ -218,44 +219,45 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
218 | zone = conf->strip_zone + i; | 219 | zone = conf->strip_zone + i; |
219 | dev = conf->devlist + i * mddev->raid_disks; | 220 | dev = conf->devlist + i * mddev->raid_disks; |
220 | 221 | ||
221 | printk(KERN_INFO "md/raid0:%s: zone %d\n", | 222 | pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i); |
222 | mdname(mddev), i); | ||
223 | zone->dev_start = smallest->sectors; | 223 | zone->dev_start = smallest->sectors; |
224 | smallest = NULL; | 224 | smallest = NULL; |
225 | c = 0; | 225 | c = 0; |
226 | 226 | ||
227 | for (j=0; j<cnt; j++) { | 227 | for (j=0; j<cnt; j++) { |
228 | rdev = conf->devlist[j]; | 228 | rdev = conf->devlist[j]; |
229 | printk(KERN_INFO "md/raid0:%s: checking %s ...", | ||
230 | mdname(mddev), | ||
231 | bdevname(rdev->bdev, b)); | ||
232 | if (rdev->sectors <= zone->dev_start) { | 229 | if (rdev->sectors <= zone->dev_start) { |
233 | printk(KERN_CONT " nope.\n"); | 230 | pr_debug("md/raid0:%s: checking %s ... nope\n", |
231 | mdname(mddev), | ||
232 | bdevname(rdev->bdev, b)); | ||
234 | continue; | 233 | continue; |
235 | } | 234 | } |
236 | printk(KERN_CONT " contained as device %d\n", c); | 235 | pr_debug("md/raid0:%s: checking %s ..." |
236 | " contained as device %d\n", | ||
237 | mdname(mddev), | ||
238 | bdevname(rdev->bdev, b), c); | ||
237 | dev[c] = rdev; | 239 | dev[c] = rdev; |
238 | c++; | 240 | c++; |
239 | if (!smallest || rdev->sectors < smallest->sectors) { | 241 | if (!smallest || rdev->sectors < smallest->sectors) { |
240 | smallest = rdev; | 242 | smallest = rdev; |
241 | printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n", | 243 | pr_debug("md/raid0:%s: (%llu) is smallest!.\n", |
242 | mdname(mddev), | 244 | mdname(mddev), |
243 | (unsigned long long)rdev->sectors); | 245 | (unsigned long long)rdev->sectors); |
244 | } | 246 | } |
245 | } | 247 | } |
246 | 248 | ||
247 | zone->nb_dev = c; | 249 | zone->nb_dev = c; |
248 | sectors = (smallest->sectors - zone->dev_start) * c; | 250 | sectors = (smallest->sectors - zone->dev_start) * c; |
249 | printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n", | 251 | pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n", |
250 | mdname(mddev), | 252 | mdname(mddev), |
251 | zone->nb_dev, (unsigned long long)sectors); | 253 | zone->nb_dev, (unsigned long long)sectors); |
252 | 254 | ||
253 | curr_zone_end += sectors; | 255 | curr_zone_end += sectors; |
254 | zone->zone_end = curr_zone_end; | 256 | zone->zone_end = curr_zone_end; |
255 | 257 | ||
256 | printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n", | 258 | pr_debug("md/raid0:%s: current zone start: %llu\n", |
257 | mdname(mddev), | 259 | mdname(mddev), |
258 | (unsigned long long)smallest->sectors); | 260 | (unsigned long long)smallest->sectors); |
259 | } | 261 | } |
260 | mddev->queue->backing_dev_info.congested_fn = raid0_congested; | 262 | mddev->queue->backing_dev_info.congested_fn = raid0_congested; |
261 | mddev->queue->backing_dev_info.congested_data = mddev; | 263 | mddev->queue->backing_dev_info.congested_data = mddev; |
@@ -275,7 +277,7 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
275 | blk_queue_io_opt(mddev->queue, | 277 | blk_queue_io_opt(mddev->queue, |
276 | (mddev->chunk_sectors << 9) * mddev->raid_disks); | 278 | (mddev->chunk_sectors << 9) * mddev->raid_disks); |
277 | 279 | ||
278 | printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev)); | 280 | pr_debug("md/raid0:%s: done.\n", mdname(mddev)); |
279 | *private_conf = conf; | 281 | *private_conf = conf; |
280 | 282 | ||
281 | return 0; | 283 | return 0; |
@@ -299,7 +301,7 @@ static int raid0_mergeable_bvec(struct request_queue *q, | |||
299 | struct bvec_merge_data *bvm, | 301 | struct bvec_merge_data *bvm, |
300 | struct bio_vec *biovec) | 302 | struct bio_vec *biovec) |
301 | { | 303 | { |
302 | mddev_t *mddev = q->queuedata; | 304 | struct mddev *mddev = q->queuedata; |
303 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | 305 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); |
304 | int max; | 306 | int max; |
305 | unsigned int chunk_sectors = mddev->chunk_sectors; | 307 | unsigned int chunk_sectors = mddev->chunk_sectors; |
@@ -318,10 +320,10 @@ static int raid0_mergeable_bvec(struct request_queue *q, | |||
318 | return max; | 320 | return max; |
319 | } | 321 | } |
320 | 322 | ||
321 | static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) | 323 | static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
322 | { | 324 | { |
323 | sector_t array_sectors = 0; | 325 | sector_t array_sectors = 0; |
324 | mdk_rdev_t *rdev; | 326 | struct md_rdev *rdev; |
325 | 327 | ||
326 | WARN_ONCE(sectors || raid_disks, | 328 | WARN_ONCE(sectors || raid_disks, |
327 | "%s does not support generic reshape\n", __func__); | 329 | "%s does not support generic reshape\n", __func__); |
@@ -332,9 +334,9 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
332 | return array_sectors; | 334 | return array_sectors; |
333 | } | 335 | } |
334 | 336 | ||
335 | static int raid0_run(mddev_t *mddev) | 337 | static int raid0_run(struct mddev *mddev) |
336 | { | 338 | { |
337 | raid0_conf_t *conf; | 339 | struct r0conf *conf; |
338 | int ret; | 340 | int ret; |
339 | 341 | ||
340 | if (mddev->chunk_sectors == 0) { | 342 | if (mddev->chunk_sectors == 0) { |
@@ -382,9 +384,9 @@ static int raid0_run(mddev_t *mddev) | |||
382 | return md_integrity_register(mddev); | 384 | return md_integrity_register(mddev); |
383 | } | 385 | } |
384 | 386 | ||
385 | static int raid0_stop(mddev_t *mddev) | 387 | static int raid0_stop(struct mddev *mddev) |
386 | { | 388 | { |
387 | raid0_conf_t *conf = mddev->private; | 389 | struct r0conf *conf = mddev->private; |
388 | 390 | ||
389 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 391 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
390 | kfree(conf->strip_zone); | 392 | kfree(conf->strip_zone); |
@@ -397,7 +399,7 @@ static int raid0_stop(mddev_t *mddev) | |||
397 | /* Find the zone which holds a particular offset | 399 | /* Find the zone which holds a particular offset |
398 | * Update *sectorp to be an offset in that zone | 400 | * Update *sectorp to be an offset in that zone |
399 | */ | 401 | */ |
400 | static struct strip_zone *find_zone(struct raid0_private_data *conf, | 402 | static struct strip_zone *find_zone(struct r0conf *conf, |
401 | sector_t *sectorp) | 403 | sector_t *sectorp) |
402 | { | 404 | { |
403 | int i; | 405 | int i; |
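[Editor's note] the rest of the body is elided by the diff; a sketch of the lookup the comment describes — walk the cumulative zone_end values and rebase the offset into the matching zone (assumed from the visible fields, not quoted from the source):

    for (i = 0; i < conf->nr_strip_zones; i++) {
            struct strip_zone *z = conf->strip_zone + i;
            if (*sectorp < z->zone_end) {
                    if (i)
                            *sectorp -= conf->strip_zone[i - 1].zone_end;
                    return z;
            }
    }
    return NULL;    /* not reached for a valid array offset */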
@@ -417,12 +419,12 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf, | |||
417 | * remaps the bio to the target device. We separate two flows: | 419 | * remaps the bio to the target device. We separate two flows: |
418 | * a power-of-2 flow and a general flow, for the sake of performance | 420 | * a power-of-2 flow and a general flow, for the sake of performance |
419 | */ | 421 | */ |
420 | static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone, | 422 | static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, |
421 | sector_t sector, sector_t *sector_offset) | 423 | sector_t sector, sector_t *sector_offset) |
422 | { | 424 | { |
423 | unsigned int sect_in_chunk; | 425 | unsigned int sect_in_chunk; |
424 | sector_t chunk; | 426 | sector_t chunk; |
425 | raid0_conf_t *conf = mddev->private; | 427 | struct r0conf *conf = mddev->private; |
426 | int raid_disks = conf->strip_zone[0].nb_dev; | 428 | int raid_disks = conf->strip_zone[0].nb_dev; |
427 | unsigned int chunk_sects = mddev->chunk_sectors; | 429 | unsigned int chunk_sects = mddev->chunk_sectors; |
428 | 430 | ||
@@ -453,7 +455,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone, | |||
453 | /* | 455 | /* |
454 | * Is the IO distributed over 1 or more chunks? | 456 | * Is the IO distributed over 1 or more chunks? |
455 | */ | 457 | */ |
456 | static inline int is_io_in_chunk_boundary(mddev_t *mddev, | 458 | static inline int is_io_in_chunk_boundary(struct mddev *mddev, |
457 | unsigned int chunk_sects, struct bio *bio) | 459 | unsigned int chunk_sects, struct bio *bio) |
458 | { | 460 | { |
459 | if (likely(is_power_of_2(chunk_sects))) { | 461 | if (likely(is_power_of_2(chunk_sects))) { |
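The is_power_of_2() branch above lets the chunk-boundary test use a bitmask instead of a division. A rough userspace sketch of both tests, with uint64_t standing in for sector_t (names here are illustrative, not the driver's):

/* Userspace sketch of the two chunk-boundary tests: the power-of-2
 * path masks out the in-chunk offset; the general path uses modulo.
 */
#include <stdint.h>
#include <stdio.h>

static int io_fits_in_chunk(uint64_t sector, unsigned int len_sectors,
			    unsigned int chunk_sects, int chunk_is_pow2)
{
	if (chunk_is_pow2)
		/* offset inside the chunk via bitmask, then compare */
		return (sector & (chunk_sects - 1)) + len_sectors <= chunk_sects;
	/* general case: an explicit modulo */
	return (sector % chunk_sects) + len_sectors <= chunk_sects;
}

int main(void)
{
	/* 64KiB chunks = 128 sectors; an 8-sector IO at sector 124 crosses */
	printf("%d\n", io_fits_in_chunk(124, 8, 128, 1));	/* 0: crosses */
	printf("%d\n", io_fits_in_chunk(120, 8, 128, 1));	/* 1: fits    */
	return 0;
}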
@@ -466,12 +468,12 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev, | |||
466 | } | 468 | } |
467 | } | 469 | } |
468 | 470 | ||
469 | static int raid0_make_request(mddev_t *mddev, struct bio *bio) | 471 | static int raid0_make_request(struct mddev *mddev, struct bio *bio) |
470 | { | 472 | { |
471 | unsigned int chunk_sects; | 473 | unsigned int chunk_sects; |
472 | sector_t sector_offset; | 474 | sector_t sector_offset; |
473 | struct strip_zone *zone; | 475 | struct strip_zone *zone; |
474 | mdk_rdev_t *tmp_dev; | 476 | struct md_rdev *tmp_dev; |
475 | 477 | ||
476 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 478 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
477 | md_flush_request(mddev, bio); | 479 | md_flush_request(mddev, bio); |
@@ -526,43 +528,16 @@ bad_map: | |||
526 | return 0; | 528 | return 0; |
527 | } | 529 | } |
528 | 530 | ||
529 | static void raid0_status(struct seq_file *seq, mddev_t *mddev) | 531 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) |
530 | { | 532 | { |
531 | #undef MD_DEBUG | ||
532 | #ifdef MD_DEBUG | ||
533 | int j, k, h; | ||
534 | char b[BDEVNAME_SIZE]; | ||
535 | raid0_conf_t *conf = mddev->private; | ||
536 | int raid_disks = conf->strip_zone[0].nb_dev; | ||
537 | |||
538 | sector_t zone_size; | ||
539 | sector_t zone_start = 0; | ||
540 | h = 0; | ||
541 | |||
542 | for (j = 0; j < conf->nr_strip_zones; j++) { | ||
543 | seq_printf(seq, " z%d", j); | ||
544 | seq_printf(seq, "=["); | ||
545 | for (k = 0; k < conf->strip_zone[j].nb_dev; k++) | ||
546 | seq_printf(seq, "%s/", bdevname( | ||
547 | conf->devlist[j*raid_disks + k] | ||
548 | ->bdev, b)); | ||
549 | |||
550 | zone_size = conf->strip_zone[j].zone_end - zone_start; | ||
551 | seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n", | ||
552 | (unsigned long long)zone_start>>1, | ||
553 | (unsigned long long)conf->strip_zone[j].dev_start>>1, | ||
554 | (unsigned long long)zone_size>>1); | ||
555 | zone_start = conf->strip_zone[j].zone_end; | ||
556 | } | ||
557 | #endif | ||
558 | seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); | 533 | seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); |
559 | return; | 534 | return; |
560 | } | 535 | } |
561 | 536 | ||
562 | static void *raid0_takeover_raid45(mddev_t *mddev) | 537 | static void *raid0_takeover_raid45(struct mddev *mddev) |
563 | { | 538 | { |
564 | mdk_rdev_t *rdev; | 539 | struct md_rdev *rdev; |
565 | raid0_conf_t *priv_conf; | 540 | struct r0conf *priv_conf; |
566 | 541 | ||
567 | if (mddev->degraded != 1) { | 542 | if (mddev->degraded != 1) { |
568 | printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n", | 543 | printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n", |
@@ -593,9 +568,9 @@ static void *raid0_takeover_raid45(mddev_t *mddev) | |||
593 | return priv_conf; | 568 | return priv_conf; |
594 | } | 569 | } |
595 | 570 | ||
596 | static void *raid0_takeover_raid10(mddev_t *mddev) | 571 | static void *raid0_takeover_raid10(struct mddev *mddev) |
597 | { | 572 | { |
598 | raid0_conf_t *priv_conf; | 573 | struct r0conf *priv_conf; |
599 | 574 | ||
600 | /* Check layout: | 575 | /* Check layout: |
601 | * - far_copies must be 1 | 576 | * - far_copies must be 1 |
@@ -634,9 +609,9 @@ static void *raid0_takeover_raid10(mddev_t *mddev) | |||
634 | return priv_conf; | 609 | return priv_conf; |
635 | } | 610 | } |
636 | 611 | ||
637 | static void *raid0_takeover_raid1(mddev_t *mddev) | 612 | static void *raid0_takeover_raid1(struct mddev *mddev) |
638 | { | 613 | { |
639 | raid0_conf_t *priv_conf; | 614 | struct r0conf *priv_conf; |
640 | 615 | ||
641 | /* Check layout: | 616 | /* Check layout: |
642 | * - (N - 1) mirror drives must be already faulty | 617 | * - (N - 1) mirror drives must be already faulty |
@@ -660,7 +635,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev) | |||
660 | return priv_conf; | 635 | return priv_conf; |
661 | } | 636 | } |
662 | 637 | ||
663 | static void *raid0_takeover(mddev_t *mddev) | 638 | static void *raid0_takeover(struct mddev *mddev) |
664 | { | 639 | { |
665 | /* raid0 can take over: | 640 | /* raid0 can take over: |
666 | * raid4 - if all data disks are active. | 641 | * raid4 - if all data disks are active. |
@@ -691,11 +666,11 @@ static void *raid0_takeover(mddev_t *mddev) | |||
691 | return ERR_PTR(-EINVAL); | 666 | return ERR_PTR(-EINVAL); |
692 | } | 667 | } |
693 | 668 | ||
694 | static void raid0_quiesce(mddev_t *mddev, int state) | 669 | static void raid0_quiesce(struct mddev *mddev, int state) |
695 | { | 670 | { |
696 | } | 671 | } |
697 | 672 | ||
698 | static struct mdk_personality raid0_personality= | 673 | static struct md_personality raid0_personality= |
699 | { | 674 | { |
700 | .name = "raid0", | 675 | .name = "raid0", |
701 | .level = 0, | 676 | .level = 0, |
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 91f8e876ee64..0884bba8df4c 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h | |||
@@ -1,20 +1,16 @@ | |||
1 | #ifndef _RAID0_H | 1 | #ifndef _RAID0_H |
2 | #define _RAID0_H | 2 | #define _RAID0_H |
3 | 3 | ||
4 | struct strip_zone | 4 | struct strip_zone { |
5 | { | ||
6 | sector_t zone_end; /* Start of the next zone (in sectors) */ | 5 | sector_t zone_end; /* Start of the next zone (in sectors) */ |
7 | sector_t dev_start; /* Zone offset in real dev (in sectors) */ | 6 | sector_t dev_start; /* Zone offset in real dev (in sectors) */ |
8 | int nb_dev; /* # of devices attached to the zone */ | 7 | int nb_dev; /* # of devices attached to the zone */ |
9 | }; | 8 | }; |
10 | 9 | ||
11 | struct raid0_private_data | 10 | struct r0conf { |
12 | { | ||
13 | struct strip_zone *strip_zone; | 11 | struct strip_zone *strip_zone; |
14 | mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ | 12 | struct md_rdev **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ |
15 | int nr_strip_zones; | 13 | int nr_strip_zones; |
16 | }; | 14 | }; |
17 | 15 | ||
18 | typedef struct raid0_private_data raid0_conf_t; | ||
19 | |||
20 | #endif | 16 | #endif |
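The strip_zone entries in struct r0conf are kept sorted by zone_end, which is what makes the find_zone() walk in raid0.c work: the first zone whose end lies past the sector owns it, and *sectorp is rewritten to a zone-relative offset. A minimal userspace sketch of that walk, assuming stand-in types (sector_t ~ uint64_t):

#include <stdint.h>
#include <stdio.h>

struct zone { uint64_t zone_end; };	/* start of the next zone, in sectors */

static int find_zone(const struct zone *z, int nr, uint64_t *sectorp)
{
	uint64_t start = 0;

	for (int i = 0; i < nr; i++) {
		if (*sectorp < z[i].zone_end) {
			*sectorp -= start;	/* offset within the zone */
			return i;
		}
		start = z[i].zone_end;		/* next zone starts here */
	}
	return -1;				/* past the end of the array */
}

int main(void)
{
	struct zone zones[] = { { 1000 }, { 1800 } };
	uint64_t s = 1200;
	int i = find_zone(zones, 2, &s);

	printf("zone %d, offset %llu\n", i, (unsigned long long)s); /* zone 1, offset 200 */
	return 0;
}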
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d9587dffe533..4602fc57c961 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -40,22 +40,24 @@ | |||
40 | #include "raid1.h" | 40 | #include "raid1.h" |
41 | #include "bitmap.h" | 41 | #include "bitmap.h" |
42 | 42 | ||
43 | #define DEBUG 0 | ||
44 | #define PRINTK(x...) do { if (DEBUG) printk(x); } while (0) | ||
45 | |||
46 | /* | 43 | /* |
47 | * Number of guaranteed r1bios in case of extreme VM load: | 44 | * Number of guaranteed r1bios in case of extreme VM load: |
48 | */ | 45 | */ |
49 | #define NR_RAID1_BIOS 256 | 46 | #define NR_RAID1_BIOS 256 |
50 | 47 | ||
48 | /* When there are this many requests queued to be written by | ||
49 | * the raid1 thread, we become 'congested' to provide back-pressure | ||
50 | * for writeback. | ||
51 | */ | ||
52 | static int max_queued_requests = 1024; | ||
51 | 53 | ||
52 | static void allow_barrier(conf_t *conf); | 54 | static void allow_barrier(struct r1conf *conf); |
53 | static void lower_barrier(conf_t *conf); | 55 | static void lower_barrier(struct r1conf *conf); |
54 | 56 | ||
55 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) | 57 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) |
56 | { | 58 | { |
57 | struct pool_info *pi = data; | 59 | struct pool_info *pi = data; |
58 | int size = offsetof(r1bio_t, bios[pi->raid_disks]); | 60 | int size = offsetof(struct r1bio, bios[pi->raid_disks]); |
59 | 61 | ||
60 | /* allocate a r1bio with room for raid_disks entries in the bios array */ | 62 | /* allocate a r1bio with room for raid_disks entries in the bios array */ |
61 | return kzalloc(size, gfp_flags); | 63 | return kzalloc(size, gfp_flags); |
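r1bio_pool_alloc() above sizes its allocation with offsetof() so the trailing bios[] array gets one slot per raid disk in a single allocation. A userspace sketch of the same header-plus-flexible-array idiom (struct names are made up; offsetof with a runtime index is a GCC/clang extension the kernel also relies on):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bio;

struct fake_r1bio {
	unsigned long state;
	struct fake_bio *bios[];	/* one pointer per raid disk */
};

int main(void)
{
	int raid_disks = 4;
	/* header plus raid_disks trailing pointers, in one block */
	size_t size = offsetof(struct fake_r1bio, bios[raid_disks]);
	struct fake_r1bio *rb = calloc(1, size);	/* kzalloc analogue */

	if (!rb)
		return 1;
	printf("allocated %zu bytes for %d disks\n", size, raid_disks);
	free(rb);
	return 0;
}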
@@ -76,7 +78,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
76 | { | 78 | { |
77 | struct pool_info *pi = data; | 79 | struct pool_info *pi = data; |
78 | struct page *page; | 80 | struct page *page; |
79 | r1bio_t *r1_bio; | 81 | struct r1bio *r1_bio; |
80 | struct bio *bio; | 82 | struct bio *bio; |
81 | int i, j; | 83 | int i, j; |
82 | 84 | ||
@@ -142,7 +144,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data) | |||
142 | { | 144 | { |
143 | struct pool_info *pi = data; | 145 | struct pool_info *pi = data; |
144 | int i,j; | 146 | int i,j; |
145 | r1bio_t *r1bio = __r1_bio; | 147 | struct r1bio *r1bio = __r1_bio; |
146 | 148 | ||
147 | for (i = 0; i < RESYNC_PAGES; i++) | 149 | for (i = 0; i < RESYNC_PAGES; i++) |
148 | for (j = pi->raid_disks; j-- ;) { | 150 | for (j = pi->raid_disks; j-- ;) { |
@@ -157,7 +159,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data) | |||
157 | r1bio_pool_free(r1bio, data); | 159 | r1bio_pool_free(r1bio, data); |
158 | } | 160 | } |
159 | 161 | ||
160 | static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) | 162 | static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) |
161 | { | 163 | { |
162 | int i; | 164 | int i; |
163 | 165 | ||
@@ -169,17 +171,17 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) | |||
169 | } | 171 | } |
170 | } | 172 | } |
171 | 173 | ||
172 | static void free_r1bio(r1bio_t *r1_bio) | 174 | static void free_r1bio(struct r1bio *r1_bio) |
173 | { | 175 | { |
174 | conf_t *conf = r1_bio->mddev->private; | 176 | struct r1conf *conf = r1_bio->mddev->private; |
175 | 177 | ||
176 | put_all_bios(conf, r1_bio); | 178 | put_all_bios(conf, r1_bio); |
177 | mempool_free(r1_bio, conf->r1bio_pool); | 179 | mempool_free(r1_bio, conf->r1bio_pool); |
178 | } | 180 | } |
179 | 181 | ||
180 | static void put_buf(r1bio_t *r1_bio) | 182 | static void put_buf(struct r1bio *r1_bio) |
181 | { | 183 | { |
182 | conf_t *conf = r1_bio->mddev->private; | 184 | struct r1conf *conf = r1_bio->mddev->private; |
183 | int i; | 185 | int i; |
184 | 186 | ||
185 | for (i=0; i<conf->raid_disks; i++) { | 187 | for (i=0; i<conf->raid_disks; i++) { |
@@ -193,11 +195,11 @@ static void put_buf(r1bio_t *r1_bio) | |||
193 | lower_barrier(conf); | 195 | lower_barrier(conf); |
194 | } | 196 | } |
195 | 197 | ||
196 | static void reschedule_retry(r1bio_t *r1_bio) | 198 | static void reschedule_retry(struct r1bio *r1_bio) |
197 | { | 199 | { |
198 | unsigned long flags; | 200 | unsigned long flags; |
199 | mddev_t *mddev = r1_bio->mddev; | 201 | struct mddev *mddev = r1_bio->mddev; |
200 | conf_t *conf = mddev->private; | 202 | struct r1conf *conf = mddev->private; |
201 | 203 | ||
202 | spin_lock_irqsave(&conf->device_lock, flags); | 204 | spin_lock_irqsave(&conf->device_lock, flags); |
203 | list_add(&r1_bio->retry_list, &conf->retry_list); | 205 | list_add(&r1_bio->retry_list, &conf->retry_list); |
@@ -213,11 +215,11 @@ static void reschedule_retry(r1bio_t *r1_bio) | |||
213 | * operation and are ready to return a success/failure code to the buffer | 215 | * operation and are ready to return a success/failure code to the buffer |
214 | * cache layer. | 216 | * cache layer. |
215 | */ | 217 | */ |
216 | static void call_bio_endio(r1bio_t *r1_bio) | 218 | static void call_bio_endio(struct r1bio *r1_bio) |
217 | { | 219 | { |
218 | struct bio *bio = r1_bio->master_bio; | 220 | struct bio *bio = r1_bio->master_bio; |
219 | int done; | 221 | int done; |
220 | conf_t *conf = r1_bio->mddev->private; | 222 | struct r1conf *conf = r1_bio->mddev->private; |
221 | 223 | ||
222 | if (bio->bi_phys_segments) { | 224 | if (bio->bi_phys_segments) { |
223 | unsigned long flags; | 225 | unsigned long flags; |
@@ -240,17 +242,17 @@ static void call_bio_endio(r1bio_t *r1_bio) | |||
240 | } | 242 | } |
241 | } | 243 | } |
242 | 244 | ||
243 | static void raid_end_bio_io(r1bio_t *r1_bio) | 245 | static void raid_end_bio_io(struct r1bio *r1_bio) |
244 | { | 246 | { |
245 | struct bio *bio = r1_bio->master_bio; | 247 | struct bio *bio = r1_bio->master_bio; |
246 | 248 | ||
247 | /* if nobody has done the final endio yet, do it now */ | 249 | /* if nobody has done the final endio yet, do it now */ |
248 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { | 250 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { |
249 | PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n", | 251 | pr_debug("raid1: sync end %s on sectors %llu-%llu\n", |
250 | (bio_data_dir(bio) == WRITE) ? "write" : "read", | 252 | (bio_data_dir(bio) == WRITE) ? "write" : "read", |
251 | (unsigned long long) bio->bi_sector, | 253 | (unsigned long long) bio->bi_sector, |
252 | (unsigned long long) bio->bi_sector + | 254 | (unsigned long long) bio->bi_sector + |
253 | (bio->bi_size >> 9) - 1); | 255 | (bio->bi_size >> 9) - 1); |
254 | 256 | ||
255 | call_bio_endio(r1_bio); | 257 | call_bio_endio(r1_bio); |
256 | } | 258 | } |
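The deleted PRINTK wrapper gated messages on a compile-time DEBUG constant; pr_debug() gives the same effect with no local macro, compiling to nothing unless DEBUG is defined or dynamic debug enables the call site at runtime. A minimal module-style sketch of the replacement (illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* old style: do { if (DEBUG) printk(KERN_DEBUG ...); } while (0) */
	pr_debug("demo: sync end write on sectors %llu-%llu\n", 0ULL, 7ULL);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");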
@@ -260,20 +262,38 @@ static void raid_end_bio_io(r1bio_t *r1_bio) | |||
260 | /* | 262 | /* |
261 | * Update disk head position estimator based on IRQ completion info. | 263 | * Update disk head position estimator based on IRQ completion info. |
262 | */ | 264 | */ |
263 | static inline void update_head_pos(int disk, r1bio_t *r1_bio) | 265 | static inline void update_head_pos(int disk, struct r1bio *r1_bio) |
264 | { | 266 | { |
265 | conf_t *conf = r1_bio->mddev->private; | 267 | struct r1conf *conf = r1_bio->mddev->private; |
266 | 268 | ||
267 | conf->mirrors[disk].head_position = | 269 | conf->mirrors[disk].head_position = |
268 | r1_bio->sector + (r1_bio->sectors); | 270 | r1_bio->sector + (r1_bio->sectors); |
269 | } | 271 | } |
270 | 272 | ||
273 | /* | ||
274 | * Find the disk number which triggered given bio | ||
275 | */ | ||
276 | static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) | ||
277 | { | ||
278 | int mirror; | ||
279 | int raid_disks = r1_bio->mddev->raid_disks; | ||
280 | |||
281 | for (mirror = 0; mirror < raid_disks; mirror++) | ||
282 | if (r1_bio->bios[mirror] == bio) | ||
283 | break; | ||
284 | |||
285 | BUG_ON(mirror == raid_disks); | ||
286 | update_head_pos(mirror, r1_bio); | ||
287 | |||
288 | return mirror; | ||
289 | } | ||
290 | |||
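The new find_bio_disk() factors out a scan that both write-completion paths previously open-coded: a completion handler only gets the bio back, so the owning disk index is recovered by searching the per-disk bios[] table, with a BUG_ON guarding the invariant that the bio belongs to this r1bio. A userspace sketch of the pattern with stand-in types:

#include <assert.h>
#include <stdio.h>

#define RAID_DISKS 3

struct bio { int id; };

static int find_bio_disk(struct bio *const bios[], struct bio *bio)
{
	int mirror;

	for (mirror = 0; mirror < RAID_DISKS; mirror++)
		if (bios[mirror] == bio)
			break;
	assert(mirror < RAID_DISKS);	/* BUG_ON analogue: bio must be ours */
	return mirror;
}

int main(void)
{
	struct bio a = {0}, b = {1}, c = {2};
	struct bio *bios[RAID_DISKS] = { &a, &b, &c };

	printf("bio b completed on disk %d\n", find_bio_disk(bios, &b));
	return 0;
}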
271 | static void raid1_end_read_request(struct bio *bio, int error) | 291 | static void raid1_end_read_request(struct bio *bio, int error) |
272 | { | 292 | { |
273 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 293 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
274 | r1bio_t *r1_bio = bio->bi_private; | 294 | struct r1bio *r1_bio = bio->bi_private; |
275 | int mirror; | 295 | int mirror; |
276 | conf_t *conf = r1_bio->mddev->private; | 296 | struct r1conf *conf = r1_bio->mddev->private; |
277 | 297 | ||
278 | mirror = r1_bio->read_disk; | 298 | mirror = r1_bio->read_disk; |
279 | /* | 299 | /* |
@@ -318,7 +338,7 @@ static void raid1_end_read_request(struct bio *bio, int error) | |||
318 | rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); | 338 | rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); |
319 | } | 339 | } |
320 | 340 | ||
321 | static void close_write(r1bio_t *r1_bio) | 341 | static void close_write(struct r1bio *r1_bio) |
322 | { | 342 | { |
323 | /* it really is the end of this request */ | 343 | /* it really is the end of this request */ |
324 | if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { | 344 | if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { |
@@ -337,7 +357,7 @@ static void close_write(r1bio_t *r1_bio) | |||
337 | md_write_end(r1_bio->mddev); | 357 | md_write_end(r1_bio->mddev); |
338 | } | 358 | } |
339 | 359 | ||
340 | static void r1_bio_write_done(r1bio_t *r1_bio) | 360 | static void r1_bio_write_done(struct r1bio *r1_bio) |
341 | { | 361 | { |
342 | if (!atomic_dec_and_test(&r1_bio->remaining)) | 362 | if (!atomic_dec_and_test(&r1_bio->remaining)) |
343 | return; | 363 | return; |
@@ -356,15 +376,12 @@ static void r1_bio_write_done(r1bio_t *r1_bio) | |||
356 | static void raid1_end_write_request(struct bio *bio, int error) | 376 | static void raid1_end_write_request(struct bio *bio, int error) |
357 | { | 377 | { |
358 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 378 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
359 | r1bio_t *r1_bio = bio->bi_private; | 379 | struct r1bio *r1_bio = bio->bi_private; |
360 | int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); | 380 | int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); |
361 | conf_t *conf = r1_bio->mddev->private; | 381 | struct r1conf *conf = r1_bio->mddev->private; |
362 | struct bio *to_put = NULL; | 382 | struct bio *to_put = NULL; |
363 | 383 | ||
364 | 384 | mirror = find_bio_disk(r1_bio, bio); | |
365 | for (mirror = 0; mirror < conf->raid_disks; mirror++) | ||
366 | if (r1_bio->bios[mirror] == bio) | ||
367 | break; | ||
368 | 385 | ||
369 | /* | 386 | /* |
370 | * 'one mirror IO has finished' event handler: | 387 | * 'one mirror IO has finished' event handler: |
@@ -400,8 +417,6 @@ static void raid1_end_write_request(struct bio *bio, int error) | |||
400 | } | 417 | } |
401 | } | 418 | } |
402 | 419 | ||
403 | update_head_pos(mirror, r1_bio); | ||
404 | |||
405 | if (behind) { | 420 | if (behind) { |
406 | if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) | 421 | if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) |
407 | atomic_dec(&r1_bio->behind_remaining); | 422 | atomic_dec(&r1_bio->behind_remaining); |
@@ -418,10 +433,11 @@ static void raid1_end_write_request(struct bio *bio, int error) | |||
418 | /* Maybe we can return now */ | 433 | /* Maybe we can return now */ |
419 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { | 434 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { |
420 | struct bio *mbio = r1_bio->master_bio; | 435 | struct bio *mbio = r1_bio->master_bio; |
421 | PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", | 436 | pr_debug("raid1: behind end write sectors" |
422 | (unsigned long long) mbio->bi_sector, | 437 | " %llu-%llu\n", |
423 | (unsigned long long) mbio->bi_sector + | 438 | (unsigned long long) mbio->bi_sector, |
424 | (mbio->bi_size >> 9) - 1); | 439 | (unsigned long long) mbio->bi_sector + |
440 | (mbio->bi_size >> 9) - 1); | ||
425 | call_bio_endio(r1_bio); | 441 | call_bio_endio(r1_bio); |
426 | } | 442 | } |
427 | } | 443 | } |
@@ -455,7 +471,7 @@ static void raid1_end_write_request(struct bio *bio, int error) | |||
455 | * | 471 | * |
456 | * The rdev for the device selected will have nr_pending incremented. | 472 | * The rdev for the device selected will have nr_pending incremented. |
457 | */ | 473 | */ |
458 | static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors) | 474 | static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) |
459 | { | 475 | { |
460 | const sector_t this_sector = r1_bio->sector; | 476 | const sector_t this_sector = r1_bio->sector; |
461 | int sectors; | 477 | int sectors; |
@@ -464,7 +480,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors) | |||
464 | int best_disk; | 480 | int best_disk; |
465 | int i; | 481 | int i; |
466 | sector_t best_dist; | 482 | sector_t best_dist; |
467 | mdk_rdev_t *rdev; | 483 | struct md_rdev *rdev; |
468 | int choose_first; | 484 | int choose_first; |
469 | 485 | ||
470 | rcu_read_lock(); | 486 | rcu_read_lock(); |
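read_balance() picks the reader by seek distance: each mirror caches a head_position estimate, and the in-sync disk closest to the target sector wins, which keeps sequential streams on one device. A simplified userspace sketch of that heuristic (the real function also handles write-mostly devices, bad blocks, and resync limits):

#include <stdint.h>
#include <stdio.h>

struct mirror { uint64_t head_position; int in_sync; };

static int read_balance(const struct mirror *m, int disks, uint64_t sector)
{
	int best_disk = -1;
	uint64_t best_dist = UINT64_MAX;

	for (int i = 0; i < disks; i++) {
		uint64_t dist;

		if (!m[i].in_sync)
			continue;
		/* absolute distance from the cached head position */
		dist = m[i].head_position > sector ?
			m[i].head_position - sector : sector - m[i].head_position;
		if (dist < best_dist) {
			best_dist = dist;
			best_disk = i;
		}
	}
	return best_disk;
}

int main(void)
{
	struct mirror m[2] = { { 100, 1 }, { 5000, 1 } };

	printf("sector 4900 -> disk %d\n", read_balance(m, 2, 4900)); /* 1 */
	return 0;
}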
@@ -582,14 +598,18 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors) | |||
582 | return best_disk; | 598 | return best_disk; |
583 | } | 599 | } |
584 | 600 | ||
585 | int md_raid1_congested(mddev_t *mddev, int bits) | 601 | int md_raid1_congested(struct mddev *mddev, int bits) |
586 | { | 602 | { |
587 | conf_t *conf = mddev->private; | 603 | struct r1conf *conf = mddev->private; |
588 | int i, ret = 0; | 604 | int i, ret = 0; |
589 | 605 | ||
606 | if ((bits & (1 << BDI_async_congested)) && | ||
607 | conf->pending_count >= max_queued_requests) | ||
608 | return 1; | ||
609 | |||
590 | rcu_read_lock(); | 610 | rcu_read_lock(); |
591 | for (i = 0; i < mddev->raid_disks; i++) { | 611 | for (i = 0; i < mddev->raid_disks; i++) { |
592 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 612 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
593 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | 613 | if (rdev && !test_bit(Faulty, &rdev->flags)) { |
594 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 614 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
595 | 615 | ||
@@ -611,13 +631,13 @@ EXPORT_SYMBOL_GPL(md_raid1_congested); | |||
611 | 631 | ||
612 | static int raid1_congested(void *data, int bits) | 632 | static int raid1_congested(void *data, int bits) |
613 | { | 633 | { |
614 | mddev_t *mddev = data; | 634 | struct mddev *mddev = data; |
615 | 635 | ||
616 | return mddev_congested(mddev, bits) || | 636 | return mddev_congested(mddev, bits) || |
617 | md_raid1_congested(mddev, bits); | 637 | md_raid1_congested(mddev, bits); |
618 | } | 638 | } |
619 | 639 | ||
620 | static void flush_pending_writes(conf_t *conf) | 640 | static void flush_pending_writes(struct r1conf *conf) |
621 | { | 641 | { |
622 | /* Any writes that have been queued but are awaiting | 642 | /* Any writes that have been queued but are awaiting |
623 | * bitmap updates get flushed here. | 643 | * bitmap updates get flushed here. |
@@ -627,10 +647,12 @@ static void flush_pending_writes(conf_t *conf) | |||
627 | if (conf->pending_bio_list.head) { | 647 | if (conf->pending_bio_list.head) { |
628 | struct bio *bio; | 648 | struct bio *bio; |
629 | bio = bio_list_get(&conf->pending_bio_list); | 649 | bio = bio_list_get(&conf->pending_bio_list); |
650 | conf->pending_count = 0; | ||
630 | spin_unlock_irq(&conf->device_lock); | 651 | spin_unlock_irq(&conf->device_lock); |
631 | /* flush any pending bitmap writes to | 652 | /* flush any pending bitmap writes to |
632 | * disk before proceeding w/ I/O */ | 653 | * disk before proceeding w/ I/O */ |
633 | bitmap_unplug(conf->mddev->bitmap); | 654 | bitmap_unplug(conf->mddev->bitmap); |
655 | wake_up(&conf->wait_barrier); | ||
634 | 656 | ||
635 | while (bio) { /* submit pending writes */ | 657 | while (bio) { /* submit pending writes */ |
636 | struct bio *next = bio->bi_next; | 658 | struct bio *next = bio->bi_next; |
@@ -665,7 +687,7 @@ static void flush_pending_writes(conf_t *conf) | |||
665 | */ | 687 | */ |
666 | #define RESYNC_DEPTH 32 | 688 | #define RESYNC_DEPTH 32 |
667 | 689 | ||
668 | static void raise_barrier(conf_t *conf) | 690 | static void raise_barrier(struct r1conf *conf) |
669 | { | 691 | { |
670 | spin_lock_irq(&conf->resync_lock); | 692 | spin_lock_irq(&conf->resync_lock); |
671 | 693 | ||
@@ -684,7 +706,7 @@ static void raise_barrier(conf_t *conf) | |||
684 | spin_unlock_irq(&conf->resync_lock); | 706 | spin_unlock_irq(&conf->resync_lock); |
685 | } | 707 | } |
686 | 708 | ||
687 | static void lower_barrier(conf_t *conf) | 709 | static void lower_barrier(struct r1conf *conf) |
688 | { | 710 | { |
689 | unsigned long flags; | 711 | unsigned long flags; |
690 | BUG_ON(conf->barrier <= 0); | 712 | BUG_ON(conf->barrier <= 0); |
@@ -694,7 +716,7 @@ static void lower_barrier(conf_t *conf) | |||
694 | wake_up(&conf->wait_barrier); | 716 | wake_up(&conf->wait_barrier); |
695 | } | 717 | } |
696 | 718 | ||
697 | static void wait_barrier(conf_t *conf) | 719 | static void wait_barrier(struct r1conf *conf) |
698 | { | 720 | { |
699 | spin_lock_irq(&conf->resync_lock); | 721 | spin_lock_irq(&conf->resync_lock); |
700 | if (conf->barrier) { | 722 | if (conf->barrier) { |
@@ -708,7 +730,7 @@ static void wait_barrier(conf_t *conf) | |||
708 | spin_unlock_irq(&conf->resync_lock); | 730 | spin_unlock_irq(&conf->resync_lock); |
709 | } | 731 | } |
710 | 732 | ||
711 | static void allow_barrier(conf_t *conf) | 733 | static void allow_barrier(struct r1conf *conf) |
712 | { | 734 | { |
713 | unsigned long flags; | 735 | unsigned long flags; |
714 | spin_lock_irqsave(&conf->resync_lock, flags); | 736 | spin_lock_irqsave(&conf->resync_lock, flags); |
@@ -717,7 +739,7 @@ static void allow_barrier(conf_t *conf) | |||
717 | wake_up(&conf->wait_barrier); | 739 | wake_up(&conf->wait_barrier); |
718 | } | 740 | } |
719 | 741 | ||
720 | static void freeze_array(conf_t *conf) | 742 | static void freeze_array(struct r1conf *conf) |
721 | { | 743 | { |
722 | /* stop syncio and normal IO and wait for everything to | 744 | /* stop syncio and normal IO and wait for everything to |
723 | * go quiet. | 745 | * go quiet. |
@@ -740,7 +762,7 @@ static void freeze_array(conf_t *conf) | |||
740 | flush_pending_writes(conf)); | 762 | flush_pending_writes(conf)); |
741 | spin_unlock_irq(&conf->resync_lock); | 763 | spin_unlock_irq(&conf->resync_lock); |
742 | } | 764 | } |
743 | static void unfreeze_array(conf_t *conf) | 765 | static void unfreeze_array(struct r1conf *conf) |
744 | { | 766 | { |
745 | /* reverse the effect of the freeze */ | 767 | /* reverse the effect of the freeze */ |
746 | spin_lock_irq(&conf->resync_lock); | 768 | spin_lock_irq(&conf->resync_lock); |
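The barrier functions above implement a mutual-exclusion gate between resync and normal IO: raise_barrier() waits for in-flight requests (nr_pending) to drain, while wait_barrier() blocks new requests whenever a barrier is up. A heavily simplified pthread analogue, ignoring RESYNC_DEPTH, nr_waiting, and the freeze path:

#include <pthread.h>

static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_barrier_q = PTHREAD_COND_INITIALIZER;
static int barrier;		/* raised by resync */
static int nr_pending;		/* normal IO in flight */

static void raise_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	barrier++;
	while (nr_pending)	/* wait for pending IO to drain */
		pthread_cond_wait(&wait_barrier_q, &resync_lock);
	pthread_mutex_unlock(&resync_lock);
}

static void lower_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	barrier--;
	pthread_cond_broadcast(&wait_barrier_q);
	pthread_mutex_unlock(&resync_lock);
}

static void wait_barrier(void)	/* entry point for normal IO */
{
	pthread_mutex_lock(&resync_lock);
	while (barrier)
		pthread_cond_wait(&wait_barrier_q, &resync_lock);
	nr_pending++;
	pthread_mutex_unlock(&resync_lock);
}

static void allow_barrier(void)	/* normal IO completed */
{
	pthread_mutex_lock(&resync_lock);
	if (--nr_pending == 0)
		pthread_cond_broadcast(&wait_barrier_q);
	pthread_mutex_unlock(&resync_lock);
}

int main(void)
{
	/* single-threaded smoke test of the ordering */
	wait_barrier();		/* normal IO enters */
	allow_barrier();	/* ...and completes */
	raise_barrier();	/* resync may now proceed */
	lower_barrier();
	return 0;
}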
@@ -753,7 +775,7 @@ static void unfreeze_array(conf_t *conf) | |||
753 | 775 | ||
754 | /* duplicate the data pages for behind I/O | 776 | /* duplicate the data pages for behind I/O |
755 | */ | 777 | */ |
756 | static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio) | 778 | static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio) |
757 | { | 779 | { |
758 | int i; | 780 | int i; |
759 | struct bio_vec *bvec; | 781 | struct bio_vec *bvec; |
@@ -782,14 +804,14 @@ do_sync_io: | |||
782 | if (bvecs[i].bv_page) | 804 | if (bvecs[i].bv_page) |
783 | put_page(bvecs[i].bv_page); | 805 | put_page(bvecs[i].bv_page); |
784 | kfree(bvecs); | 806 | kfree(bvecs); |
785 | PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); | 807 | pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); |
786 | } | 808 | } |
787 | 809 | ||
788 | static int make_request(mddev_t *mddev, struct bio * bio) | 810 | static int make_request(struct mddev *mddev, struct bio * bio) |
789 | { | 811 | { |
790 | conf_t *conf = mddev->private; | 812 | struct r1conf *conf = mddev->private; |
791 | mirror_info_t *mirror; | 813 | struct mirror_info *mirror; |
792 | r1bio_t *r1_bio; | 814 | struct r1bio *r1_bio; |
793 | struct bio *read_bio; | 815 | struct bio *read_bio; |
794 | int i, disks; | 816 | int i, disks; |
795 | struct bitmap *bitmap; | 817 | struct bitmap *bitmap; |
@@ -797,7 +819,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
797 | const int rw = bio_data_dir(bio); | 819 | const int rw = bio_data_dir(bio); |
798 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 820 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
799 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); | 821 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); |
800 | mdk_rdev_t *blocked_rdev; | 822 | struct md_rdev *blocked_rdev; |
801 | int plugged; | 823 | int plugged; |
802 | int first_clone; | 824 | int first_clone; |
803 | int sectors_handled; | 825 | int sectors_handled; |
@@ -934,6 +956,11 @@ read_again: | |||
934 | /* | 956 | /* |
935 | * WRITE: | 957 | * WRITE: |
936 | */ | 958 | */ |
959 | if (conf->pending_count >= max_queued_requests) { | ||
960 | md_wakeup_thread(mddev->thread); | ||
961 | wait_event(conf->wait_barrier, | ||
962 | conf->pending_count < max_queued_requests); | ||
963 | } | ||
937 | /* first select target devices under rcu_lock and | 964 | /* first select target devices under rcu_lock and |
938 | * inc refcount on their rdev. Record them by setting | 965 | * inc refcount on their rdev. Record them by setting |
939 | * bios[x] to bio | 966 | * bios[x] to bio |
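The block just added is the producer side of the new max_queued_requests back-pressure: a writer that finds the queue full kicks the raid1 thread and sleeps until flush_pending_writes() drains pending_count and wakes wait_barrier. A userspace pthread sketch of the same throttle:

#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUED 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int pending_count;

static void queue_write(void)
{
	pthread_mutex_lock(&lock);
	while (pending_count >= MAX_QUEUED)	/* wait_event analogue */
		pthread_cond_wait(&drained, &lock);
	pending_count++;			/* bio_list_add analogue */
	pthread_mutex_unlock(&lock);
}

static void flush_pending(void)			/* raid1d side */
{
	pthread_mutex_lock(&lock);
	pending_count = 0;			/* bio_list_get drains all */
	pthread_cond_broadcast(&drained);	/* wake_up(&wait_barrier) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < MAX_QUEUED; i++)
		queue_write();
	flush_pending();			/* frees room for more */
	queue_write();
	printf("pending_count = %d\n", pending_count);	/* 1 */
	return 0;
}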
@@ -952,7 +979,7 @@ read_again: | |||
952 | rcu_read_lock(); | 979 | rcu_read_lock(); |
953 | max_sectors = r1_bio->sectors; | 980 | max_sectors = r1_bio->sectors; |
954 | for (i = 0; i < disks; i++) { | 981 | for (i = 0; i < disks; i++) { |
955 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 982 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
956 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | 983 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
957 | atomic_inc(&rdev->nr_pending); | 984 | atomic_inc(&rdev->nr_pending); |
958 | blocked_rdev = rdev; | 985 | blocked_rdev = rdev; |
@@ -1097,6 +1124,7 @@ read_again: | |||
1097 | atomic_inc(&r1_bio->remaining); | 1124 | atomic_inc(&r1_bio->remaining); |
1098 | spin_lock_irqsave(&conf->device_lock, flags); | 1125 | spin_lock_irqsave(&conf->device_lock, flags); |
1099 | bio_list_add(&conf->pending_bio_list, mbio); | 1126 | bio_list_add(&conf->pending_bio_list, mbio); |
1127 | conf->pending_count++; | ||
1100 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1128 | spin_unlock_irqrestore(&conf->device_lock, flags); |
1101 | } | 1129 | } |
1102 | /* Mustn't call r1_bio_write_done before this next test, | 1130 | /* Mustn't call r1_bio_write_done before this next test, |
@@ -1127,16 +1155,16 @@ read_again: | |||
1127 | return 0; | 1155 | return 0; |
1128 | } | 1156 | } |
1129 | 1157 | ||
1130 | static void status(struct seq_file *seq, mddev_t *mddev) | 1158 | static void status(struct seq_file *seq, struct mddev *mddev) |
1131 | { | 1159 | { |
1132 | conf_t *conf = mddev->private; | 1160 | struct r1conf *conf = mddev->private; |
1133 | int i; | 1161 | int i; |
1134 | 1162 | ||
1135 | seq_printf(seq, " [%d/%d] [", conf->raid_disks, | 1163 | seq_printf(seq, " [%d/%d] [", conf->raid_disks, |
1136 | conf->raid_disks - mddev->degraded); | 1164 | conf->raid_disks - mddev->degraded); |
1137 | rcu_read_lock(); | 1165 | rcu_read_lock(); |
1138 | for (i = 0; i < conf->raid_disks; i++) { | 1166 | for (i = 0; i < conf->raid_disks; i++) { |
1139 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 1167 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
1140 | seq_printf(seq, "%s", | 1168 | seq_printf(seq, "%s", |
1141 | rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); | 1169 | rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); |
1142 | } | 1170 | } |
@@ -1145,10 +1173,10 @@ static void status(struct seq_file *seq, mddev_t *mddev) | |||
1145 | } | 1173 | } |
1146 | 1174 | ||
1147 | 1175 | ||
1148 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 1176 | static void error(struct mddev *mddev, struct md_rdev *rdev) |
1149 | { | 1177 | { |
1150 | char b[BDEVNAME_SIZE]; | 1178 | char b[BDEVNAME_SIZE]; |
1151 | conf_t *conf = mddev->private; | 1179 | struct r1conf *conf = mddev->private; |
1152 | 1180 | ||
1153 | /* | 1181 | /* |
1154 | * If it is not operational, then we have already marked it as dead | 1182 | * If it is not operational, then we have already marked it as dead |
@@ -1188,7 +1216,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1188 | mdname(mddev), conf->raid_disks - mddev->degraded); | 1216 | mdname(mddev), conf->raid_disks - mddev->degraded); |
1189 | } | 1217 | } |
1190 | 1218 | ||
1191 | static void print_conf(conf_t *conf) | 1219 | static void print_conf(struct r1conf *conf) |
1192 | { | 1220 | { |
1193 | int i; | 1221 | int i; |
1194 | 1222 | ||
@@ -1203,7 +1231,7 @@ static void print_conf(conf_t *conf) | |||
1203 | rcu_read_lock(); | 1231 | rcu_read_lock(); |
1204 | for (i = 0; i < conf->raid_disks; i++) { | 1232 | for (i = 0; i < conf->raid_disks; i++) { |
1205 | char b[BDEVNAME_SIZE]; | 1233 | char b[BDEVNAME_SIZE]; |
1206 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 1234 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
1207 | if (rdev) | 1235 | if (rdev) |
1208 | printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", | 1236 | printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", |
1209 | i, !test_bit(In_sync, &rdev->flags), | 1237 | i, !test_bit(In_sync, &rdev->flags), |
@@ -1213,7 +1241,7 @@ static void print_conf(conf_t *conf) | |||
1213 | rcu_read_unlock(); | 1241 | rcu_read_unlock(); |
1214 | } | 1242 | } |
1215 | 1243 | ||
1216 | static void close_sync(conf_t *conf) | 1244 | static void close_sync(struct r1conf *conf) |
1217 | { | 1245 | { |
1218 | wait_barrier(conf); | 1246 | wait_barrier(conf); |
1219 | allow_barrier(conf); | 1247 | allow_barrier(conf); |
@@ -1222,10 +1250,10 @@ static void close_sync(conf_t *conf) | |||
1222 | conf->r1buf_pool = NULL; | 1250 | conf->r1buf_pool = NULL; |
1223 | } | 1251 | } |
1224 | 1252 | ||
1225 | static int raid1_spare_active(mddev_t *mddev) | 1253 | static int raid1_spare_active(struct mddev *mddev) |
1226 | { | 1254 | { |
1227 | int i; | 1255 | int i; |
1228 | conf_t *conf = mddev->private; | 1256 | struct r1conf *conf = mddev->private; |
1229 | int count = 0; | 1257 | int count = 0; |
1230 | unsigned long flags; | 1258 | unsigned long flags; |
1231 | 1259 | ||
@@ -1235,7 +1263,7 @@ static int raid1_spare_active(mddev_t *mddev) | |||
1235 | * Called under mddev lock, so rcu protection not needed. | 1263 | * Called under mddev lock, so rcu protection not needed. |
1236 | */ | 1264 | */ |
1237 | for (i = 0; i < conf->raid_disks; i++) { | 1265 | for (i = 0; i < conf->raid_disks; i++) { |
1238 | mdk_rdev_t *rdev = conf->mirrors[i].rdev; | 1266 | struct md_rdev *rdev = conf->mirrors[i].rdev; |
1239 | if (rdev | 1267 | if (rdev |
1240 | && !test_bit(Faulty, &rdev->flags) | 1268 | && !test_bit(Faulty, &rdev->flags) |
1241 | && !test_and_set_bit(In_sync, &rdev->flags)) { | 1269 | && !test_and_set_bit(In_sync, &rdev->flags)) { |
@@ -1252,12 +1280,12 @@ static int raid1_spare_active(mddev_t *mddev) | |||
1252 | } | 1280 | } |
1253 | 1281 | ||
1254 | 1282 | ||
1255 | static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 1283 | static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
1256 | { | 1284 | { |
1257 | conf_t *conf = mddev->private; | 1285 | struct r1conf *conf = mddev->private; |
1258 | int err = -EEXIST; | 1286 | int err = -EEXIST; |
1259 | int mirror = 0; | 1287 | int mirror = 0; |
1260 | mirror_info_t *p; | 1288 | struct mirror_info *p; |
1261 | int first = 0; | 1289 | int first = 0; |
1262 | int last = mddev->raid_disks - 1; | 1290 | int last = mddev->raid_disks - 1; |
1263 | 1291 | ||
@@ -1300,12 +1328,12 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1300 | return err; | 1328 | return err; |
1301 | } | 1329 | } |
1302 | 1330 | ||
1303 | static int raid1_remove_disk(mddev_t *mddev, int number) | 1331 | static int raid1_remove_disk(struct mddev *mddev, int number) |
1304 | { | 1332 | { |
1305 | conf_t *conf = mddev->private; | 1333 | struct r1conf *conf = mddev->private; |
1306 | int err = 0; | 1334 | int err = 0; |
1307 | mdk_rdev_t *rdev; | 1335 | struct md_rdev *rdev; |
1308 | mirror_info_t *p = conf->mirrors+ number; | 1336 | struct mirror_info *p = conf->mirrors+ number; |
1309 | 1337 | ||
1310 | print_conf(conf); | 1338 | print_conf(conf); |
1311 | rdev = p->rdev; | 1339 | rdev = p->rdev; |
@@ -1343,14 +1371,10 @@ abort: | |||
1343 | 1371 | ||
1344 | static void end_sync_read(struct bio *bio, int error) | 1372 | static void end_sync_read(struct bio *bio, int error) |
1345 | { | 1373 | { |
1346 | r1bio_t *r1_bio = bio->bi_private; | 1374 | struct r1bio *r1_bio = bio->bi_private; |
1347 | int i; | 1375 | |
1376 | update_head_pos(r1_bio->read_disk, r1_bio); | ||
1348 | 1377 | ||
1349 | for (i=r1_bio->mddev->raid_disks; i--; ) | ||
1350 | if (r1_bio->bios[i] == bio) | ||
1351 | break; | ||
1352 | BUG_ON(i < 0); | ||
1353 | update_head_pos(i, r1_bio); | ||
1354 | /* | 1378 | /* |
1355 | * we have read a block, now it needs to be re-written, | 1379 | * we have read a block, now it needs to be re-written, |
1356 | * or re-read if the read failed. | 1380 | * or re-read if the read failed. |
@@ -1366,19 +1390,15 @@ static void end_sync_read(struct bio *bio, int error) | |||
1366 | static void end_sync_write(struct bio *bio, int error) | 1390 | static void end_sync_write(struct bio *bio, int error) |
1367 | { | 1391 | { |
1368 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 1392 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
1369 | r1bio_t *r1_bio = bio->bi_private; | 1393 | struct r1bio *r1_bio = bio->bi_private; |
1370 | mddev_t *mddev = r1_bio->mddev; | 1394 | struct mddev *mddev = r1_bio->mddev; |
1371 | conf_t *conf = mddev->private; | 1395 | struct r1conf *conf = mddev->private; |
1372 | int i; | ||
1373 | int mirror=0; | 1396 | int mirror=0; |
1374 | sector_t first_bad; | 1397 | sector_t first_bad; |
1375 | int bad_sectors; | 1398 | int bad_sectors; |
1376 | 1399 | ||
1377 | for (i = 0; i < conf->raid_disks; i++) | 1400 | mirror = find_bio_disk(r1_bio, bio); |
1378 | if (r1_bio->bios[i] == bio) { | 1401 | |
1379 | mirror = i; | ||
1380 | break; | ||
1381 | } | ||
1382 | if (!uptodate) { | 1402 | if (!uptodate) { |
1383 | sector_t sync_blocks = 0; | 1403 | sector_t sync_blocks = 0; |
1384 | sector_t s = r1_bio->sector; | 1404 | sector_t s = r1_bio->sector; |
@@ -1404,8 +1424,6 @@ static void end_sync_write(struct bio *bio, int error) | |||
1404 | ) | 1424 | ) |
1405 | set_bit(R1BIO_MadeGood, &r1_bio->state); | 1425 | set_bit(R1BIO_MadeGood, &r1_bio->state); |
1406 | 1426 | ||
1407 | update_head_pos(mirror, r1_bio); | ||
1408 | |||
1409 | if (atomic_dec_and_test(&r1_bio->remaining)) { | 1427 | if (atomic_dec_and_test(&r1_bio->remaining)) { |
1410 | int s = r1_bio->sectors; | 1428 | int s = r1_bio->sectors; |
1411 | if (test_bit(R1BIO_MadeGood, &r1_bio->state) || | 1429 | if (test_bit(R1BIO_MadeGood, &r1_bio->state) || |
@@ -1418,7 +1436,7 @@ static void end_sync_write(struct bio *bio, int error) | |||
1418 | } | 1436 | } |
1419 | } | 1437 | } |
1420 | 1438 | ||
1421 | static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector, | 1439 | static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, |
1422 | int sectors, struct page *page, int rw) | 1440 | int sectors, struct page *page, int rw) |
1423 | { | 1441 | { |
1424 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) | 1442 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) |
@@ -1432,7 +1450,7 @@ static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector, | |||
1432 | return 0; | 1450 | return 0; |
1433 | } | 1451 | } |
1434 | 1452 | ||
1435 | static int fix_sync_read_error(r1bio_t *r1_bio) | 1453 | static int fix_sync_read_error(struct r1bio *r1_bio) |
1436 | { | 1454 | { |
1437 | /* Try some synchronous reads of other devices to get | 1455 | /* Try some synchronous reads of other devices to get |
1438 | * good data, much like with normal read errors. Only | 1456 | * good data, much like with normal read errors. Only |
@@ -1445,8 +1463,8 @@ static int fix_sync_read_error(r1bio_t *r1_bio) | |||
1445 | * made sure that anything with a bad block in range | 1463 | * made sure that anything with a bad block in range |
1446 | * will have bi_end_io clear. | 1464 | * will have bi_end_io clear. |
1447 | */ | 1465 | */ |
1448 | mddev_t *mddev = r1_bio->mddev; | 1466 | struct mddev *mddev = r1_bio->mddev; |
1449 | conf_t *conf = mddev->private; | 1467 | struct r1conf *conf = mddev->private; |
1450 | struct bio *bio = r1_bio->bios[r1_bio->read_disk]; | 1468 | struct bio *bio = r1_bio->bios[r1_bio->read_disk]; |
1451 | sector_t sect = r1_bio->sector; | 1469 | sector_t sect = r1_bio->sector; |
1452 | int sectors = r1_bio->sectors; | 1470 | int sectors = r1_bio->sectors; |
@@ -1456,7 +1474,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio) | |||
1456 | int s = sectors; | 1474 | int s = sectors; |
1457 | int d = r1_bio->read_disk; | 1475 | int d = r1_bio->read_disk; |
1458 | int success = 0; | 1476 | int success = 0; |
1459 | mdk_rdev_t *rdev; | 1477 | struct md_rdev *rdev; |
1460 | int start; | 1478 | int start; |
1461 | 1479 | ||
1462 | if (s > (PAGE_SIZE>>9)) | 1480 | if (s > (PAGE_SIZE>>9)) |
@@ -1501,7 +1519,8 @@ static int fix_sync_read_error(r1bio_t *r1_bio) | |||
1501 | abort = 1; | 1519 | abort = 1; |
1502 | } | 1520 | } |
1503 | if (abort) { | 1521 | if (abort) { |
1504 | mddev->recovery_disabled = 1; | 1522 | conf->recovery_disabled = |
1523 | mddev->recovery_disabled; | ||
1505 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 1524 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
1506 | md_done_sync(mddev, r1_bio->sectors, 0); | 1525 | md_done_sync(mddev, r1_bio->sectors, 0); |
1507 | put_buf(r1_bio); | 1526 | put_buf(r1_bio); |
@@ -1552,7 +1571,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio) | |||
1552 | return 1; | 1571 | return 1; |
1553 | } | 1572 | } |
1554 | 1573 | ||
1555 | static int process_checks(r1bio_t *r1_bio) | 1574 | static int process_checks(struct r1bio *r1_bio) |
1556 | { | 1575 | { |
1557 | /* We have read all readable devices. If we haven't | 1576 | /* We have read all readable devices. If we haven't |
1558 | * got the block, then there is no hope left. | 1577 | * got the block, then there is no hope left. |
@@ -1561,8 +1580,8 @@ static int process_checks(r1bio_t *r1_bio) | |||
1561 | * If any blocks failed to read, then we need to | 1580 | * If any blocks failed to read, then we need to |
1562 | * attempt an over-write | 1581 | * attempt an over-write |
1563 | */ | 1582 | */ |
1564 | mddev_t *mddev = r1_bio->mddev; | 1583 | struct mddev *mddev = r1_bio->mddev; |
1565 | conf_t *conf = mddev->private; | 1584 | struct r1conf *conf = mddev->private; |
1566 | int primary; | 1585 | int primary; |
1567 | int i; | 1586 | int i; |
1568 | 1587 | ||
@@ -1634,9 +1653,9 @@ static int process_checks(r1bio_t *r1_bio) | |||
1634 | return 0; | 1653 | return 0; |
1635 | } | 1654 | } |
1636 | 1655 | ||
1637 | static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) | 1656 | static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) |
1638 | { | 1657 | { |
1639 | conf_t *conf = mddev->private; | 1658 | struct r1conf *conf = mddev->private; |
1640 | int i; | 1659 | int i; |
1641 | int disks = conf->raid_disks; | 1660 | int disks = conf->raid_disks; |
1642 | struct bio *bio, *wbio; | 1661 | struct bio *bio, *wbio; |
@@ -1686,16 +1705,16 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) | |||
1686 | * 3. Performs writes following reads for array synchronising. | 1705 | * 3. Performs writes following reads for array synchronising. |
1687 | */ | 1706 | */ |
1688 | 1707 | ||
1689 | static void fix_read_error(conf_t *conf, int read_disk, | 1708 | static void fix_read_error(struct r1conf *conf, int read_disk, |
1690 | sector_t sect, int sectors) | 1709 | sector_t sect, int sectors) |
1691 | { | 1710 | { |
1692 | mddev_t *mddev = conf->mddev; | 1711 | struct mddev *mddev = conf->mddev; |
1693 | while(sectors) { | 1712 | while(sectors) { |
1694 | int s = sectors; | 1713 | int s = sectors; |
1695 | int d = read_disk; | 1714 | int d = read_disk; |
1696 | int success = 0; | 1715 | int success = 0; |
1697 | int start; | 1716 | int start; |
1698 | mdk_rdev_t *rdev; | 1717 | struct md_rdev *rdev; |
1699 | 1718 | ||
1700 | if (s > (PAGE_SIZE>>9)) | 1719 | if (s > (PAGE_SIZE>>9)) |
1701 | s = PAGE_SIZE >> 9; | 1720 | s = PAGE_SIZE >> 9; |
@@ -1726,7 +1745,7 @@ static void fix_read_error(conf_t *conf, int read_disk, | |||
1726 | 1745 | ||
1727 | if (!success) { | 1746 | if (!success) { |
1728 | /* Cannot read from anywhere - mark it bad */ | 1747 | /* Cannot read from anywhere - mark it bad */ |
1729 | mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev; | 1748 | struct md_rdev *rdev = conf->mirrors[read_disk].rdev; |
1730 | if (!rdev_set_badblocks(rdev, sect, s, 0)) | 1749 | if (!rdev_set_badblocks(rdev, sect, s, 0)) |
1731 | md_error(mddev, rdev); | 1750 | md_error(mddev, rdev); |
1732 | break; | 1751 | break; |
@@ -1789,11 +1808,11 @@ static int submit_bio_wait(int rw, struct bio *bio) | |||
1789 | return test_bit(BIO_UPTODATE, &bio->bi_flags); | 1808 | return test_bit(BIO_UPTODATE, &bio->bi_flags); |
1790 | } | 1809 | } |
1791 | 1810 | ||
1792 | static int narrow_write_error(r1bio_t *r1_bio, int i) | 1811 | static int narrow_write_error(struct r1bio *r1_bio, int i) |
1793 | { | 1812 | { |
1794 | mddev_t *mddev = r1_bio->mddev; | 1813 | struct mddev *mddev = r1_bio->mddev; |
1795 | conf_t *conf = mddev->private; | 1814 | struct r1conf *conf = mddev->private; |
1796 | mdk_rdev_t *rdev = conf->mirrors[i].rdev; | 1815 | struct md_rdev *rdev = conf->mirrors[i].rdev; |
1797 | int vcnt, idx; | 1816 | int vcnt, idx; |
1798 | struct bio_vec *vec; | 1817 | struct bio_vec *vec; |
1799 | 1818 | ||
@@ -1865,12 +1884,12 @@ static int narrow_write_error(r1bio_t *r1_bio, int i) | |||
1865 | return ok; | 1884 | return ok; |
1866 | } | 1885 | } |
1867 | 1886 | ||
1868 | static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio) | 1887 | static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) |
1869 | { | 1888 | { |
1870 | int m; | 1889 | int m; |
1871 | int s = r1_bio->sectors; | 1890 | int s = r1_bio->sectors; |
1872 | for (m = 0; m < conf->raid_disks ; m++) { | 1891 | for (m = 0; m < conf->raid_disks ; m++) { |
1873 | mdk_rdev_t *rdev = conf->mirrors[m].rdev; | 1892 | struct md_rdev *rdev = conf->mirrors[m].rdev; |
1874 | struct bio *bio = r1_bio->bios[m]; | 1893 | struct bio *bio = r1_bio->bios[m]; |
1875 | if (bio->bi_end_io == NULL) | 1894 | if (bio->bi_end_io == NULL) |
1876 | continue; | 1895 | continue; |
@@ -1888,12 +1907,12 @@ static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio) | |||
1888 | md_done_sync(conf->mddev, s, 1); | 1907 | md_done_sync(conf->mddev, s, 1); |
1889 | } | 1908 | } |
1890 | 1909 | ||
1891 | static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio) | 1910 | static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) |
1892 | { | 1911 | { |
1893 | int m; | 1912 | int m; |
1894 | for (m = 0; m < conf->raid_disks ; m++) | 1913 | for (m = 0; m < conf->raid_disks ; m++) |
1895 | if (r1_bio->bios[m] == IO_MADE_GOOD) { | 1914 | if (r1_bio->bios[m] == IO_MADE_GOOD) { |
1896 | mdk_rdev_t *rdev = conf->mirrors[m].rdev; | 1915 | struct md_rdev *rdev = conf->mirrors[m].rdev; |
1897 | rdev_clear_badblocks(rdev, | 1916 | rdev_clear_badblocks(rdev, |
1898 | r1_bio->sector, | 1917 | r1_bio->sector, |
1899 | r1_bio->sectors); | 1918 | r1_bio->sectors); |
@@ -1917,14 +1936,14 @@ static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio) | |||
1917 | raid_end_bio_io(r1_bio); | 1936 | raid_end_bio_io(r1_bio); |
1918 | } | 1937 | } |
1919 | 1938 | ||
1920 | static void handle_read_error(conf_t *conf, r1bio_t *r1_bio) | 1939 | static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) |
1921 | { | 1940 | { |
1922 | int disk; | 1941 | int disk; |
1923 | int max_sectors; | 1942 | int max_sectors; |
1924 | mddev_t *mddev = conf->mddev; | 1943 | struct mddev *mddev = conf->mddev; |
1925 | struct bio *bio; | 1944 | struct bio *bio; |
1926 | char b[BDEVNAME_SIZE]; | 1945 | char b[BDEVNAME_SIZE]; |
1927 | mdk_rdev_t *rdev; | 1946 | struct md_rdev *rdev; |
1928 | 1947 | ||
1929 | clear_bit(R1BIO_ReadError, &r1_bio->state); | 1948 | clear_bit(R1BIO_ReadError, &r1_bio->state); |
1930 | /* we got a read error. Maybe the drive is bad. Maybe just | 1949 | /* we got a read error. Maybe the drive is bad. Maybe just |
@@ -2007,11 +2026,11 @@ read_more: | |||
2007 | } | 2026 | } |
2008 | } | 2027 | } |
2009 | 2028 | ||
2010 | static void raid1d(mddev_t *mddev) | 2029 | static void raid1d(struct mddev *mddev) |
2011 | { | 2030 | { |
2012 | r1bio_t *r1_bio; | 2031 | struct r1bio *r1_bio; |
2013 | unsigned long flags; | 2032 | unsigned long flags; |
2014 | conf_t *conf = mddev->private; | 2033 | struct r1conf *conf = mddev->private; |
2015 | struct list_head *head = &conf->retry_list; | 2034 | struct list_head *head = &conf->retry_list; |
2016 | struct blk_plug plug; | 2035 | struct blk_plug plug; |
2017 | 2036 | ||
@@ -2028,7 +2047,7 @@ static void raid1d(mddev_t *mddev) | |||
2028 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2047 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2029 | break; | 2048 | break; |
2030 | } | 2049 | } |
2031 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); | 2050 | r1_bio = list_entry(head->prev, struct r1bio, retry_list); |
2032 | list_del(head->prev); | 2051 | list_del(head->prev); |
2033 | conf->nr_queued--; | 2052 | conf->nr_queued--; |
2034 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2053 | spin_unlock_irqrestore(&conf->device_lock, flags); |
@@ -2060,7 +2079,7 @@ static void raid1d(mddev_t *mddev) | |||
2060 | } | 2079 | } |
2061 | 2080 | ||
2062 | 2081 | ||
2063 | static int init_resync(conf_t *conf) | 2082 | static int init_resync(struct r1conf *conf) |
2064 | { | 2083 | { |
2065 | int buffs; | 2084 | int buffs; |
2066 | 2085 | ||
@@ -2084,10 +2103,10 @@ static int init_resync(conf_t *conf) | |||
2084 | * that can be installed to exclude normal IO requests. | 2103 | * that can be installed to exclude normal IO requests. |
2085 | */ | 2104 | */ |
2086 | 2105 | ||
2087 | static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) | 2106 | static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) |
2088 | { | 2107 | { |
2089 | conf_t *conf = mddev->private; | 2108 | struct r1conf *conf = mddev->private; |
2090 | r1bio_t *r1_bio; | 2109 | struct r1bio *r1_bio; |
2091 | struct bio *bio; | 2110 | struct bio *bio; |
2092 | sector_t max_sector, nr_sectors; | 2111 | sector_t max_sector, nr_sectors; |
2093 | int disk = -1; | 2112 | int disk = -1; |
@@ -2167,7 +2186,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2167 | set_bit(R1BIO_IsSync, &r1_bio->state); | 2186 | set_bit(R1BIO_IsSync, &r1_bio->state); |
2168 | 2187 | ||
2169 | for (i=0; i < conf->raid_disks; i++) { | 2188 | for (i=0; i < conf->raid_disks; i++) { |
2170 | mdk_rdev_t *rdev; | 2189 | struct md_rdev *rdev; |
2171 | bio = r1_bio->bios[i]; | 2190 | bio = r1_bio->bios[i]; |
2172 | 2191 | ||
2173 | /* take from bio_init */ | 2192 | /* take from bio_init */ |
@@ -2239,7 +2258,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2239 | int ok = 1; | 2258 | int ok = 1; |
2240 | for (i = 0 ; i < conf->raid_disks ; i++) | 2259 | for (i = 0 ; i < conf->raid_disks ; i++) |
2241 | if (r1_bio->bios[i]->bi_end_io == end_sync_write) { | 2260 | if (r1_bio->bios[i]->bi_end_io == end_sync_write) { |
2242 | mdk_rdev_t *rdev = | 2261 | struct md_rdev *rdev = |
2243 | rcu_dereference(conf->mirrors[i].rdev); | 2262 | rcu_dereference(conf->mirrors[i].rdev); |
2244 | ok = rdev_set_badblocks(rdev, sector_nr, | 2263 | ok = rdev_set_badblocks(rdev, sector_nr, |
2245 | min_bad, 0 | 2264 | min_bad, 0 |
@@ -2356,7 +2375,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2356 | return nr_sectors; | 2375 | return nr_sectors; |
2357 | } | 2376 | } |
2358 | 2377 | ||
2359 | static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks) | 2378 | static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
2360 | { | 2379 | { |
2361 | if (sectors) | 2380 | if (sectors) |
2362 | return sectors; | 2381 | return sectors; |
@@ -2364,15 +2383,15 @@ static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
2364 | return mddev->dev_sectors; | 2383 | return mddev->dev_sectors; |
2365 | } | 2384 | } |
2366 | 2385 | ||
2367 | static conf_t *setup_conf(mddev_t *mddev) | 2386 | static struct r1conf *setup_conf(struct mddev *mddev) |
2368 | { | 2387 | { |
2369 | conf_t *conf; | 2388 | struct r1conf *conf; |
2370 | int i; | 2389 | int i; |
2371 | mirror_info_t *disk; | 2390 | struct mirror_info *disk; |
2372 | mdk_rdev_t *rdev; | 2391 | struct md_rdev *rdev; |
2373 | int err = -ENOMEM; | 2392 | int err = -ENOMEM; |
2374 | 2393 | ||
2375 | conf = kzalloc(sizeof(conf_t), GFP_KERNEL); | 2394 | conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); |
2376 | if (!conf) | 2395 | if (!conf) |
2377 | goto abort; | 2396 | goto abort; |
2378 | 2397 | ||
@@ -2417,6 +2436,8 @@ static conf_t *setup_conf(mddev_t *mddev) | |||
2417 | init_waitqueue_head(&conf->wait_barrier); | 2436 | init_waitqueue_head(&conf->wait_barrier); |
2418 | 2437 | ||
2419 | bio_list_init(&conf->pending_bio_list); | 2438 | bio_list_init(&conf->pending_bio_list); |
2439 | conf->pending_count = 0; | ||
2440 | conf->recovery_disabled = mddev->recovery_disabled - 1; | ||
2420 | 2441 | ||
2421 | conf->last_used = -1; | 2442 | conf->last_used = -1; |
2422 | for (i = 0; i < conf->raid_disks; i++) { | 2443 | for (i = 0; i < conf->raid_disks; i++) { |
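setup_conf() seeds conf->recovery_disabled one behind the per-array counter. When a sync read fails beyond repair (see the fix_sync_read_error hunk earlier), the current counter is copied into the conf, and, as best I can tell from this series, equality is then used to veto re-adding spares to that incarnation of the array. A userspace sketch of the generation-counter scheme; the veto helper is my reconstruction, not the driver's code:

#include <stdio.h>

struct fake_mddev { int recovery_disabled; };
struct fake_conf  { int recovery_disabled; };

static int can_add_spare(struct fake_mddev *m, struct fake_conf *c)
{
	/* recovery is only vetoed while the counters match */
	return m->recovery_disabled != c->recovery_disabled;
}

int main(void)
{
	struct fake_mddev m = { .recovery_disabled = 7 };
	struct fake_conf c = { .recovery_disabled = m.recovery_disabled - 1 };

	printf("add ok? %d\n", can_add_spare(&m, &c));	/* 1: counters differ */

	c.recovery_disabled = m.recovery_disabled;	/* fatal sync error */
	printf("add ok? %d\n", can_add_spare(&m, &c));	/* 0: recovery vetoed */

	m.recovery_disabled++;				/* array restarted */
	printf("add ok? %d\n", can_add_spare(&m, &c));	/* 1: new incarnation */
	return 0;
}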
@@ -2465,11 +2486,11 @@ static conf_t *setup_conf(mddev_t *mddev) | |||
2465 | return ERR_PTR(err); | 2486 | return ERR_PTR(err); |
2466 | } | 2487 | } |
2467 | 2488 | ||
2468 | static int run(mddev_t *mddev) | 2489 | static int run(struct mddev *mddev) |
2469 | { | 2490 | { |
2470 | conf_t *conf; | 2491 | struct r1conf *conf; |
2471 | int i; | 2492 | int i; |
2472 | mdk_rdev_t *rdev; | 2493 | struct md_rdev *rdev; |
2473 | 2494 | ||
2474 | if (mddev->level != 1) { | 2495 | if (mddev->level != 1) { |
2475 | printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", | 2496 | printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", |
@@ -2545,9 +2566,9 @@ static int run(mddev_t *mddev) | |||
2545 | return md_integrity_register(mddev); | 2566 | return md_integrity_register(mddev); |
2546 | } | 2567 | } |
2547 | 2568 | ||
2548 | static int stop(mddev_t *mddev) | 2569 | static int stop(struct mddev *mddev) |
2549 | { | 2570 | { |
2550 | conf_t *conf = mddev->private; | 2571 | struct r1conf *conf = mddev->private; |
2551 | struct bitmap *bitmap = mddev->bitmap; | 2572 | struct bitmap *bitmap = mddev->bitmap; |
2552 | 2573 | ||
2553 | /* wait for behind writes to complete */ | 2574 | /* wait for behind writes to complete */ |
@@ -2572,7 +2593,7 @@ static int stop(mddev_t *mddev) | |||
2572 | return 0; | 2593 | return 0; |
2573 | } | 2594 | } |
2574 | 2595 | ||
2575 | static int raid1_resize(mddev_t *mddev, sector_t sectors) | 2596 | static int raid1_resize(struct mddev *mddev, sector_t sectors) |
2576 | { | 2597 | { |
2577 | /* no resync is happening, and there is enough space | 2598 | /* no resync is happening, and there is enough space |
2578 | * on all devices, so we can resize. | 2599 | * on all devices, so we can resize. |
@@ -2596,7 +2617,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors) | |||
2596 | return 0; | 2617 | return 0; |
2597 | } | 2618 | } |
2598 | 2619 | ||
2599 | static int raid1_reshape(mddev_t *mddev) | 2620 | static int raid1_reshape(struct mddev *mddev) |
2600 | { | 2621 | { |
2601 | /* We need to: | 2622 | /* We need to: |
2602 | * 1/ resize the r1bio_pool | 2623 | * 1/ resize the r1bio_pool |
@@ -2611,8 +2632,8 @@ static int raid1_reshape(mddev_t *mddev) | |||
2611 | */ | 2632 | */ |
2612 | mempool_t *newpool, *oldpool; | 2633 | mempool_t *newpool, *oldpool; |
2613 | struct pool_info *newpoolinfo; | 2634 | struct pool_info *newpoolinfo; |
2614 | mirror_info_t *newmirrors; | 2635 | struct mirror_info *newmirrors; |
2615 | conf_t *conf = mddev->private; | 2636 | struct r1conf *conf = mddev->private; |
2616 | int cnt, raid_disks; | 2637 | int cnt, raid_disks; |
2617 | unsigned long flags; | 2638 | unsigned long flags; |
2618 | int d, d2, err; | 2639 | int d, d2, err; |
@@ -2668,7 +2689,7 @@ static int raid1_reshape(mddev_t *mddev) | |||
2668 | conf->r1bio_pool = newpool; | 2689 | conf->r1bio_pool = newpool; |
2669 | 2690 | ||
2670 | for (d = d2 = 0; d < conf->raid_disks; d++) { | 2691 | for (d = d2 = 0; d < conf->raid_disks; d++) { |
2671 | mdk_rdev_t *rdev = conf->mirrors[d].rdev; | 2692 | struct md_rdev *rdev = conf->mirrors[d].rdev; |
2672 | if (rdev && rdev->raid_disk != d2) { | 2693 | if (rdev && rdev->raid_disk != d2) { |
2673 | sysfs_unlink_rdev(mddev, rdev); | 2694 | sysfs_unlink_rdev(mddev, rdev); |
2674 | rdev->raid_disk = d2; | 2695 | rdev->raid_disk = d2; |
@@ -2702,9 +2723,9 @@ static int raid1_reshape(mddev_t *mddev) | |||
2702 | return 0; | 2723 | return 0; |
2703 | } | 2724 | } |
2704 | 2725 | ||
2705 | static void raid1_quiesce(mddev_t *mddev, int state) | 2726 | static void raid1_quiesce(struct mddev *mddev, int state) |
2706 | { | 2727 | { |
2707 | conf_t *conf = mddev->private; | 2728 | struct r1conf *conf = mddev->private; |
2708 | 2729 | ||
2709 | switch(state) { | 2730 | switch(state) { |
2710 | case 2: /* wake for suspend */ | 2731 | case 2: /* wake for suspend */ |
@@ -2719,13 +2740,13 @@ static void raid1_quiesce(mddev_t *mddev, int state) | |||
2719 | } | 2740 | } |
2720 | } | 2741 | } |
2721 | 2742 | ||
2722 | static void *raid1_takeover(mddev_t *mddev) | 2743 | static void *raid1_takeover(struct mddev *mddev) |
2723 | { | 2744 | { |
2724 | /* raid1 can take over: | 2745 | /* raid1 can take over: |
2725 | * raid5 with 2 devices, any layout or chunk size | 2746 | * raid5 with 2 devices, any layout or chunk size |
2726 | */ | 2747 | */ |
2727 | if (mddev->level == 5 && mddev->raid_disks == 2) { | 2748 | if (mddev->level == 5 && mddev->raid_disks == 2) { |
2728 | conf_t *conf; | 2749 | struct r1conf *conf; |
2729 | mddev->new_level = 1; | 2750 | mddev->new_level = 1; |
2730 | mddev->new_layout = 0; | 2751 | mddev->new_layout = 0; |
2731 | mddev->new_chunk_sectors = 0; | 2752 | mddev->new_chunk_sectors = 0; |
@@ -2737,7 +2758,7 @@ static void *raid1_takeover(mddev_t *mddev) | |||
2737 | return ERR_PTR(-EINVAL); | 2758 | return ERR_PTR(-EINVAL); |
2738 | } | 2759 | } |
2739 | 2760 | ||
2740 | static struct mdk_personality raid1_personality = | 2761 | static struct md_personality raid1_personality = |
2741 | { | 2762 | { |
2742 | .name = "raid1", | 2763 | .name = "raid1", |
2743 | .level = 1, | 2764 | .level = 1, |
@@ -2775,3 +2796,5 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD"); | |||
2775 | MODULE_ALIAS("md-personality-3"); /* RAID1 */ | 2796 | MODULE_ALIAS("md-personality-3"); /* RAID1 */ |
2776 | MODULE_ALIAS("md-raid1"); | 2797 | MODULE_ALIAS("md-raid1"); |
2777 | MODULE_ALIAS("md-level-1"); | 2798 | MODULE_ALIAS("md-level-1"); |
2799 | |||
2800 | module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); | ||
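The raid1.c changes end by exposing max_queued_requests as a module parameter, so the new write-congestion threshold can be tuned at runtime. A minimal sketch of that pattern follows; the demo module and its description string are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Appears as /sys/module/<module>/parameters/max_queued_requests;
 * S_IRUGO|S_IWUSR makes it world-readable and root-writable. */
static int max_queued_requests = 1024;
module_param(max_queued_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_queued_requests,
                 "writes queued before the array reports congestion");

static int __init param_demo_init(void)
{
        return 0;
}
module_init(param_demo_init);

static void __exit param_demo_exit(void)
{
}
module_exit(param_demo_exit);

MODULE_LICENSE("GPL");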
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index e0d676b48974..c732b6cce935 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
@@ -1,10 +1,8 @@ | |||
1 | #ifndef _RAID1_H | 1 | #ifndef _RAID1_H |
2 | #define _RAID1_H | 2 | #define _RAID1_H |
3 | 3 | ||
4 | typedef struct mirror_info mirror_info_t; | ||
5 | |||
6 | struct mirror_info { | 4 | struct mirror_info { |
7 | mdk_rdev_t *rdev; | 5 | struct md_rdev *rdev; |
8 | sector_t head_position; | 6 | sector_t head_position; |
9 | }; | 7 | }; |
10 | 8 | ||
@@ -17,61 +15,82 @@ struct mirror_info { | |||
17 | */ | 15 | */ |
18 | 16 | ||
19 | struct pool_info { | 17 | struct pool_info { |
20 | mddev_t *mddev; | 18 | struct mddev *mddev; |
21 | int raid_disks; | 19 | int raid_disks; |
22 | }; | 20 | }; |
23 | 21 | ||
24 | 22 | struct r1conf { | |
25 | typedef struct r1bio_s r1bio_t; | 23 | struct mddev *mddev; |
26 | 24 | struct mirror_info *mirrors; | |
27 | struct r1_private_data_s { | ||
28 | mddev_t *mddev; | ||
29 | mirror_info_t *mirrors; | ||
30 | int raid_disks; | 25 | int raid_disks; |
26 | |||
27 | /* When choosing the best device for a read (read_balance()) | ||
28 | * we try to keep sequential reads on the same device | ||
29 | * using 'last_used' and 'next_seq_sect' | ||
30 | */ | ||
31 | int last_used; | 31 | int last_used; |
32 | sector_t next_seq_sect; | 32 | sector_t next_seq_sect; |
33 | /* During resync, read_balancing is only allowed on the part | ||
34 | * of the array that has been resynced. 'next_resync' tells us | ||
35 | * where that is. | ||
36 | */ | ||
37 | sector_t next_resync; | ||
38 | |||
33 | spinlock_t device_lock; | 39 | spinlock_t device_lock; |
34 | 40 | ||
41 | /* list of 'struct r1bio' that need to be processed by raid1d, | ||
42 | * whether to retry a read, write out a resync or recovery | ||
43 | * block, or anything else. | ||
44 | */ | ||
35 | struct list_head retry_list; | 45 | struct list_head retry_list; |
36 | /* queue pending writes and submit them on unplug */ | ||
37 | struct bio_list pending_bio_list; | ||
38 | 46 | ||
39 | /* for use when syncing mirrors: */ | 47 | /* queue pending writes to be submitted on unplug */ |
48 | struct bio_list pending_bio_list; | ||
49 | int pending_count; | ||
40 | 50 | ||
51 | /* for use when syncing mirrors: | ||
52 | * We don't allow both normal IO and resync/recovery IO at | ||
53 | * the same time - resync/recovery can only happen when there | ||
54 | * is no other IO. So when either is active, the other has to wait. | ||
55 | * See the more detailed description in raid1.c near raise_barrier(). | ||
56 | */ | ||
57 | wait_queue_head_t wait_barrier; | ||
41 | spinlock_t resync_lock; | 58 | spinlock_t resync_lock; |
42 | int nr_pending; | 59 | int nr_pending; |
43 | int nr_waiting; | 60 | int nr_waiting; |
44 | int nr_queued; | 61 | int nr_queued; |
45 | int barrier; | 62 | int barrier; |
46 | sector_t next_resync; | ||
47 | int fullsync; /* set to 1 if a full sync is needed, | ||
48 | * (fresh device added). | ||
49 | * Cleared when a sync completes. | ||
50 | */ | ||
51 | int recovery_disabled; /* when the same as | ||
52 | * mddev->recovery_disabled | ||
53 | * we don't allow recovery | ||
54 | * to be attempted as we | ||
55 | * expect a read error | ||
56 | */ | ||
57 | 63 | ||
58 | wait_queue_head_t wait_barrier; | 64 | /* Set to 1 if a full sync is needed (fresh device added). |
65 | * Cleared when a sync completes. | ||
66 | */ | ||
67 | int fullsync; | ||
59 | 68 | ||
69 | /* When the same as mddev->recovery_disabled we don't allow | ||
70 | * recovery to be attempted as we expect a read error. | ||
71 | */ | ||
72 | int recovery_disabled; | ||
73 | |||
74 | |||
75 | /* poolinfo contains information about the content of the | ||
76 | * mempools - it changes when the array grows or shrinks | ||
77 | */ | ||
60 | struct pool_info *poolinfo; | 78 | struct pool_info *poolinfo; |
79 | mempool_t *r1bio_pool; | ||
80 | mempool_t *r1buf_pool; | ||
61 | 81 | ||
82 | /* temporary buffer for synchronous IO when attempting to repair | ||
83 | * a read error. | ||
84 | */ | ||
62 | struct page *tmppage; | 85 | struct page *tmppage; |
63 | 86 | ||
64 | mempool_t *r1bio_pool; | ||
65 | mempool_t *r1buf_pool; | ||
66 | 87 | ||
67 | /* When taking over an array from a different personality, we store | 88 | /* When taking over an array from a different personality, we store |
68 | * the new thread here until we fully activate the array. | 89 | * the new thread here until we fully activate the array. |
69 | */ | 90 | */ |
70 | struct mdk_thread_s *thread; | 91 | struct md_thread *thread; |
71 | }; | 92 | }; |
72 | 93 | ||
73 | typedef struct r1_private_data_s conf_t; | ||
74 | |||
75 | /* | 94 | /* |
76 | * this is our 'private' RAID1 bio. | 95 | * this is our 'private' RAID1 bio. |
77 | * | 96 | * |
@@ -79,7 +98,7 @@ typedef struct r1_private_data_s conf_t; | |||
79 | * for this RAID1 operation, and about their status: | 98 | * for this RAID1 operation, and about their status: |
80 | */ | 99 | */ |
81 | 100 | ||
82 | struct r1bio_s { | 101 | struct r1bio { |
83 | atomic_t remaining; /* 'have we finished' count, | 102 | atomic_t remaining; /* 'have we finished' count, |
84 | * used from IRQ handlers | 103 | * used from IRQ handlers |
85 | */ | 104 | */ |
@@ -89,7 +108,7 @@ struct r1bio_s { | |||
89 | sector_t sector; | 108 | sector_t sector; |
90 | int sectors; | 109 | int sectors; |
91 | unsigned long state; | 110 | unsigned long state; |
92 | mddev_t *mddev; | 111 | struct mddev *mddev; |
93 | /* | 112 | /* |
94 | * original bio going to /dev/mdx | 113 | * original bio going to /dev/mdx |
95 | */ | 114 | */ |
@@ -148,6 +167,6 @@ struct r1bio_s { | |||
148 | #define R1BIO_MadeGood 7 | 167 | #define R1BIO_MadeGood 7 |
149 | #define R1BIO_WriteError 8 | 168 | #define R1BIO_WriteError 8 |
150 | 169 | ||
151 | extern int md_raid1_congested(mddev_t *mddev, int bits); | 170 | extern int md_raid1_congested(struct mddev *mddev, int bits); |
152 | 171 | ||
153 | #endif | 172 | #endif |
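Everything in this header follows the series' broader cleanup: typedefs such as conf_t, r1bio_t and mdk_thread_t give way to bare struct tags, in line with kernel coding style, which discourages typedef'd structs. A contrived before/after sketch, where the struct bodies are dummies rather than the kernel's definitions:

struct mddev { void *private; };        /* dummy stand-in */
struct r1conf { int raid_disks; };      /* dummy stand-in */

/* Before: conf_t *conf = mddev->private;  -- the type hides behind a typedef.
 * After: the struct tag is spelled out at every use site. */
static struct r1conf *demo_get_conf(struct mddev *mddev)
{
        return mddev->private;
}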
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0cd9672cf9cb..132c18ef8665 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -58,13 +58,19 @@ | |||
58 | */ | 58 | */ |
59 | #define NR_RAID10_BIOS 256 | 59 | #define NR_RAID10_BIOS 256 |
60 | 60 | ||
61 | static void allow_barrier(conf_t *conf); | 61 | /* When there are this many requests queued to be written by |
62 | static void lower_barrier(conf_t *conf); | 62 | * the raid10 thread, we become 'congested' to provide back-pressure |
63 | * for writeback. | ||
64 | */ | ||
65 | static int max_queued_requests = 1024; | ||
66 | |||
67 | static void allow_barrier(struct r10conf *conf); | ||
68 | static void lower_barrier(struct r10conf *conf); | ||
63 | 69 | ||
64 | static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) | 70 | static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) |
65 | { | 71 | { |
66 | conf_t *conf = data; | 72 | struct r10conf *conf = data; |
67 | int size = offsetof(struct r10bio_s, devs[conf->copies]); | 73 | int size = offsetof(struct r10bio, devs[conf->copies]); |
68 | 74 | ||
69 | /* allocate a r10bio with room for raid_disks entries in the bios array */ | 75 | /* allocate a r10bio with room for raid_disks entries in the bios array */ |
70 | return kzalloc(size, gfp_flags); | 76 | return kzalloc(size, gfp_flags); |
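r10bio_pool_alloc() sizes the whole allocation with offsetof(struct r10bio, devs[conf->copies]), so the header and its per-copy trailing array come from a single kzalloc(). A self-contained userspace sketch of that flexible-array idiom, with demo-only types and names:

#include <stddef.h>
#include <stdlib.h>

struct demo_r10bio {
        int copies;
        struct { void *bio; long addr; } devs[];  /* flexible array member */
};

static struct demo_r10bio *demo_alloc(int copies)
{
        /* One zeroed allocation covering the header plus 'copies' entries.
         * A variable index inside offsetof() is a GCC extension that the
         * kernel relies on here. */
        struct demo_r10bio *r =
                calloc(1, offsetof(struct demo_r10bio, devs[copies]));

        if (r)
                r->copies = copies;
        return r;
}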
@@ -92,9 +98,9 @@ static void r10bio_pool_free(void *r10_bio, void *data) | |||
92 | */ | 98 | */ |
93 | static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) | 99 | static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) |
94 | { | 100 | { |
95 | conf_t *conf = data; | 101 | struct r10conf *conf = data; |
96 | struct page *page; | 102 | struct page *page; |
97 | r10bio_t *r10_bio; | 103 | struct r10bio *r10_bio; |
98 | struct bio *bio; | 104 | struct bio *bio; |
99 | int i, j; | 105 | int i, j; |
100 | int nalloc; | 106 | int nalloc; |
@@ -158,8 +164,8 @@ out_free_bio: | |||
158 | static void r10buf_pool_free(void *__r10_bio, void *data) | 164 | static void r10buf_pool_free(void *__r10_bio, void *data) |
159 | { | 165 | { |
160 | int i; | 166 | int i; |
161 | conf_t *conf = data; | 167 | struct r10conf *conf = data; |
162 | r10bio_t *r10bio = __r10_bio; | 168 | struct r10bio *r10bio = __r10_bio; |
163 | int j; | 169 | int j; |
164 | 170 | ||
165 | for (j=0; j < conf->copies; j++) { | 171 | for (j=0; j < conf->copies; j++) { |
@@ -175,7 +181,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data) | |||
175 | r10bio_pool_free(r10bio, conf); | 181 | r10bio_pool_free(r10bio, conf); |
176 | } | 182 | } |
177 | 183 | ||
178 | static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) | 184 | static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) |
179 | { | 185 | { |
180 | int i; | 186 | int i; |
181 | 187 | ||
@@ -187,28 +193,28 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) | |||
187 | } | 193 | } |
188 | } | 194 | } |
189 | 195 | ||
190 | static void free_r10bio(r10bio_t *r10_bio) | 196 | static void free_r10bio(struct r10bio *r10_bio) |
191 | { | 197 | { |
192 | conf_t *conf = r10_bio->mddev->private; | 198 | struct r10conf *conf = r10_bio->mddev->private; |
193 | 199 | ||
194 | put_all_bios(conf, r10_bio); | 200 | put_all_bios(conf, r10_bio); |
195 | mempool_free(r10_bio, conf->r10bio_pool); | 201 | mempool_free(r10_bio, conf->r10bio_pool); |
196 | } | 202 | } |
197 | 203 | ||
198 | static void put_buf(r10bio_t *r10_bio) | 204 | static void put_buf(struct r10bio *r10_bio) |
199 | { | 205 | { |
200 | conf_t *conf = r10_bio->mddev->private; | 206 | struct r10conf *conf = r10_bio->mddev->private; |
201 | 207 | ||
202 | mempool_free(r10_bio, conf->r10buf_pool); | 208 | mempool_free(r10_bio, conf->r10buf_pool); |
203 | 209 | ||
204 | lower_barrier(conf); | 210 | lower_barrier(conf); |
205 | } | 211 | } |
206 | 212 | ||
207 | static void reschedule_retry(r10bio_t *r10_bio) | 213 | static void reschedule_retry(struct r10bio *r10_bio) |
208 | { | 214 | { |
209 | unsigned long flags; | 215 | unsigned long flags; |
210 | mddev_t *mddev = r10_bio->mddev; | 216 | struct mddev *mddev = r10_bio->mddev; |
211 | conf_t *conf = mddev->private; | 217 | struct r10conf *conf = mddev->private; |
212 | 218 | ||
213 | spin_lock_irqsave(&conf->device_lock, flags); | 219 | spin_lock_irqsave(&conf->device_lock, flags); |
214 | list_add(&r10_bio->retry_list, &conf->retry_list); | 220 | list_add(&r10_bio->retry_list, &conf->retry_list); |
@@ -226,11 +232,11 @@ static void reschedule_retry(r10bio_t *r10_bio) | |||
226 | * operation and are ready to return a success/failure code to the buffer | 232 | * operation and are ready to return a success/failure code to the buffer |
227 | * cache layer. | 233 | * cache layer. |
228 | */ | 234 | */ |
229 | static void raid_end_bio_io(r10bio_t *r10_bio) | 235 | static void raid_end_bio_io(struct r10bio *r10_bio) |
230 | { | 236 | { |
231 | struct bio *bio = r10_bio->master_bio; | 237 | struct bio *bio = r10_bio->master_bio; |
232 | int done; | 238 | int done; |
233 | conf_t *conf = r10_bio->mddev->private; | 239 | struct r10conf *conf = r10_bio->mddev->private; |
234 | 240 | ||
235 | if (bio->bi_phys_segments) { | 241 | if (bio->bi_phys_segments) { |
236 | unsigned long flags; | 242 | unsigned long flags; |
@@ -256,9 +262,9 @@ static void raid_end_bio_io(r10bio_t *r10_bio) | |||
256 | /* | 262 | /* |
257 | * Update disk head position estimator based on IRQ completion info. | 263 | * Update disk head position estimator based on IRQ completion info. |
258 | */ | 264 | */ |
259 | static inline void update_head_pos(int slot, r10bio_t *r10_bio) | 265 | static inline void update_head_pos(int slot, struct r10bio *r10_bio) |
260 | { | 266 | { |
261 | conf_t *conf = r10_bio->mddev->private; | 267 | struct r10conf *conf = r10_bio->mddev->private; |
262 | 268 | ||
263 | conf->mirrors[r10_bio->devs[slot].devnum].head_position = | 269 | conf->mirrors[r10_bio->devs[slot].devnum].head_position = |
264 | r10_bio->devs[slot].addr + (r10_bio->sectors); | 270 | r10_bio->devs[slot].addr + (r10_bio->sectors); |
@@ -267,7 +273,7 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio) | |||
267 | /* | 273 | /* |
268 | * Find the disk number which triggered given bio | 274 | * Find the disk number which triggered given bio |
269 | */ | 275 | */ |
270 | static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, | 276 | static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, |
271 | struct bio *bio, int *slotp) | 277 | struct bio *bio, int *slotp) |
272 | { | 278 | { |
273 | int slot; | 279 | int slot; |
@@ -287,9 +293,9 @@ static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, | |||
287 | static void raid10_end_read_request(struct bio *bio, int error) | 293 | static void raid10_end_read_request(struct bio *bio, int error) |
288 | { | 294 | { |
289 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 295 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
290 | r10bio_t *r10_bio = bio->bi_private; | 296 | struct r10bio *r10_bio = bio->bi_private; |
291 | int slot, dev; | 297 | int slot, dev; |
292 | conf_t *conf = r10_bio->mddev->private; | 298 | struct r10conf *conf = r10_bio->mddev->private; |
293 | 299 | ||
294 | 300 | ||
295 | slot = r10_bio->read_slot; | 301 | slot = r10_bio->read_slot; |
@@ -327,7 +333,7 @@ static void raid10_end_read_request(struct bio *bio, int error) | |||
327 | } | 333 | } |
328 | } | 334 | } |
329 | 335 | ||
330 | static void close_write(r10bio_t *r10_bio) | 336 | static void close_write(struct r10bio *r10_bio) |
331 | { | 337 | { |
332 | /* clear the bitmap if all writes complete successfully */ | 338 | /* clear the bitmap if all writes complete successfully */ |
333 | bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, | 339 | bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, |
@@ -337,7 +343,7 @@ static void close_write(r10bio_t *r10_bio) | |||
337 | md_write_end(r10_bio->mddev); | 343 | md_write_end(r10_bio->mddev); |
338 | } | 344 | } |
339 | 345 | ||
340 | static void one_write_done(r10bio_t *r10_bio) | 346 | static void one_write_done(struct r10bio *r10_bio) |
341 | { | 347 | { |
342 | if (atomic_dec_and_test(&r10_bio->remaining)) { | 348 | if (atomic_dec_and_test(&r10_bio->remaining)) { |
343 | if (test_bit(R10BIO_WriteError, &r10_bio->state)) | 349 | if (test_bit(R10BIO_WriteError, &r10_bio->state)) |
@@ -355,10 +361,10 @@ static void one_write_done(r10bio_t *r10_bio) | |||
355 | static void raid10_end_write_request(struct bio *bio, int error) | 361 | static void raid10_end_write_request(struct bio *bio, int error) |
356 | { | 362 | { |
357 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 363 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
358 | r10bio_t *r10_bio = bio->bi_private; | 364 | struct r10bio *r10_bio = bio->bi_private; |
359 | int dev; | 365 | int dev; |
360 | int dec_rdev = 1; | 366 | int dec_rdev = 1; |
361 | conf_t *conf = r10_bio->mddev->private; | 367 | struct r10conf *conf = r10_bio->mddev->private; |
362 | int slot; | 368 | int slot; |
363 | 369 | ||
364 | dev = find_bio_disk(conf, r10_bio, bio, &slot); | 370 | dev = find_bio_disk(conf, r10_bio, bio, &slot); |
@@ -433,7 +439,7 @@ static void raid10_end_write_request(struct bio *bio, int error) | |||
433 | * sector offset to a virtual address | 439 | * sector offset to a virtual address |
434 | */ | 440 | */ |
435 | 441 | ||
436 | static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio) | 442 | static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) |
437 | { | 443 | { |
438 | int n,f; | 444 | int n,f; |
439 | sector_t sector; | 445 | sector_t sector; |
@@ -481,7 +487,7 @@ static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio) | |||
481 | BUG_ON(slot != conf->copies); | 487 | BUG_ON(slot != conf->copies); |
482 | } | 488 | } |
483 | 489 | ||
484 | static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev) | 490 | static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) |
485 | { | 491 | { |
486 | sector_t offset, chunk, vchunk; | 492 | sector_t offset, chunk, vchunk; |
487 | 493 | ||
@@ -522,7 +528,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, | |||
522 | struct bvec_merge_data *bvm, | 528 | struct bvec_merge_data *bvm, |
523 | struct bio_vec *biovec) | 529 | struct bio_vec *biovec) |
524 | { | 530 | { |
525 | mddev_t *mddev = q->queuedata; | 531 | struct mddev *mddev = q->queuedata; |
526 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | 532 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); |
527 | int max; | 533 | int max; |
528 | unsigned int chunk_sectors = mddev->chunk_sectors; | 534 | unsigned int chunk_sectors = mddev->chunk_sectors; |
@@ -555,14 +561,14 @@ static int raid10_mergeable_bvec(struct request_queue *q, | |||
555 | * FIXME: possibly should rethink readbalancing and do it differently | 561 | * FIXME: possibly should rethink readbalancing and do it differently |
556 | * depending on near_copies / far_copies geometry. | 562 | * depending on near_copies / far_copies geometry. |
557 | */ | 563 | */ |
558 | static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors) | 564 | static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_sectors) |
559 | { | 565 | { |
560 | const sector_t this_sector = r10_bio->sector; | 566 | const sector_t this_sector = r10_bio->sector; |
561 | int disk, slot; | 567 | int disk, slot; |
562 | int sectors = r10_bio->sectors; | 568 | int sectors = r10_bio->sectors; |
563 | int best_good_sectors; | 569 | int best_good_sectors; |
564 | sector_t new_distance, best_dist; | 570 | sector_t new_distance, best_dist; |
565 | mdk_rdev_t *rdev; | 571 | struct md_rdev *rdev; |
566 | int do_balance; | 572 | int do_balance; |
567 | int best_slot; | 573 | int best_slot; |
568 | 574 | ||
@@ -677,15 +683,19 @@ retry: | |||
677 | 683 | ||
678 | static int raid10_congested(void *data, int bits) | 684 | static int raid10_congested(void *data, int bits) |
679 | { | 685 | { |
680 | mddev_t *mddev = data; | 686 | struct mddev *mddev = data; |
681 | conf_t *conf = mddev->private; | 687 | struct r10conf *conf = mddev->private; |
682 | int i, ret = 0; | 688 | int i, ret = 0; |
683 | 689 | ||
690 | if ((bits & (1 << BDI_async_congested)) && | ||
691 | conf->pending_count >= max_queued_requests) | ||
692 | return 1; | ||
693 | |||
684 | if (mddev_congested(mddev, bits)) | 694 | if (mddev_congested(mddev, bits)) |
685 | return 1; | 695 | return 1; |
686 | rcu_read_lock(); | 696 | rcu_read_lock(); |
687 | for (i = 0; i < conf->raid_disks && ret == 0; i++) { | 697 | for (i = 0; i < conf->raid_disks && ret == 0; i++) { |
688 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 698 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
689 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | 699 | if (rdev && !test_bit(Faulty, &rdev->flags)) { |
690 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 700 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
691 | 701 | ||
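The hunk above adds an early exit: once the raid10 thread's backlog reaches max_queued_requests, async (writeback) congestion is reported immediately, without consulting the member devices. Distilled to a toy predicate with illustrative names:

/* Report write-back congestion purely from the driver's own backlog;
 * otherwise fall through to the per-device queue checks. */
static int demo_congested(int pending_count, int limit, int async_requested)
{
        if (async_requested && pending_count >= limit)
                return 1;
        return 0;
}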
@@ -696,7 +706,7 @@ static int raid10_congested(void *data, int bits) | |||
696 | return ret; | 706 | return ret; |
697 | } | 707 | } |
698 | 708 | ||
699 | static void flush_pending_writes(conf_t *conf) | 709 | static void flush_pending_writes(struct r10conf *conf) |
700 | { | 710 | { |
701 | /* Any writes that have been queued but are awaiting | 711 | /* Any writes that have been queued but are awaiting |
702 | * bitmap updates get flushed here. | 712 | * bitmap updates get flushed here. |
@@ -706,10 +716,12 @@ static void flush_pending_writes(conf_t *conf) | |||
706 | if (conf->pending_bio_list.head) { | 716 | if (conf->pending_bio_list.head) { |
707 | struct bio *bio; | 717 | struct bio *bio; |
708 | bio = bio_list_get(&conf->pending_bio_list); | 718 | bio = bio_list_get(&conf->pending_bio_list); |
719 | conf->pending_count = 0; | ||
709 | spin_unlock_irq(&conf->device_lock); | 720 | spin_unlock_irq(&conf->device_lock); |
710 | /* flush any pending bitmap writes to disk | 721 | /* flush any pending bitmap writes to disk |
711 | * before proceeding w/ I/O */ | 722 | * before proceeding w/ I/O */ |
712 | bitmap_unplug(conf->mddev->bitmap); | 723 | bitmap_unplug(conf->mddev->bitmap); |
724 | wake_up(&conf->wait_barrier); | ||
713 | 725 | ||
714 | while (bio) { /* submit pending writes */ | 726 | while (bio) { /* submit pending writes */ |
715 | struct bio *next = bio->bi_next; | 727 | struct bio *next = bio->bi_next; |
@@ -743,7 +755,7 @@ static void flush_pending_writes(conf_t *conf) | |||
743 | * lower_barrier when the particular background IO completes. | 755 | * lower_barrier when the particular background IO completes. |
744 | */ | 756 | */ |
745 | 757 | ||
746 | static void raise_barrier(conf_t *conf, int force) | 758 | static void raise_barrier(struct r10conf *conf, int force) |
747 | { | 759 | { |
748 | BUG_ON(force && !conf->barrier); | 760 | BUG_ON(force && !conf->barrier); |
749 | spin_lock_irq(&conf->resync_lock); | 761 | spin_lock_irq(&conf->resync_lock); |
@@ -763,7 +775,7 @@ static void raise_barrier(conf_t *conf, int force) | |||
763 | spin_unlock_irq(&conf->resync_lock); | 775 | spin_unlock_irq(&conf->resync_lock); |
764 | } | 776 | } |
765 | 777 | ||
766 | static void lower_barrier(conf_t *conf) | 778 | static void lower_barrier(struct r10conf *conf) |
767 | { | 779 | { |
768 | unsigned long flags; | 780 | unsigned long flags; |
769 | spin_lock_irqsave(&conf->resync_lock, flags); | 781 | spin_lock_irqsave(&conf->resync_lock, flags); |
@@ -772,7 +784,7 @@ static void lower_barrier(conf_t *conf) | |||
772 | wake_up(&conf->wait_barrier); | 784 | wake_up(&conf->wait_barrier); |
773 | } | 785 | } |
774 | 786 | ||
775 | static void wait_barrier(conf_t *conf) | 787 | static void wait_barrier(struct r10conf *conf) |
776 | { | 788 | { |
777 | spin_lock_irq(&conf->resync_lock); | 789 | spin_lock_irq(&conf->resync_lock); |
778 | if (conf->barrier) { | 790 | if (conf->barrier) { |
@@ -786,7 +798,7 @@ static void wait_barrier(conf_t *conf) | |||
786 | spin_unlock_irq(&conf->resync_lock); | 798 | spin_unlock_irq(&conf->resync_lock); |
787 | } | 799 | } |
788 | 800 | ||
789 | static void allow_barrier(conf_t *conf) | 801 | static void allow_barrier(struct r10conf *conf) |
790 | { | 802 | { |
791 | unsigned long flags; | 803 | unsigned long flags; |
792 | spin_lock_irqsave(&conf->resync_lock, flags); | 804 | spin_lock_irqsave(&conf->resync_lock, flags); |
@@ -795,7 +807,7 @@ static void allow_barrier(conf_t *conf) | |||
795 | wake_up(&conf->wait_barrier); | 807 | wake_up(&conf->wait_barrier); |
796 | } | 808 | } |
797 | 809 | ||
798 | static void freeze_array(conf_t *conf) | 810 | static void freeze_array(struct r10conf *conf) |
799 | { | 811 | { |
800 | /* stop syncio and normal IO and wait for everything to | 812 | /* stop syncio and normal IO and wait for everything to |
801 | * go quiet. | 813 | * go quiet. |
@@ -820,7 +832,7 @@ static void freeze_array(conf_t *conf) | |||
820 | spin_unlock_irq(&conf->resync_lock); | 832 | spin_unlock_irq(&conf->resync_lock); |
821 | } | 833 | } |
822 | 834 | ||
823 | static void unfreeze_array(conf_t *conf) | 835 | static void unfreeze_array(struct r10conf *conf) |
824 | { | 836 | { |
825 | /* reverse the effect of the freeze */ | 837 | /* reverse the effect of the freeze */ |
826 | spin_lock_irq(&conf->resync_lock); | 838 | spin_lock_irq(&conf->resync_lock); |
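The helpers renamed here (raise_barrier(), lower_barrier(), wait_barrier(), allow_barrier()) enforce the rule documented earlier in raid1.h, and raid10 uses the same scheme: resync/recovery IO and normal IO never run concurrently. Ignoring the force flag and the barrier depth limit, the handshake reduces to the following userspace analogue, with a mutex and condition variable standing in for resync_lock and the wait queue; all names are hypothetical:

#include <pthread.h>

static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int barrier;             /* held by resync/recovery */
static int nr_pending;          /* in-flight normal IO */

static void demo_raise_barrier(void)    /* resync side */
{
        pthread_mutex_lock(&resync_lock);
        while (barrier || nr_pending)   /* wait for normal IO to drain */
                pthread_cond_wait(&waitq, &resync_lock);
        barrier = 1;
        pthread_mutex_unlock(&resync_lock);
}

static void demo_lower_barrier(void)
{
        pthread_mutex_lock(&resync_lock);
        barrier = 0;
        pthread_cond_broadcast(&waitq);
        pthread_mutex_unlock(&resync_lock);
}

static void demo_wait_barrier(void)     /* normal IO entry */
{
        pthread_mutex_lock(&resync_lock);
        while (barrier)
                pthread_cond_wait(&waitq, &resync_lock);
        nr_pending++;
        pthread_mutex_unlock(&resync_lock);
}

static void demo_allow_barrier(void)    /* normal IO completion */
{
        pthread_mutex_lock(&resync_lock);
        if (--nr_pending == 0)
                pthread_cond_broadcast(&waitq);
        pthread_mutex_unlock(&resync_lock);
}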
@@ -830,11 +842,11 @@ static void unfreeze_array(conf_t *conf) | |||
830 | spin_unlock_irq(&conf->resync_lock); | 842 | spin_unlock_irq(&conf->resync_lock); |
831 | } | 843 | } |
832 | 844 | ||
833 | static int make_request(mddev_t *mddev, struct bio * bio) | 845 | static int make_request(struct mddev *mddev, struct bio * bio) |
834 | { | 846 | { |
835 | conf_t *conf = mddev->private; | 847 | struct r10conf *conf = mddev->private; |
836 | mirror_info_t *mirror; | 848 | struct mirror_info *mirror; |
837 | r10bio_t *r10_bio; | 849 | struct r10bio *r10_bio; |
838 | struct bio *read_bio; | 850 | struct bio *read_bio; |
839 | int i; | 851 | int i; |
840 | int chunk_sects = conf->chunk_mask + 1; | 852 | int chunk_sects = conf->chunk_mask + 1; |
@@ -842,7 +854,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
842 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 854 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
843 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 855 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); |
844 | unsigned long flags; | 856 | unsigned long flags; |
845 | mdk_rdev_t *blocked_rdev; | 857 | struct md_rdev *blocked_rdev; |
846 | int plugged; | 858 | int plugged; |
847 | int sectors_handled; | 859 | int sectors_handled; |
848 | int max_sectors; | 860 | int max_sectors; |
@@ -996,6 +1008,11 @@ read_again: | |||
996 | /* | 1008 | /* |
997 | * WRITE: | 1009 | * WRITE: |
998 | */ | 1010 | */ |
1011 | if (conf->pending_count >= max_queued_requests) { | ||
1012 | md_wakeup_thread(mddev->thread); | ||
1013 | wait_event(conf->wait_barrier, | ||
1014 | conf->pending_count < max_queued_requests); | ||
1015 | } | ||
999 | /* first select target devices under rcu_lock and | 1016 | /* first select target devices under rcu_lock and |
1000 | * inc refcount on their rdev. Record them by setting | 1017 | * inc refcount on their rdev. Record them by setting |
1001 | * bios[x] to bio | 1018 | * bios[x] to bio |
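This wait_event() is the producer half of the new throttle: writers sleep once pending_count reaches max_queued_requests, and the wake_up(&conf->wait_barrier) added to flush_pending_writes() releases them after the queue drains. A compact userspace analogue of that bounded-queue back-pressure, with a condition variable in place of the wait queue and hypothetical names throughout:

#include <pthread.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  not_full = PTHREAD_COND_INITIALIZER;
static int pending_count;
static const int max_queued = 1024;

static void demo_queue_write(void)      /* make_request() side */
{
        pthread_mutex_lock(&device_lock);
        while (pending_count >= max_queued)
                pthread_cond_wait(&not_full, &device_lock);
        pending_count++;                /* bio_list_add() would go here */
        pthread_mutex_unlock(&device_lock);
}

static void demo_flush_pending(void)    /* raid10d unplug side */
{
        pthread_mutex_lock(&device_lock);
        pending_count = 0;              /* whole list handed off at once */
        pthread_cond_broadcast(&not_full);
        pthread_mutex_unlock(&device_lock);
}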
@@ -1017,7 +1034,7 @@ retry_write: | |||
1017 | 1034 | ||
1018 | for (i = 0; i < conf->copies; i++) { | 1035 | for (i = 0; i < conf->copies; i++) { |
1019 | int d = r10_bio->devs[i].devnum; | 1036 | int d = r10_bio->devs[i].devnum; |
1020 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); | 1037 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); |
1021 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | 1038 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
1022 | atomic_inc(&rdev->nr_pending); | 1039 | atomic_inc(&rdev->nr_pending); |
1023 | blocked_rdev = rdev; | 1040 | blocked_rdev = rdev; |
@@ -1129,6 +1146,7 @@ retry_write: | |||
1129 | atomic_inc(&r10_bio->remaining); | 1146 | atomic_inc(&r10_bio->remaining); |
1130 | spin_lock_irqsave(&conf->device_lock, flags); | 1147 | spin_lock_irqsave(&conf->device_lock, flags); |
1131 | bio_list_add(&conf->pending_bio_list, mbio); | 1148 | bio_list_add(&conf->pending_bio_list, mbio); |
1149 | conf->pending_count++; | ||
1132 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1150 | spin_unlock_irqrestore(&conf->device_lock, flags); |
1133 | } | 1151 | } |
1134 | 1152 | ||
@@ -1161,9 +1179,9 @@ retry_write: | |||
1161 | return 0; | 1179 | return 0; |
1162 | } | 1180 | } |
1163 | 1181 | ||
1164 | static void status(struct seq_file *seq, mddev_t *mddev) | 1182 | static void status(struct seq_file *seq, struct mddev *mddev) |
1165 | { | 1183 | { |
1166 | conf_t *conf = mddev->private; | 1184 | struct r10conf *conf = mddev->private; |
1167 | int i; | 1185 | int i; |
1168 | 1186 | ||
1169 | if (conf->near_copies < conf->raid_disks) | 1187 | if (conf->near_copies < conf->raid_disks) |
@@ -1190,7 +1208,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) | |||
1190 | * Don't consider the device numbered 'ignore' | 1208 | * Don't consider the device numbered 'ignore' |
1191 | * as we might be about to remove it. | 1209 | * as we might be about to remove it. |
1192 | */ | 1210 | */ |
1193 | static int enough(conf_t *conf, int ignore) | 1211 | static int enough(struct r10conf *conf, int ignore) |
1194 | { | 1212 | { |
1195 | int first = 0; | 1213 | int first = 0; |
1196 | 1214 | ||
@@ -1209,10 +1227,10 @@ static int enough(conf_t *conf, int ignore) | |||
1209 | return 1; | 1227 | return 1; |
1210 | } | 1228 | } |
1211 | 1229 | ||
1212 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 1230 | static void error(struct mddev *mddev, struct md_rdev *rdev) |
1213 | { | 1231 | { |
1214 | char b[BDEVNAME_SIZE]; | 1232 | char b[BDEVNAME_SIZE]; |
1215 | conf_t *conf = mddev->private; | 1233 | struct r10conf *conf = mddev->private; |
1216 | 1234 | ||
1217 | /* | 1235 | /* |
1218 | * If it is not operational, then we have already marked it as dead | 1236 | * If it is not operational, then we have already marked it as dead |
@@ -1246,10 +1264,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1246 | mdname(mddev), conf->raid_disks - mddev->degraded); | 1264 | mdname(mddev), conf->raid_disks - mddev->degraded); |
1247 | } | 1265 | } |
1248 | 1266 | ||
1249 | static void print_conf(conf_t *conf) | 1267 | static void print_conf(struct r10conf *conf) |
1250 | { | 1268 | { |
1251 | int i; | 1269 | int i; |
1252 | mirror_info_t *tmp; | 1270 | struct mirror_info *tmp; |
1253 | 1271 | ||
1254 | printk(KERN_DEBUG "RAID10 conf printout:\n"); | 1272 | printk(KERN_DEBUG "RAID10 conf printout:\n"); |
1255 | if (!conf) { | 1273 | if (!conf) { |
@@ -1270,7 +1288,7 @@ static void print_conf(conf_t *conf) | |||
1270 | } | 1288 | } |
1271 | } | 1289 | } |
1272 | 1290 | ||
1273 | static void close_sync(conf_t *conf) | 1291 | static void close_sync(struct r10conf *conf) |
1274 | { | 1292 | { |
1275 | wait_barrier(conf); | 1293 | wait_barrier(conf); |
1276 | allow_barrier(conf); | 1294 | allow_barrier(conf); |
@@ -1279,11 +1297,11 @@ static void close_sync(conf_t *conf) | |||
1279 | conf->r10buf_pool = NULL; | 1297 | conf->r10buf_pool = NULL; |
1280 | } | 1298 | } |
1281 | 1299 | ||
1282 | static int raid10_spare_active(mddev_t *mddev) | 1300 | static int raid10_spare_active(struct mddev *mddev) |
1283 | { | 1301 | { |
1284 | int i; | 1302 | int i; |
1285 | conf_t *conf = mddev->private; | 1303 | struct r10conf *conf = mddev->private; |
1286 | mirror_info_t *tmp; | 1304 | struct mirror_info *tmp; |
1287 | int count = 0; | 1305 | int count = 0; |
1288 | unsigned long flags; | 1306 | unsigned long flags; |
1289 | 1307 | ||
@@ -1309,9 +1327,9 @@ static int raid10_spare_active(mddev_t *mddev) | |||
1309 | } | 1327 | } |
1310 | 1328 | ||
1311 | 1329 | ||
1312 | static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 1330 | static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
1313 | { | 1331 | { |
1314 | conf_t *conf = mddev->private; | 1332 | struct r10conf *conf = mddev->private; |
1315 | int err = -EEXIST; | 1333 | int err = -EEXIST; |
1316 | int mirror; | 1334 | int mirror; |
1317 | int first = 0; | 1335 | int first = 0; |
@@ -1334,7 +1352,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1334 | else | 1352 | else |
1335 | mirror = first; | 1353 | mirror = first; |
1336 | for ( ; mirror <= last ; mirror++) { | 1354 | for ( ; mirror <= last ; mirror++) { |
1337 | mirror_info_t *p = &conf->mirrors[mirror]; | 1355 | struct mirror_info *p = &conf->mirrors[mirror]; |
1338 | if (p->recovery_disabled == mddev->recovery_disabled) | 1356 | if (p->recovery_disabled == mddev->recovery_disabled) |
1339 | continue; | 1357 | continue; |
1340 | if (!p->rdev) | 1358 | if (!p->rdev) |
@@ -1355,6 +1373,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1355 | } | 1373 | } |
1356 | 1374 | ||
1357 | p->head_position = 0; | 1375 | p->head_position = 0; |
1376 | p->recovery_disabled = mddev->recovery_disabled - 1; | ||
1358 | rdev->raid_disk = mirror; | 1377 | rdev->raid_disk = mirror; |
1359 | err = 0; | 1378 | err = 0; |
1360 | if (rdev->saved_raid_disk != mirror) | 1379 | if (rdev->saved_raid_disk != mirror) |
@@ -1368,12 +1387,12 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1368 | return err; | 1387 | return err; |
1369 | } | 1388 | } |
1370 | 1389 | ||
1371 | static int raid10_remove_disk(mddev_t *mddev, int number) | 1390 | static int raid10_remove_disk(struct mddev *mddev, int number) |
1372 | { | 1391 | { |
1373 | conf_t *conf = mddev->private; | 1392 | struct r10conf *conf = mddev->private; |
1374 | int err = 0; | 1393 | int err = 0; |
1375 | mdk_rdev_t *rdev; | 1394 | struct md_rdev *rdev; |
1376 | mirror_info_t *p = conf->mirrors+ number; | 1395 | struct mirror_info *p = conf->mirrors+ number; |
1377 | 1396 | ||
1378 | print_conf(conf); | 1397 | print_conf(conf); |
1379 | rdev = p->rdev; | 1398 | rdev = p->rdev; |
@@ -1411,8 +1430,8 @@ abort: | |||
1411 | 1430 | ||
1412 | static void end_sync_read(struct bio *bio, int error) | 1431 | static void end_sync_read(struct bio *bio, int error) |
1413 | { | 1432 | { |
1414 | r10bio_t *r10_bio = bio->bi_private; | 1433 | struct r10bio *r10_bio = bio->bi_private; |
1415 | conf_t *conf = r10_bio->mddev->private; | 1434 | struct r10conf *conf = r10_bio->mddev->private; |
1416 | int d; | 1435 | int d; |
1417 | 1436 | ||
1418 | d = find_bio_disk(conf, r10_bio, bio, NULL); | 1437 | d = find_bio_disk(conf, r10_bio, bio, NULL); |
@@ -1439,9 +1458,9 @@ static void end_sync_read(struct bio *bio, int error) | |||
1439 | } | 1458 | } |
1440 | } | 1459 | } |
1441 | 1460 | ||
1442 | static void end_sync_request(r10bio_t *r10_bio) | 1461 | static void end_sync_request(struct r10bio *r10_bio) |
1443 | { | 1462 | { |
1444 | mddev_t *mddev = r10_bio->mddev; | 1463 | struct mddev *mddev = r10_bio->mddev; |
1445 | 1464 | ||
1446 | while (atomic_dec_and_test(&r10_bio->remaining)) { | 1465 | while (atomic_dec_and_test(&r10_bio->remaining)) { |
1447 | if (r10_bio->master_bio == NULL) { | 1466 | if (r10_bio->master_bio == NULL) { |
@@ -1455,7 +1474,7 @@ static void end_sync_request(r10bio_t *r10_bio) | |||
1455 | md_done_sync(mddev, s, 1); | 1474 | md_done_sync(mddev, s, 1); |
1456 | break; | 1475 | break; |
1457 | } else { | 1476 | } else { |
1458 | r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio; | 1477 | struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; |
1459 | if (test_bit(R10BIO_MadeGood, &r10_bio->state) || | 1478 | if (test_bit(R10BIO_MadeGood, &r10_bio->state) || |
1460 | test_bit(R10BIO_WriteError, &r10_bio->state)) | 1479 | test_bit(R10BIO_WriteError, &r10_bio->state)) |
1461 | reschedule_retry(r10_bio); | 1480 | reschedule_retry(r10_bio); |
@@ -1469,9 +1488,9 @@ static void end_sync_request(r10bio_t *r10_bio) | |||
1469 | static void end_sync_write(struct bio *bio, int error) | 1488 | static void end_sync_write(struct bio *bio, int error) |
1470 | { | 1489 | { |
1471 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 1490 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
1472 | r10bio_t *r10_bio = bio->bi_private; | 1491 | struct r10bio *r10_bio = bio->bi_private; |
1473 | mddev_t *mddev = r10_bio->mddev; | 1492 | struct mddev *mddev = r10_bio->mddev; |
1474 | conf_t *conf = mddev->private; | 1493 | struct r10conf *conf = mddev->private; |
1475 | int d; | 1494 | int d; |
1476 | sector_t first_bad; | 1495 | sector_t first_bad; |
1477 | int bad_sectors; | 1496 | int bad_sectors; |
@@ -1509,9 +1528,9 @@ static void end_sync_write(struct bio *bio, int error) | |||
1509 | * We check if all blocks are in-sync and only write to blocks that | 1528 | * We check if all blocks are in-sync and only write to blocks that |
1510 | * aren't in sync | 1529 | * aren't in sync |
1511 | */ | 1530 | */ |
1512 | static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) | 1531 | static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) |
1513 | { | 1532 | { |
1514 | conf_t *conf = mddev->private; | 1533 | struct r10conf *conf = mddev->private; |
1515 | int i, first; | 1534 | int i, first; |
1516 | struct bio *tbio, *fbio; | 1535 | struct bio *tbio, *fbio; |
1517 | 1536 | ||
@@ -1609,7 +1628,7 @@ done: | |||
1609 | * The second for writing. | 1628 | * The second for writing. |
1610 | * | 1629 | * |
1611 | */ | 1630 | */ |
1612 | static void fix_recovery_read_error(r10bio_t *r10_bio) | 1631 | static void fix_recovery_read_error(struct r10bio *r10_bio) |
1613 | { | 1632 | { |
1614 | /* We got a read error during recovery. | 1633 | /* We got a read error during recovery. |
1615 | * We repeat the read in smaller page-sized sections. | 1634 | * We repeat the read in smaller page-sized sections. |
@@ -1618,8 +1637,8 @@ static void fix_recovery_read_error(r10bio_t *r10_bio) | |||
1618 | * If a read fails, record a bad block on both old and | 1637 | * If a read fails, record a bad block on both old and |
1619 | * new devices. | 1638 | * new devices. |
1620 | */ | 1639 | */ |
1621 | mddev_t *mddev = r10_bio->mddev; | 1640 | struct mddev *mddev = r10_bio->mddev; |
1622 | conf_t *conf = mddev->private; | 1641 | struct r10conf *conf = mddev->private; |
1623 | struct bio *bio = r10_bio->devs[0].bio; | 1642 | struct bio *bio = r10_bio->devs[0].bio; |
1624 | sector_t sect = 0; | 1643 | sector_t sect = 0; |
1625 | int sectors = r10_bio->sectors; | 1644 | int sectors = r10_bio->sectors; |
@@ -1629,7 +1648,7 @@ static void fix_recovery_read_error(r10bio_t *r10_bio) | |||
1629 | 1648 | ||
1630 | while (sectors) { | 1649 | while (sectors) { |
1631 | int s = sectors; | 1650 | int s = sectors; |
1632 | mdk_rdev_t *rdev; | 1651 | struct md_rdev *rdev; |
1633 | sector_t addr; | 1652 | sector_t addr; |
1634 | int ok; | 1653 | int ok; |
1635 | 1654 | ||
@@ -1663,7 +1682,7 @@ static void fix_recovery_read_error(r10bio_t *r10_bio) | |||
1663 | 1682 | ||
1664 | if (rdev != conf->mirrors[dw].rdev) { | 1683 | if (rdev != conf->mirrors[dw].rdev) { |
1665 | /* need bad block on destination too */ | 1684 | /* need bad block on destination too */ |
1666 | mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev; | 1685 | struct md_rdev *rdev2 = conf->mirrors[dw].rdev; |
1667 | addr = r10_bio->devs[1].addr + sect; | 1686 | addr = r10_bio->devs[1].addr + sect; |
1668 | ok = rdev_set_badblocks(rdev2, addr, s, 0); | 1687 | ok = rdev_set_badblocks(rdev2, addr, s, 0); |
1669 | if (!ok) { | 1688 | if (!ok) { |
@@ -1688,9 +1707,9 @@ static void fix_recovery_read_error(r10bio_t *r10_bio) | |||
1688 | } | 1707 | } |
1689 | } | 1708 | } |
1690 | 1709 | ||
1691 | static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) | 1710 | static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) |
1692 | { | 1711 | { |
1693 | conf_t *conf = mddev->private; | 1712 | struct r10conf *conf = mddev->private; |
1694 | int d; | 1713 | int d; |
1695 | struct bio *wbio; | 1714 | struct bio *wbio; |
1696 | 1715 | ||
@@ -1719,7 +1738,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) | |||
1719 | * since the last recorded read error. | 1738 | * since the last recorded read error. |
1720 | * | 1739 | * |
1721 | */ | 1740 | */ |
1722 | static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) | 1741 | static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) |
1723 | { | 1742 | { |
1724 | struct timespec cur_time_mon; | 1743 | struct timespec cur_time_mon; |
1725 | unsigned long hours_since_last; | 1744 | unsigned long hours_since_last; |
@@ -1750,7 +1769,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1750 | atomic_set(&rdev->read_errors, read_errors >> hours_since_last); | 1769 | atomic_set(&rdev->read_errors, read_errors >> hours_since_last); |
1751 | } | 1770 | } |
1752 | 1771 | ||
1753 | static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector, | 1772 | static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, |
1754 | int sectors, struct page *page, int rw) | 1773 | int sectors, struct page *page, int rw) |
1755 | { | 1774 | { |
1756 | sector_t first_bad; | 1775 | sector_t first_bad; |
@@ -1778,11 +1797,11 @@ static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector, | |||
1778 | * 3. Performs writes following reads for array synchronising. | 1797 | * 3. Performs writes following reads for array synchronising. |
1779 | */ | 1798 | */ |
1780 | 1799 | ||
1781 | static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) | 1800 | static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) |
1782 | { | 1801 | { |
1783 | int sect = 0; /* Offset from r10_bio->sector */ | 1802 | int sect = 0; /* Offset from r10_bio->sector */ |
1784 | int sectors = r10_bio->sectors; | 1803 | int sectors = r10_bio->sectors; |
1785 | mdk_rdev_t*rdev; | 1804 | struct md_rdev*rdev; |
1786 | int max_read_errors = atomic_read(&mddev->max_corr_read_errors); | 1805 | int max_read_errors = atomic_read(&mddev->max_corr_read_errors); |
1787 | int d = r10_bio->devs[r10_bio->read_slot].devnum; | 1806 | int d = r10_bio->devs[r10_bio->read_slot].devnum; |
1788 | 1807 | ||
@@ -1983,12 +2002,12 @@ static int submit_bio_wait(int rw, struct bio *bio) | |||
1983 | return test_bit(BIO_UPTODATE, &bio->bi_flags); | 2002 | return test_bit(BIO_UPTODATE, &bio->bi_flags); |
1984 | } | 2003 | } |
1985 | 2004 | ||
1986 | static int narrow_write_error(r10bio_t *r10_bio, int i) | 2005 | static int narrow_write_error(struct r10bio *r10_bio, int i) |
1987 | { | 2006 | { |
1988 | struct bio *bio = r10_bio->master_bio; | 2007 | struct bio *bio = r10_bio->master_bio; |
1989 | mddev_t *mddev = r10_bio->mddev; | 2008 | struct mddev *mddev = r10_bio->mddev; |
1990 | conf_t *conf = mddev->private; | 2009 | struct r10conf *conf = mddev->private; |
1991 | mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; | 2010 | struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; |
1992 | /* bio has the data to be written to slot 'i' where | 2011 | /* bio has the data to be written to slot 'i' where |
1993 | * we just recently had a write error. | 2012 | * we just recently had a write error. |
1994 | * We repeatedly clone the bio and trim down to one block, | 2013 | * We repeatedly clone the bio and trim down to one block, |
@@ -2040,13 +2059,13 @@ static int narrow_write_error(r10bio_t *r10_bio, int i) | |||
2040 | return ok; | 2059 | return ok; |
2041 | } | 2060 | } |
2042 | 2061 | ||
2043 | static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) | 2062 | static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) |
2044 | { | 2063 | { |
2045 | int slot = r10_bio->read_slot; | 2064 | int slot = r10_bio->read_slot; |
2046 | int mirror = r10_bio->devs[slot].devnum; | 2065 | int mirror = r10_bio->devs[slot].devnum; |
2047 | struct bio *bio; | 2066 | struct bio *bio; |
2048 | conf_t *conf = mddev->private; | 2067 | struct r10conf *conf = mddev->private; |
2049 | mdk_rdev_t *rdev; | 2068 | struct md_rdev *rdev; |
2050 | char b[BDEVNAME_SIZE]; | 2069 | char b[BDEVNAME_SIZE]; |
2051 | unsigned long do_sync; | 2070 | unsigned long do_sync; |
2052 | int max_sectors; | 2071 | int max_sectors; |
@@ -2139,7 +2158,7 @@ read_more: | |||
2139 | generic_make_request(bio); | 2158 | generic_make_request(bio); |
2140 | } | 2159 | } |
2141 | 2160 | ||
2142 | static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) | 2161 | static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) |
2143 | { | 2162 | { |
2144 | /* Some sort of write request has finished and it | 2163 | /* Some sort of write request has finished and it |
2145 | * succeeded in writing where we thought there was a | 2164 | * succeeded in writing where we thought there was a |
@@ -2148,7 +2167,7 @@ static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) | |||
2148 | * a bad block. | 2167 | * a bad block. |
2149 | */ | 2168 | */ |
2150 | int m; | 2169 | int m; |
2151 | mdk_rdev_t *rdev; | 2170 | struct md_rdev *rdev; |
2152 | 2171 | ||
2153 | if (test_bit(R10BIO_IsSync, &r10_bio->state) || | 2172 | if (test_bit(R10BIO_IsSync, &r10_bio->state) || |
2154 | test_bit(R10BIO_IsRecover, &r10_bio->state)) { | 2173 | test_bit(R10BIO_IsRecover, &r10_bio->state)) { |
@@ -2200,11 +2219,11 @@ static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) | |||
2200 | } | 2219 | } |
2201 | } | 2220 | } |
2202 | 2221 | ||
2203 | static void raid10d(mddev_t *mddev) | 2222 | static void raid10d(struct mddev *mddev) |
2204 | { | 2223 | { |
2205 | r10bio_t *r10_bio; | 2224 | struct r10bio *r10_bio; |
2206 | unsigned long flags; | 2225 | unsigned long flags; |
2207 | conf_t *conf = mddev->private; | 2226 | struct r10conf *conf = mddev->private; |
2208 | struct list_head *head = &conf->retry_list; | 2227 | struct list_head *head = &conf->retry_list; |
2209 | struct blk_plug plug; | 2228 | struct blk_plug plug; |
2210 | 2229 | ||
@@ -2220,7 +2239,7 @@ static void raid10d(mddev_t *mddev) | |||
2220 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2239 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2221 | break; | 2240 | break; |
2222 | } | 2241 | } |
2223 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); | 2242 | r10_bio = list_entry(head->prev, struct r10bio, retry_list); |
2224 | list_del(head->prev); | 2243 | list_del(head->prev); |
2225 | conf->nr_queued--; | 2244 | conf->nr_queued--; |
2226 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2245 | spin_unlock_irqrestore(&conf->device_lock, flags); |
@@ -2252,7 +2271,7 @@ static void raid10d(mddev_t *mddev) | |||
2252 | } | 2271 | } |
2253 | 2272 | ||
2254 | 2273 | ||
2255 | static int init_resync(conf_t *conf) | 2274 | static int init_resync(struct r10conf *conf) |
2256 | { | 2275 | { |
2257 | int buffs; | 2276 | int buffs; |
2258 | 2277 | ||
@@ -2297,11 +2316,11 @@ static int init_resync(conf_t *conf) | |||
2297 | * | 2316 | * |
2298 | */ | 2317 | */ |
2299 | 2318 | ||
2300 | static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, | 2319 | static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, |
2301 | int *skipped, int go_faster) | 2320 | int *skipped, int go_faster) |
2302 | { | 2321 | { |
2303 | conf_t *conf = mddev->private; | 2322 | struct r10conf *conf = mddev->private; |
2304 | r10bio_t *r10_bio; | 2323 | struct r10bio *r10_bio; |
2305 | struct bio *biolist = NULL, *bio; | 2324 | struct bio *biolist = NULL, *bio; |
2306 | sector_t max_sector, nr_sectors; | 2325 | sector_t max_sector, nr_sectors; |
2307 | int i; | 2326 | int i; |
@@ -2393,7 +2412,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, | |||
2393 | 2412 | ||
2394 | for (i=0 ; i<conf->raid_disks; i++) { | 2413 | for (i=0 ; i<conf->raid_disks; i++) { |
2395 | int still_degraded; | 2414 | int still_degraded; |
2396 | r10bio_t *rb2; | 2415 | struct r10bio *rb2; |
2397 | sector_t sect; | 2416 | sector_t sect; |
2398 | int must_sync; | 2417 | int must_sync; |
2399 | int any_working; | 2418 | int any_working; |
@@ -2453,7 +2472,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, | |||
2453 | int k; | 2472 | int k; |
2454 | int d = r10_bio->devs[j].devnum; | 2473 | int d = r10_bio->devs[j].devnum; |
2455 | sector_t from_addr, to_addr; | 2474 | sector_t from_addr, to_addr; |
2456 | mdk_rdev_t *rdev; | 2475 | struct md_rdev *rdev; |
2457 | sector_t sector, first_bad; | 2476 | sector_t sector, first_bad; |
2458 | int bad_sectors; | 2477 | int bad_sectors; |
2459 | if (!conf->mirrors[d].rdev || | 2478 | if (!conf->mirrors[d].rdev || |
@@ -2547,8 +2566,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, | |||
2547 | } | 2566 | } |
2548 | if (biolist == NULL) { | 2567 | if (biolist == NULL) { |
2549 | while (r10_bio) { | 2568 | while (r10_bio) { |
2550 | r10bio_t *rb2 = r10_bio; | 2569 | struct r10bio *rb2 = r10_bio; |
2551 | r10_bio = (r10bio_t*) rb2->master_bio; | 2570 | r10_bio = (struct r10bio*) rb2->master_bio; |
2552 | rb2->master_bio = NULL; | 2571 | rb2->master_bio = NULL; |
2553 | put_buf(rb2); | 2572 | put_buf(rb2); |
2554 | } | 2573 | } |
@@ -2714,10 +2733,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, | |||
2714 | } | 2733 | } |
2715 | 2734 | ||
2716 | static sector_t | 2735 | static sector_t |
2717 | raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) | 2736 | raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
2718 | { | 2737 | { |
2719 | sector_t size; | 2738 | sector_t size; |
2720 | conf_t *conf = mddev->private; | 2739 | struct r10conf *conf = mddev->private; |
2721 | 2740 | ||
2722 | if (!raid_disks) | 2741 | if (!raid_disks) |
2723 | raid_disks = conf->raid_disks; | 2742 | raid_disks = conf->raid_disks; |
@@ -2733,9 +2752,9 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
2733 | } | 2752 | } |
2734 | 2753 | ||
2735 | 2754 | ||
2736 | static conf_t *setup_conf(mddev_t *mddev) | 2755 | static struct r10conf *setup_conf(struct mddev *mddev) |
2737 | { | 2756 | { |
2738 | conf_t *conf = NULL; | 2757 | struct r10conf *conf = NULL; |
2739 | int nc, fc, fo; | 2758 | int nc, fc, fo; |
2740 | sector_t stride, size; | 2759 | sector_t stride, size; |
2741 | int err = -EINVAL; | 2760 | int err = -EINVAL; |
@@ -2760,7 +2779,7 @@ static conf_t *setup_conf(mddev_t *mddev) | |||
2760 | } | 2779 | } |
2761 | 2780 | ||
2762 | err = -ENOMEM; | 2781 | err = -ENOMEM; |
2763 | conf = kzalloc(sizeof(conf_t), GFP_KERNEL); | 2782 | conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); |
2764 | if (!conf) | 2783 | if (!conf) |
2765 | goto out; | 2784 | goto out; |
2766 | 2785 | ||
@@ -2836,12 +2855,12 @@ static conf_t *setup_conf(mddev_t *mddev) | |||
2836 | return ERR_PTR(err); | 2855 | return ERR_PTR(err); |
2837 | } | 2856 | } |
2838 | 2857 | ||
2839 | static int run(mddev_t *mddev) | 2858 | static int run(struct mddev *mddev) |
2840 | { | 2859 | { |
2841 | conf_t *conf; | 2860 | struct r10conf *conf; |
2842 | int i, disk_idx, chunk_size; | 2861 | int i, disk_idx, chunk_size; |
2843 | mirror_info_t *disk; | 2862 | struct mirror_info *disk; |
2844 | mdk_rdev_t *rdev; | 2863 | struct md_rdev *rdev; |
2845 | sector_t size; | 2864 | sector_t size; |
2846 | 2865 | ||
2847 | /* | 2866 | /* |
@@ -2913,6 +2932,7 @@ static int run(mddev_t *mddev) | |||
2913 | if (disk->rdev) | 2932 | if (disk->rdev) |
2914 | conf->fullsync = 1; | 2933 | conf->fullsync = 1; |
2915 | } | 2934 | } |
2935 | disk->recovery_disabled = mddev->recovery_disabled - 1; | ||
2916 | } | 2936 | } |
2917 | 2937 | ||
2918 | if (mddev->recovery_cp != MaxSector) | 2938 | if (mddev->recovery_cp != MaxSector) |
@@ -2966,9 +2986,9 @@ out: | |||
2966 | return -EIO; | 2986 | return -EIO; |
2967 | } | 2987 | } |
2968 | 2988 | ||
2969 | static int stop(mddev_t *mddev) | 2989 | static int stop(struct mddev *mddev) |
2970 | { | 2990 | { |
2971 | conf_t *conf = mddev->private; | 2991 | struct r10conf *conf = mddev->private; |
2972 | 2992 | ||
2973 | raise_barrier(conf, 0); | 2993 | raise_barrier(conf, 0); |
2974 | lower_barrier(conf); | 2994 | lower_barrier(conf); |
@@ -2983,9 +3003,9 @@ static int stop(mddev_t *mddev) | |||
2983 | return 0; | 3003 | return 0; |
2984 | } | 3004 | } |
2985 | 3005 | ||
2986 | static void raid10_quiesce(mddev_t *mddev, int state) | 3006 | static void raid10_quiesce(struct mddev *mddev, int state) |
2987 | { | 3007 | { |
2988 | conf_t *conf = mddev->private; | 3008 | struct r10conf *conf = mddev->private; |
2989 | 3009 | ||
2990 | switch(state) { | 3010 | switch(state) { |
2991 | case 1: | 3011 | case 1: |
@@ -2997,10 +3017,10 @@ static void raid10_quiesce(mddev_t *mddev, int state) | |||
2997 | } | 3017 | } |
2998 | } | 3018 | } |
2999 | 3019 | ||
3000 | static void *raid10_takeover_raid0(mddev_t *mddev) | 3020 | static void *raid10_takeover_raid0(struct mddev *mddev) |
3001 | { | 3021 | { |
3002 | mdk_rdev_t *rdev; | 3022 | struct md_rdev *rdev; |
3003 | conf_t *conf; | 3023 | struct r10conf *conf; |
3004 | 3024 | ||
3005 | if (mddev->degraded > 0) { | 3025 | if (mddev->degraded > 0) { |
3006 | printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", | 3026 | printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", |
@@ -3029,17 +3049,17 @@ static void *raid10_takeover_raid0(mddev_t *mddev) | |||
3029 | return conf; | 3049 | return conf; |
3030 | } | 3050 | } |
3031 | 3051 | ||
3032 | static void *raid10_takeover(mddev_t *mddev) | 3052 | static void *raid10_takeover(struct mddev *mddev) |
3033 | { | 3053 | { |
3034 | struct raid0_private_data *raid0_priv; | 3054 | struct r0conf *raid0_conf; |
3035 | 3055 | ||
3036 | /* raid10 can take over: | 3056 | /* raid10 can take over: |
3037 | * raid0 - providing it has only two drives | 3057 | * raid0 - providing it has only two drives |
3038 | */ | 3058 | */ |
3039 | if (mddev->level == 0) { | 3059 | if (mddev->level == 0) { |
3040 | /* for raid0 takeover only one zone is supported */ | 3060 | /* for raid0 takeover only one zone is supported */ |
3041 | raid0_priv = mddev->private; | 3061 | raid0_conf = mddev->private; |
3042 | if (raid0_priv->nr_strip_zones > 1) { | 3062 | if (raid0_conf->nr_strip_zones > 1) { |
3043 | printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" | 3063 | printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" |
3044 | " with more than one zone.\n", | 3064 | " with more than one zone.\n", |
3045 | mdname(mddev)); | 3065 | mdname(mddev)); |
@@ -3050,7 +3070,7 @@ static void *raid10_takeover(mddev_t *mddev) | |||
3050 | return ERR_PTR(-EINVAL); | 3070 | return ERR_PTR(-EINVAL); |
3051 | } | 3071 | } |
3052 | 3072 | ||
3053 | static struct mdk_personality raid10_personality = | 3073 | static struct md_personality raid10_personality = |
3054 | { | 3074 | { |
3055 | .name = "raid10", | 3075 | .name = "raid10", |
3056 | .level = 10, | 3076 | .level = 10, |
@@ -3086,3 +3106,5 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); | |||
3086 | MODULE_ALIAS("md-personality-9"); /* RAID10 */ | 3106 | MODULE_ALIAS("md-personality-9"); /* RAID10 */ |
3087 | MODULE_ALIAS("md-raid10"); | 3107 | MODULE_ALIAS("md-raid10"); |
3088 | MODULE_ALIAS("md-level-10"); | 3108 | MODULE_ALIAS("md-level-10"); |
3109 | |||
3110 | module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); | ||
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 79cb52a0d4a2..7facfdf841f4 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h | |||
@@ -1,10 +1,8 @@ | |||
1 | #ifndef _RAID10_H | 1 | #ifndef _RAID10_H |
2 | #define _RAID10_H | 2 | #define _RAID10_H |
3 | 3 | ||
4 | typedef struct mirror_info mirror_info_t; | ||
5 | |||
6 | struct mirror_info { | 4 | struct mirror_info { |
7 | mdk_rdev_t *rdev; | 5 | struct md_rdev *rdev; |
8 | sector_t head_position; | 6 | sector_t head_position; |
9 | int recovery_disabled; /* matches | 7 | int recovery_disabled; /* matches |
10 | * mddev->recovery_disabled | 8 | * mddev->recovery_disabled |
@@ -13,11 +11,9 @@ struct mirror_info { | |||
13 | */ | 11 | */ |
14 | }; | 12 | }; |
15 | 13 | ||
16 | typedef struct r10bio_s r10bio_t; | 14 | struct r10conf { |
17 | 15 | struct mddev *mddev; | |
18 | struct r10_private_data_s { | 16 | struct mirror_info *mirrors; |
19 | mddev_t *mddev; | ||
20 | mirror_info_t *mirrors; | ||
21 | int raid_disks; | 17 | int raid_disks; |
22 | spinlock_t device_lock; | 18 | spinlock_t device_lock; |
23 | 19 | ||
@@ -46,7 +42,7 @@ struct r10_private_data_s { | |||
46 | struct list_head retry_list; | 42 | struct list_head retry_list; |
47 | /* queue pending writes and submit them on unplug */ | 43 | /* queue pending writes and submit them on unplug */ |
48 | struct bio_list pending_bio_list; | 44 | struct bio_list pending_bio_list; |
49 | 45 | int pending_count; | |
50 | 46 | ||
51 | spinlock_t resync_lock; | 47 | spinlock_t resync_lock; |
52 | int nr_pending; | 48 | int nr_pending; |
@@ -68,11 +64,9 @@ struct r10_private_data_s { | |||
68 | /* When taking over an array from a different personality, we store | 64 | /* When taking over an array from a different personality, we store |
69 | * the new thread here until we fully activate the array. | 65 | * the new thread here until we fully activate the array. |
70 | */ | 66 | */ |
71 | struct mdk_thread_s *thread; | 67 | struct md_thread *thread; |
72 | }; | 68 | }; |
73 | 69 | ||
74 | typedef struct r10_private_data_s conf_t; | ||
75 | |||
76 | /* | 70 | /* |
77 | * this is our 'private' RAID10 bio. | 71 | * this is our 'private' RAID10 bio. |
78 | * | 72 | * |
@@ -80,14 +74,14 @@ typedef struct r10_private_data_s conf_t; | |||
80 | * for this RAID10 operation, and about their status: | 74 | * for this RAID10 operation, and about their status: |
81 | */ | 75 | */ |
82 | 76 | ||
83 | struct r10bio_s { | 77 | struct r10bio { |
84 | atomic_t remaining; /* 'have we finished' count, | 78 | atomic_t remaining; /* 'have we finished' count, |
85 | * used from IRQ handlers | 79 | * used from IRQ handlers |
86 | */ | 80 | */ |
87 | sector_t sector; /* virtual sector number */ | 81 | sector_t sector; /* virtual sector number */ |
88 | int sectors; | 82 | int sectors; |
89 | unsigned long state; | 83 | unsigned long state; |
90 | mddev_t *mddev; | 84 | struct mddev *mddev; |
91 | /* | 85 | /* |
92 | * original bio going to /dev/mdx | 86 | * original bio going to /dev/mdx |
93 | */ | 87 | */ |
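These raid10.h hunks are the header half of the typedef-removal series listed in the merge summary: conf_t, r10bio_t, mirror_info_t and mdk_thread_t all become plain struct tags, and call sites change mechanically. An illustrative conversion (not a hunk from this patch):

    /* before: */  conf_t *conf = mddev->private;
    /* after:  */  struct r10conf *conf = mddev->private;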
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ac5e8b57e50f..f6fe053a5bed 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -70,7 +70,11 @@ | |||
70 | #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) | 70 | #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) |
71 | #define HASH_MASK (NR_HASH - 1) | 71 | #define HASH_MASK (NR_HASH - 1) |
72 | 72 | ||
73 | #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) | 73 | static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) |
74 | { | ||
75 | int hash = (sect >> STRIPE_SHIFT) & HASH_MASK; | ||
76 | return &conf->stripe_hashtbl[hash]; | ||
77 | } | ||
74 | 78 | ||
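Replacing the stripe_hash() macro with a static inline function keeps the computation identical while giving the compiler real type checking on conf and sect. A standalone userspace sketch of the hash itself (assuming 4096-byte pages, so STRIPE_SHIFT is 3 and a one-pointer hlist_head, giving 512 buckets):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE_BYTES 4096UL
    #define NR_HASH   (PAGE_SIZE_BYTES / sizeof(void *)) /* stand-in for sizeof(struct hlist_head) */
    #define HASH_MASK (NR_HASH - 1)
    #define STRIPE_SHIFT 3  /* PAGE_SHIFT - 9 for 4 KiB pages */

    static unsigned long stripe_hash_idx(uint64_t sect)
    {
            return (sect >> STRIPE_SHIFT) & HASH_MASK;
    }

    int main(void)
    {
            /* sectors 1024 and 1031 lie in the same stripe: same bucket */
            printf("%lu %lu\n", stripe_hash_idx(1024), stripe_hash_idx(1031));
            return 0;
    }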
75 | /* bio's attached to a stripe+device for I/O are linked together in bi_sector | 79 | /* bio's attached to a stripe+device for I/O are linked together in bi_sector |
76 | * order without overlap. There may be several bio's per stripe+device, and | 80 | * order without overlap. There may be several bio's per stripe+device, and |
@@ -78,24 +82,17 @@ | |||
78 | * When walking this list for a particular stripe+device, we must never proceed | 82 | * When walking this list for a particular stripe+device, we must never proceed |
79 | * beyond a bio that extends past this device, as the next bio might no longer | 83 | * beyond a bio that extends past this device, as the next bio might no longer |
80 | * be valid. | 84 | * be valid. |
81 | * This macro is used to determine the 'next' bio in the list, given the sector | 85 | * This function is used to determine the 'next' bio in the list, given the sector |
82 | * of the current stripe+device | 86 | * of the current stripe+device |
83 | */ | 87 | */ |
84 | #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL) | 88 | static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) |
85 | /* | 89 | { |
86 | * The following can be used to debug the driver | 90 | int sectors = bio->bi_size >> 9; |
87 | */ | 91 | if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) |
88 | #define RAID5_PARANOIA 1 | 92 | return bio->bi_next; |
89 | #if RAID5_PARANOIA && defined(CONFIG_SMP) | 93 | else |
90 | # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) | 94 | return NULL; |
91 | #else | 95 | } |
92 | # define CHECK_DEVLOCK() | ||
93 | #endif | ||
94 | |||
95 | #ifdef DEBUG | ||
96 | #define inline | ||
97 | #define __inline__ | ||
98 | #endif | ||
99 | 96 | ||
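The r5_next_bio() conversion likewise trades a macro for a typed function, and the body states the termination rule directly: keep following bi_next until a bio extends past the STRIPE_SECTORS window of the current stripe+device. A userspace sketch of that walk (mini_bio is a stand-in carrying only the fields used; STRIPE_SECTORS of 8 assumes 4 KiB stripes of 512-byte sectors):

    #include <stdio.h>

    #define STRIPE_SECTORS 8ULL  /* assumption: PAGE_SIZE stripes, 512-byte sectors */

    struct mini_bio {
            unsigned long long bi_sector;  /* first sector covered */
            unsigned int bi_size;          /* length in bytes */
            struct mini_bio *bi_next;      /* next bio, sorted by bi_sector */
    };

    /* mirrors the kernel's r5_next_bio(): NULL once a bio reaches
     * past the stripe window that starts at 'sector' */
    static struct mini_bio *next_bio(struct mini_bio *bio,
                                     unsigned long long sector)
    {
            unsigned int sectors = bio->bi_size >> 9;
            if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
                    return bio->bi_next;
            return NULL;
    }

    int main(void)
    {
            struct mini_bio c = { 6, 2048, NULL }; /* sectors 6..9: ends the walk */
            struct mini_bio b = { 2, 1024, &c };   /* sectors 2..3 */
            struct mini_bio a = { 0, 1024, &b };   /* sectors 0..1 */

            for (struct mini_bio *bio = &a; bio; bio = next_bio(bio, 0))
                    printf("bio at sector %llu\n", bio->bi_sector);
            return 0;
    }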
100 | /* | 97 | /* |
101 | * We maintain a biased count of active stripes in the bottom 16 bits of | 98 | * We maintain a biased count of active stripes in the bottom 16 bits of |
@@ -183,7 +180,7 @@ static void return_io(struct bio *return_bi) | |||
183 | } | 180 | } |
184 | } | 181 | } |
185 | 182 | ||
186 | static void print_raid5_conf (raid5_conf_t *conf); | 183 | static void print_raid5_conf (struct r5conf *conf); |
187 | 184 | ||
188 | static int stripe_operations_active(struct stripe_head *sh) | 185 | static int stripe_operations_active(struct stripe_head *sh) |
189 | { | 186 | { |
@@ -192,7 +189,7 @@ static int stripe_operations_active(struct stripe_head *sh) | |||
192 | test_bit(STRIPE_COMPUTE_RUN, &sh->state); | 189 | test_bit(STRIPE_COMPUTE_RUN, &sh->state); |
193 | } | 190 | } |
194 | 191 | ||
195 | static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) | 192 | static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) |
196 | { | 193 | { |
197 | if (atomic_dec_and_test(&sh->count)) { | 194 | if (atomic_dec_and_test(&sh->count)) { |
198 | BUG_ON(!list_empty(&sh->lru)); | 195 | BUG_ON(!list_empty(&sh->lru)); |
@@ -228,7 +225,7 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) | |||
228 | 225 | ||
229 | static void release_stripe(struct stripe_head *sh) | 226 | static void release_stripe(struct stripe_head *sh) |
230 | { | 227 | { |
231 | raid5_conf_t *conf = sh->raid_conf; | 228 | struct r5conf *conf = sh->raid_conf; |
232 | unsigned long flags; | 229 | unsigned long flags; |
233 | 230 | ||
234 | spin_lock_irqsave(&conf->device_lock, flags); | 231 | spin_lock_irqsave(&conf->device_lock, flags); |
@@ -244,25 +241,23 @@ static inline void remove_hash(struct stripe_head *sh) | |||
244 | hlist_del_init(&sh->hash); | 241 | hlist_del_init(&sh->hash); |
245 | } | 242 | } |
246 | 243 | ||
247 | static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) | 244 | static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) |
248 | { | 245 | { |
249 | struct hlist_head *hp = stripe_hash(conf, sh->sector); | 246 | struct hlist_head *hp = stripe_hash(conf, sh->sector); |
250 | 247 | ||
251 | pr_debug("insert_hash(), stripe %llu\n", | 248 | pr_debug("insert_hash(), stripe %llu\n", |
252 | (unsigned long long)sh->sector); | 249 | (unsigned long long)sh->sector); |
253 | 250 | ||
254 | CHECK_DEVLOCK(); | ||
255 | hlist_add_head(&sh->hash, hp); | 251 | hlist_add_head(&sh->hash, hp); |
256 | } | 252 | } |
257 | 253 | ||
258 | 254 | ||
259 | /* find an idle stripe, make sure it is unhashed, and return it. */ | 255 | /* find an idle stripe, make sure it is unhashed, and return it. */ |
260 | static struct stripe_head *get_free_stripe(raid5_conf_t *conf) | 256 | static struct stripe_head *get_free_stripe(struct r5conf *conf) |
261 | { | 257 | { |
262 | struct stripe_head *sh = NULL; | 258 | struct stripe_head *sh = NULL; |
263 | struct list_head *first; | 259 | struct list_head *first; |
264 | 260 | ||
265 | CHECK_DEVLOCK(); | ||
266 | if (list_empty(&conf->inactive_list)) | 261 | if (list_empty(&conf->inactive_list)) |
267 | goto out; | 262 | goto out; |
268 | first = conf->inactive_list.next; | 263 | first = conf->inactive_list.next; |
@@ -306,19 +301,18 @@ static int grow_buffers(struct stripe_head *sh) | |||
306 | } | 301 | } |
307 | 302 | ||
308 | static void raid5_build_block(struct stripe_head *sh, int i, int previous); | 303 | static void raid5_build_block(struct stripe_head *sh, int i, int previous); |
309 | static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, | 304 | static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, |
310 | struct stripe_head *sh); | 305 | struct stripe_head *sh); |
311 | 306 | ||
312 | static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) | 307 | static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) |
313 | { | 308 | { |
314 | raid5_conf_t *conf = sh->raid_conf; | 309 | struct r5conf *conf = sh->raid_conf; |
315 | int i; | 310 | int i; |
316 | 311 | ||
317 | BUG_ON(atomic_read(&sh->count) != 0); | 312 | BUG_ON(atomic_read(&sh->count) != 0); |
318 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); | 313 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); |
319 | BUG_ON(stripe_operations_active(sh)); | 314 | BUG_ON(stripe_operations_active(sh)); |
320 | 315 | ||
321 | CHECK_DEVLOCK(); | ||
322 | pr_debug("init_stripe called, stripe %llu\n", | 316 | pr_debug("init_stripe called, stripe %llu\n", |
323 | (unsigned long long)sh->sector); | 317 | (unsigned long long)sh->sector); |
324 | 318 | ||
@@ -348,13 +342,12 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) | |||
348 | insert_hash(conf, sh); | 342 | insert_hash(conf, sh); |
349 | } | 343 | } |
350 | 344 | ||
351 | static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, | 345 | static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, |
352 | short generation) | 346 | short generation) |
353 | { | 347 | { |
354 | struct stripe_head *sh; | 348 | struct stripe_head *sh; |
355 | struct hlist_node *hn; | 349 | struct hlist_node *hn; |
356 | 350 | ||
357 | CHECK_DEVLOCK(); | ||
358 | pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); | 351 | pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); |
359 | hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) | 352 | hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) |
360 | if (sh->sector == sector && sh->generation == generation) | 353 | if (sh->sector == sector && sh->generation == generation) |
@@ -376,7 +369,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, | |||
376 | * of the two sections, and some non-in_sync devices may | 369 | * of the two sections, and some non-in_sync devices may |
377 | * be insync in the section most affected by failed devices. | 370 | * be insync in the section most affected by failed devices. |
378 | */ | 371 | */ |
379 | static int has_failed(raid5_conf_t *conf) | 372 | static int has_failed(struct r5conf *conf) |
380 | { | 373 | { |
381 | int degraded; | 374 | int degraded; |
382 | int i; | 375 | int i; |
@@ -386,7 +379,7 @@ static int has_failed(raid5_conf_t *conf) | |||
386 | rcu_read_lock(); | 379 | rcu_read_lock(); |
387 | degraded = 0; | 380 | degraded = 0; |
388 | for (i = 0; i < conf->previous_raid_disks; i++) { | 381 | for (i = 0; i < conf->previous_raid_disks; i++) { |
389 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | 382 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
390 | if (!rdev || test_bit(Faulty, &rdev->flags)) | 383 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
391 | degraded++; | 384 | degraded++; |
392 | else if (test_bit(In_sync, &rdev->flags)) | 385 | else if (test_bit(In_sync, &rdev->flags)) |
@@ -410,7 +403,7 @@ static int has_failed(raid5_conf_t *conf) | |||
410 | rcu_read_lock(); | 403 | rcu_read_lock(); |
411 | degraded = 0; | 404 | degraded = 0; |
412 | for (i = 0; i < conf->raid_disks; i++) { | 405 | for (i = 0; i < conf->raid_disks; i++) { |
413 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | 406 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
414 | if (!rdev || test_bit(Faulty, &rdev->flags)) | 407 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
415 | degraded++; | 408 | degraded++; |
416 | else if (test_bit(In_sync, &rdev->flags)) | 409 | else if (test_bit(In_sync, &rdev->flags)) |
@@ -431,7 +424,7 @@ static int has_failed(raid5_conf_t *conf) | |||
431 | } | 424 | } |
432 | 425 | ||
433 | static struct stripe_head * | 426 | static struct stripe_head * |
434 | get_active_stripe(raid5_conf_t *conf, sector_t sector, | 427 | get_active_stripe(struct r5conf *conf, sector_t sector, |
435 | int previous, int noblock, int noquiesce) | 428 | int previous, int noblock, int noquiesce) |
436 | { | 429 | { |
437 | struct stripe_head *sh; | 430 | struct stripe_head *sh; |
@@ -491,7 +484,7 @@ raid5_end_write_request(struct bio *bi, int error); | |||
491 | 484 | ||
492 | static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | 485 | static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) |
493 | { | 486 | { |
494 | raid5_conf_t *conf = sh->raid_conf; | 487 | struct r5conf *conf = sh->raid_conf; |
495 | int i, disks = sh->disks; | 488 | int i, disks = sh->disks; |
496 | 489 | ||
497 | might_sleep(); | 490 | might_sleep(); |
@@ -499,7 +492,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
499 | for (i = disks; i--; ) { | 492 | for (i = disks; i--; ) { |
500 | int rw; | 493 | int rw; |
501 | struct bio *bi; | 494 | struct bio *bi; |
502 | mdk_rdev_t *rdev; | 495 | struct md_rdev *rdev; |
503 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { | 496 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { |
504 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) | 497 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) |
505 | rw = WRITE_FUA; | 498 | rw = WRITE_FUA; |
@@ -650,7 +643,7 @@ static void ops_complete_biofill(void *stripe_head_ref) | |||
650 | { | 643 | { |
651 | struct stripe_head *sh = stripe_head_ref; | 644 | struct stripe_head *sh = stripe_head_ref; |
652 | struct bio *return_bi = NULL; | 645 | struct bio *return_bi = NULL; |
653 | raid5_conf_t *conf = sh->raid_conf; | 646 | struct r5conf *conf = sh->raid_conf; |
654 | int i; | 647 | int i; |
655 | 648 | ||
656 | pr_debug("%s: stripe %llu\n", __func__, | 649 | pr_debug("%s: stripe %llu\n", __func__, |
@@ -695,7 +688,7 @@ static void ops_complete_biofill(void *stripe_head_ref) | |||
695 | static void ops_run_biofill(struct stripe_head *sh) | 688 | static void ops_run_biofill(struct stripe_head *sh) |
696 | { | 689 | { |
697 | struct dma_async_tx_descriptor *tx = NULL; | 690 | struct dma_async_tx_descriptor *tx = NULL; |
698 | raid5_conf_t *conf = sh->raid_conf; | 691 | struct r5conf *conf = sh->raid_conf; |
699 | struct async_submit_ctl submit; | 692 | struct async_submit_ctl submit; |
700 | int i; | 693 | int i; |
701 | 694 | ||
@@ -1246,7 +1239,7 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
1246 | { | 1239 | { |
1247 | int overlap_clear = 0, i, disks = sh->disks; | 1240 | int overlap_clear = 0, i, disks = sh->disks; |
1248 | struct dma_async_tx_descriptor *tx = NULL; | 1241 | struct dma_async_tx_descriptor *tx = NULL; |
1249 | raid5_conf_t *conf = sh->raid_conf; | 1242 | struct r5conf *conf = sh->raid_conf; |
1250 | int level = conf->level; | 1243 | int level = conf->level; |
1251 | struct raid5_percpu *percpu; | 1244 | struct raid5_percpu *percpu; |
1252 | unsigned long cpu; | 1245 | unsigned long cpu; |
@@ -1337,7 +1330,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
1337 | #define raid_run_ops __raid_run_ops | 1330 | #define raid_run_ops __raid_run_ops |
1338 | #endif | 1331 | #endif |
1339 | 1332 | ||
1340 | static int grow_one_stripe(raid5_conf_t *conf) | 1333 | static int grow_one_stripe(struct r5conf *conf) |
1341 | { | 1334 | { |
1342 | struct stripe_head *sh; | 1335 | struct stripe_head *sh; |
1343 | sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); | 1336 | sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); |
@@ -1362,7 +1355,7 @@ static int grow_one_stripe(raid5_conf_t *conf) | |||
1362 | return 1; | 1355 | return 1; |
1363 | } | 1356 | } |
1364 | 1357 | ||
1365 | static int grow_stripes(raid5_conf_t *conf, int num) | 1358 | static int grow_stripes(struct r5conf *conf, int num) |
1366 | { | 1359 | { |
1367 | struct kmem_cache *sc; | 1360 | struct kmem_cache *sc; |
1368 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | 1361 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
@@ -1411,7 +1404,7 @@ static size_t scribble_len(int num) | |||
1411 | return len; | 1404 | return len; |
1412 | } | 1405 | } |
1413 | 1406 | ||
1414 | static int resize_stripes(raid5_conf_t *conf, int newsize) | 1407 | static int resize_stripes(struct r5conf *conf, int newsize) |
1415 | { | 1408 | { |
1416 | /* Make all the stripes able to hold 'newsize' devices. | 1409 | /* Make all the stripes able to hold 'newsize' devices. |
1417 | * New slots in each stripe get 'page' set to a new page. | 1410 | * New slots in each stripe get 'page' set to a new page. |
@@ -1556,7 +1549,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
1556 | return err; | 1549 | return err; |
1557 | } | 1550 | } |
1558 | 1551 | ||
1559 | static int drop_one_stripe(raid5_conf_t *conf) | 1552 | static int drop_one_stripe(struct r5conf *conf) |
1560 | { | 1553 | { |
1561 | struct stripe_head *sh; | 1554 | struct stripe_head *sh; |
1562 | 1555 | ||
@@ -1572,7 +1565,7 @@ static int drop_one_stripe(raid5_conf_t *conf) | |||
1572 | return 1; | 1565 | return 1; |
1573 | } | 1566 | } |
1574 | 1567 | ||
1575 | static void shrink_stripes(raid5_conf_t *conf) | 1568 | static void shrink_stripes(struct r5conf *conf) |
1576 | { | 1569 | { |
1577 | while (drop_one_stripe(conf)) | 1570 | while (drop_one_stripe(conf)) |
1578 | ; | 1571 | ; |
@@ -1585,11 +1578,11 @@ static void shrink_stripes(raid5_conf_t *conf) | |||
1585 | static void raid5_end_read_request(struct bio * bi, int error) | 1578 | static void raid5_end_read_request(struct bio * bi, int error) |
1586 | { | 1579 | { |
1587 | struct stripe_head *sh = bi->bi_private; | 1580 | struct stripe_head *sh = bi->bi_private; |
1588 | raid5_conf_t *conf = sh->raid_conf; | 1581 | struct r5conf *conf = sh->raid_conf; |
1589 | int disks = sh->disks, i; | 1582 | int disks = sh->disks, i; |
1590 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); | 1583 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
1591 | char b[BDEVNAME_SIZE]; | 1584 | char b[BDEVNAME_SIZE]; |
1592 | mdk_rdev_t *rdev; | 1585 | struct md_rdev *rdev; |
1593 | 1586 | ||
1594 | 1587 | ||
1595 | for (i=0 ; i<disks; i++) | 1588 | for (i=0 ; i<disks; i++) |
@@ -1672,7 +1665,7 @@ static void raid5_end_read_request(struct bio * bi, int error) | |||
1672 | static void raid5_end_write_request(struct bio *bi, int error) | 1665 | static void raid5_end_write_request(struct bio *bi, int error) |
1673 | { | 1666 | { |
1674 | struct stripe_head *sh = bi->bi_private; | 1667 | struct stripe_head *sh = bi->bi_private; |
1675 | raid5_conf_t *conf = sh->raid_conf; | 1668 | struct r5conf *conf = sh->raid_conf; |
1676 | int disks = sh->disks, i; | 1669 | int disks = sh->disks, i; |
1677 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); | 1670 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
1678 | sector_t first_bad; | 1671 | sector_t first_bad; |
@@ -1726,10 +1719,10 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous) | |||
1726 | dev->sector = compute_blocknr(sh, i, previous); | 1719 | dev->sector = compute_blocknr(sh, i, previous); |
1727 | } | 1720 | } |
1728 | 1721 | ||
1729 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 1722 | static void error(struct mddev *mddev, struct md_rdev *rdev) |
1730 | { | 1723 | { |
1731 | char b[BDEVNAME_SIZE]; | 1724 | char b[BDEVNAME_SIZE]; |
1732 | raid5_conf_t *conf = mddev->private; | 1725 | struct r5conf *conf = mddev->private; |
1733 | pr_debug("raid456: error called\n"); | 1726 | pr_debug("raid456: error called\n"); |
1734 | 1727 | ||
1735 | if (test_and_clear_bit(In_sync, &rdev->flags)) { | 1728 | if (test_and_clear_bit(In_sync, &rdev->flags)) { |
@@ -1758,7 +1751,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1758 | * Input: a 'big' sector number, | 1751 | * Input: a 'big' sector number, |
1759 | * Output: index of the data and parity disk, and the sector # in them. | 1752 | * Output: index of the data and parity disk, and the sector # in them. |
1760 | */ | 1753 | */ |
1761 | static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | 1754 | static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, |
1762 | int previous, int *dd_idx, | 1755 | int previous, int *dd_idx, |
1763 | struct stripe_head *sh) | 1756 | struct stripe_head *sh) |
1764 | { | 1757 | { |
@@ -1963,7 +1956,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1963 | 1956 | ||
1964 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | 1957 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) |
1965 | { | 1958 | { |
1966 | raid5_conf_t *conf = sh->raid_conf; | 1959 | struct r5conf *conf = sh->raid_conf; |
1967 | int raid_disks = sh->disks; | 1960 | int raid_disks = sh->disks; |
1968 | int data_disks = raid_disks - conf->max_degraded; | 1961 | int data_disks = raid_disks - conf->max_degraded; |
1969 | sector_t new_sector = sh->sector, check; | 1962 | sector_t new_sector = sh->sector, check; |
@@ -2088,7 +2081,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2088 | int rcw, int expand) | 2081 | int rcw, int expand) |
2089 | { | 2082 | { |
2090 | int i, pd_idx = sh->pd_idx, disks = sh->disks; | 2083 | int i, pd_idx = sh->pd_idx, disks = sh->disks; |
2091 | raid5_conf_t *conf = sh->raid_conf; | 2084 | struct r5conf *conf = sh->raid_conf; |
2092 | int level = conf->level; | 2085 | int level = conf->level; |
2093 | 2086 | ||
2094 | if (rcw) { | 2087 | if (rcw) { |
@@ -2173,7 +2166,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2173 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) | 2166 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) |
2174 | { | 2167 | { |
2175 | struct bio **bip; | 2168 | struct bio **bip; |
2176 | raid5_conf_t *conf = sh->raid_conf; | 2169 | struct r5conf *conf = sh->raid_conf; |
2177 | int firstwrite=0; | 2170 | int firstwrite=0; |
2178 | 2171 | ||
2179 | pr_debug("adding bi b#%llu to stripe s#%llu\n", | 2172 | pr_debug("adding bi b#%llu to stripe s#%llu\n", |
@@ -2235,9 +2228,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
2235 | return 0; | 2228 | return 0; |
2236 | } | 2229 | } |
2237 | 2230 | ||
2238 | static void end_reshape(raid5_conf_t *conf); | 2231 | static void end_reshape(struct r5conf *conf); |
2239 | 2232 | ||
2240 | static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, | 2233 | static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, |
2241 | struct stripe_head *sh) | 2234 | struct stripe_head *sh) |
2242 | { | 2235 | { |
2243 | int sectors_per_chunk = | 2236 | int sectors_per_chunk = |
@@ -2254,7 +2247,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, | |||
2254 | } | 2247 | } |
2255 | 2248 | ||
2256 | static void | 2249 | static void |
2257 | handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, | 2250 | handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, |
2258 | struct stripe_head_state *s, int disks, | 2251 | struct stripe_head_state *s, int disks, |
2259 | struct bio **return_bi) | 2252 | struct bio **return_bi) |
2260 | { | 2253 | { |
@@ -2264,7 +2257,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, | |||
2264 | int bitmap_end = 0; | 2257 | int bitmap_end = 0; |
2265 | 2258 | ||
2266 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { | 2259 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
2267 | mdk_rdev_t *rdev; | 2260 | struct md_rdev *rdev; |
2268 | rcu_read_lock(); | 2261 | rcu_read_lock(); |
2269 | rdev = rcu_dereference(conf->disks[i].rdev); | 2262 | rdev = rcu_dereference(conf->disks[i].rdev); |
2270 | if (rdev && test_bit(In_sync, &rdev->flags)) | 2263 | if (rdev && test_bit(In_sync, &rdev->flags)) |
@@ -2359,7 +2352,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, | |||
2359 | } | 2352 | } |
2360 | 2353 | ||
2361 | static void | 2354 | static void |
2362 | handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh, | 2355 | handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, |
2363 | struct stripe_head_state *s) | 2356 | struct stripe_head_state *s) |
2364 | { | 2357 | { |
2365 | int abort = 0; | 2358 | int abort = 0; |
@@ -2378,7 +2371,7 @@ handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh, | |||
2378 | * refcounting of rdevs is not needed | 2371 | * refcounting of rdevs is not needed |
2379 | */ | 2372 | */ |
2380 | for (i = 0; i < conf->raid_disks; i++) { | 2373 | for (i = 0; i < conf->raid_disks; i++) { |
2381 | mdk_rdev_t *rdev = conf->disks[i].rdev; | 2374 | struct md_rdev *rdev = conf->disks[i].rdev; |
2382 | if (!rdev | 2375 | if (!rdev |
2383 | || test_bit(Faulty, &rdev->flags) | 2376 | || test_bit(Faulty, &rdev->flags) |
2384 | || test_bit(In_sync, &rdev->flags)) | 2377 | || test_bit(In_sync, &rdev->flags)) |
@@ -2508,7 +2501,7 @@ static void handle_stripe_fill(struct stripe_head *sh, | |||
2508 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but | 2501 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but |
2509 | * never LOCKED, so we don't need to test 'failed' directly. | 2502 | * never LOCKED, so we don't need to test 'failed' directly. |
2510 | */ | 2503 | */ |
2511 | static void handle_stripe_clean_event(raid5_conf_t *conf, | 2504 | static void handle_stripe_clean_event(struct r5conf *conf, |
2512 | struct stripe_head *sh, int disks, struct bio **return_bi) | 2505 | struct stripe_head *sh, int disks, struct bio **return_bi) |
2513 | { | 2506 | { |
2514 | int i; | 2507 | int i; |
@@ -2553,7 +2546,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf, | |||
2553 | md_wakeup_thread(conf->mddev->thread); | 2546 | md_wakeup_thread(conf->mddev->thread); |
2554 | } | 2547 | } |
2555 | 2548 | ||
2556 | static void handle_stripe_dirtying(raid5_conf_t *conf, | 2549 | static void handle_stripe_dirtying(struct r5conf *conf, |
2557 | struct stripe_head *sh, | 2550 | struct stripe_head *sh, |
2558 | struct stripe_head_state *s, | 2551 | struct stripe_head_state *s, |
2559 | int disks) | 2552 | int disks) |
@@ -2655,7 +2648,7 @@ static void handle_stripe_dirtying(raid5_conf_t *conf, | |||
2655 | schedule_reconstruction(sh, s, rcw == 0, 0); | 2648 | schedule_reconstruction(sh, s, rcw == 0, 0); |
2656 | } | 2649 | } |
2657 | 2650 | ||
2658 | static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, | 2651 | static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, |
2659 | struct stripe_head_state *s, int disks) | 2652 | struct stripe_head_state *s, int disks) |
2660 | { | 2653 | { |
2661 | struct r5dev *dev = NULL; | 2654 | struct r5dev *dev = NULL; |
@@ -2743,7 +2736,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, | |||
2743 | } | 2736 | } |
2744 | 2737 | ||
2745 | 2738 | ||
2746 | static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, | 2739 | static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, |
2747 | struct stripe_head_state *s, | 2740 | struct stripe_head_state *s, |
2748 | int disks) | 2741 | int disks) |
2749 | { | 2742 | { |
@@ -2906,7 +2899,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, | |||
2906 | } | 2899 | } |
2907 | } | 2900 | } |
2908 | 2901 | ||
2909 | static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh) | 2902 | static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) |
2910 | { | 2903 | { |
2911 | int i; | 2904 | int i; |
2912 | 2905 | ||
@@ -2985,7 +2978,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh) | |||
2985 | 2978 | ||
2986 | static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | 2979 | static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
2987 | { | 2980 | { |
2988 | raid5_conf_t *conf = sh->raid_conf; | 2981 | struct r5conf *conf = sh->raid_conf; |
2989 | int disks = sh->disks; | 2982 | int disks = sh->disks; |
2990 | struct r5dev *dev; | 2983 | struct r5dev *dev; |
2991 | int i; | 2984 | int i; |
@@ -3002,7 +2995,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | |||
3002 | rcu_read_lock(); | 2995 | rcu_read_lock(); |
3003 | spin_lock_irq(&conf->device_lock); | 2996 | spin_lock_irq(&conf->device_lock); |
3004 | for (i=disks; i--; ) { | 2997 | for (i=disks; i--; ) { |
3005 | mdk_rdev_t *rdev; | 2998 | struct md_rdev *rdev; |
3006 | sector_t first_bad; | 2999 | sector_t first_bad; |
3007 | int bad_sectors; | 3000 | int bad_sectors; |
3008 | int is_bad = 0; | 3001 | int is_bad = 0; |
@@ -3069,7 +3062,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | |||
3069 | } | 3062 | } |
3070 | } else if (test_bit(In_sync, &rdev->flags)) | 3063 | } else if (test_bit(In_sync, &rdev->flags)) |
3071 | set_bit(R5_Insync, &dev->flags); | 3064 | set_bit(R5_Insync, &dev->flags); |
3072 | else { | 3065 | else if (!test_bit(Faulty, &rdev->flags)) { |
3073 | /* in sync if before recovery_offset */ | 3066 | /* in sync if before recovery_offset */ |
3074 | if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) | 3067 | if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) |
3075 | set_bit(R5_Insync, &dev->flags); | 3068 | set_bit(R5_Insync, &dev->flags); |
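This small change is the "reads from a failed device" fix from the merge summary: a device can be Faulty while still advertising a recovery_offset beyond the stripe, and the old plain else would then mark it R5_Insync and allow reads to be issued to it. Requiring !Faulty before consulting recovery_offset closes that hole.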
@@ -3109,7 +3102,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | |||
3109 | static void handle_stripe(struct stripe_head *sh) | 3102 | static void handle_stripe(struct stripe_head *sh) |
3110 | { | 3103 | { |
3111 | struct stripe_head_state s; | 3104 | struct stripe_head_state s; |
3112 | raid5_conf_t *conf = sh->raid_conf; | 3105 | struct r5conf *conf = sh->raid_conf; |
3113 | int i; | 3106 | int i; |
3114 | int prexor; | 3107 | int prexor; |
3115 | int disks = sh->disks; | 3108 | int disks = sh->disks; |
@@ -3341,7 +3334,7 @@ finish: | |||
3341 | 3334 | ||
3342 | if (s.handle_bad_blocks) | 3335 | if (s.handle_bad_blocks) |
3343 | for (i = disks; i--; ) { | 3336 | for (i = disks; i--; ) { |
3344 | mdk_rdev_t *rdev; | 3337 | struct md_rdev *rdev; |
3345 | struct r5dev *dev = &sh->dev[i]; | 3338 | struct r5dev *dev = &sh->dev[i]; |
3346 | if (test_and_clear_bit(R5_WriteError, &dev->flags)) { | 3339 | if (test_and_clear_bit(R5_WriteError, &dev->flags)) { |
3347 | /* We own a safe reference to the rdev */ | 3340 | /* We own a safe reference to the rdev */ |
@@ -3380,7 +3373,7 @@ finish: | |||
3380 | clear_bit(STRIPE_ACTIVE, &sh->state); | 3373 | clear_bit(STRIPE_ACTIVE, &sh->state); |
3381 | } | 3374 | } |
3382 | 3375 | ||
3383 | static void raid5_activate_delayed(raid5_conf_t *conf) | 3376 | static void raid5_activate_delayed(struct r5conf *conf) |
3384 | { | 3377 | { |
3385 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { | 3378 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { |
3386 | while (!list_empty(&conf->delayed_list)) { | 3379 | while (!list_empty(&conf->delayed_list)) { |
@@ -3396,7 +3389,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf) | |||
3396 | } | 3389 | } |
3397 | } | 3390 | } |
3398 | 3391 | ||
3399 | static void activate_bit_delay(raid5_conf_t *conf) | 3392 | static void activate_bit_delay(struct r5conf *conf) |
3400 | { | 3393 | { |
3401 | /* device_lock is held */ | 3394 | /* device_lock is held */ |
3402 | struct list_head head; | 3395 | struct list_head head; |
@@ -3410,9 +3403,9 @@ static void activate_bit_delay(raid5_conf_t *conf) | |||
3410 | } | 3403 | } |
3411 | } | 3404 | } |
3412 | 3405 | ||
3413 | int md_raid5_congested(mddev_t *mddev, int bits) | 3406 | int md_raid5_congested(struct mddev *mddev, int bits) |
3414 | { | 3407 | { |
3415 | raid5_conf_t *conf = mddev->private; | 3408 | struct r5conf *conf = mddev->private; |
3416 | 3409 | ||
3417 | /* No difference between reads and writes. Just check | 3410 | /* No difference between reads and writes. Just check |
3418 | * how busy the stripe_cache is | 3411 | * how busy the stripe_cache is |
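Unlike the RAID1/RAID10 congestion work elsewhere in this merge, RAID5 judges congestion purely from stripe-cache pressure; this hunk only re-types the function. A condensed sketch of the criteria the body applies (see raid5.c for the exact code):

    /* sketch: the cache is "busy" when allocation is blocked, the array
     * is quiesced, or no inactive stripe_heads remain to hand out */
    static int r5_cache_busy(struct r5conf *conf)
    {
            return conf->inactive_blocked || conf->quiesce ||
                   list_empty_careful(&conf->inactive_list);
    }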
@@ -3431,7 +3424,7 @@ EXPORT_SYMBOL_GPL(md_raid5_congested); | |||
3431 | 3424 | ||
3432 | static int raid5_congested(void *data, int bits) | 3425 | static int raid5_congested(void *data, int bits) |
3433 | { | 3426 | { |
3434 | mddev_t *mddev = data; | 3427 | struct mddev *mddev = data; |
3435 | 3428 | ||
3436 | return mddev_congested(mddev, bits) || | 3429 | return mddev_congested(mddev, bits) || |
3437 | md_raid5_congested(mddev, bits); | 3430 | md_raid5_congested(mddev, bits); |
@@ -3444,7 +3437,7 @@ static int raid5_mergeable_bvec(struct request_queue *q, | |||
3444 | struct bvec_merge_data *bvm, | 3437 | struct bvec_merge_data *bvm, |
3445 | struct bio_vec *biovec) | 3438 | struct bio_vec *biovec) |
3446 | { | 3439 | { |
3447 | mddev_t *mddev = q->queuedata; | 3440 | struct mddev *mddev = q->queuedata; |
3448 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | 3441 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); |
3449 | int max; | 3442 | int max; |
3450 | unsigned int chunk_sectors = mddev->chunk_sectors; | 3443 | unsigned int chunk_sectors = mddev->chunk_sectors; |
@@ -3464,7 +3457,7 @@ static int raid5_mergeable_bvec(struct request_queue *q, | |||
3464 | } | 3457 | } |
3465 | 3458 | ||
3466 | 3459 | ||
3467 | static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) | 3460 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) |
3468 | { | 3461 | { |
3469 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); | 3462 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); |
3470 | unsigned int chunk_sectors = mddev->chunk_sectors; | 3463 | unsigned int chunk_sectors = mddev->chunk_sectors; |
@@ -3480,7 +3473,7 @@ static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) | |||
3480 | * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) | 3473 | * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) |
3481 | * later sampled by raid5d. | 3474 | * later sampled by raid5d. |
3482 | */ | 3475 | */ |
3483 | static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) | 3476 | static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) |
3484 | { | 3477 | { |
3485 | unsigned long flags; | 3478 | unsigned long flags; |
3486 | 3479 | ||
@@ -3494,7 +3487,7 @@ static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) | |||
3494 | } | 3487 | } |
3495 | 3488 | ||
3496 | 3489 | ||
3497 | static struct bio *remove_bio_from_retry(raid5_conf_t *conf) | 3490 | static struct bio *remove_bio_from_retry(struct r5conf *conf) |
3498 | { | 3491 | { |
3499 | struct bio *bi; | 3492 | struct bio *bi; |
3500 | 3493 | ||
@@ -3527,10 +3520,10 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf) | |||
3527 | static void raid5_align_endio(struct bio *bi, int error) | 3520 | static void raid5_align_endio(struct bio *bi, int error) |
3528 | { | 3521 | { |
3529 | struct bio* raid_bi = bi->bi_private; | 3522 | struct bio* raid_bi = bi->bi_private; |
3530 | mddev_t *mddev; | 3523 | struct mddev *mddev; |
3531 | raid5_conf_t *conf; | 3524 | struct r5conf *conf; |
3532 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); | 3525 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
3533 | mdk_rdev_t *rdev; | 3526 | struct md_rdev *rdev; |
3534 | 3527 | ||
3535 | bio_put(bi); | 3528 | bio_put(bi); |
3536 | 3529 | ||
@@ -3574,12 +3567,12 @@ static int bio_fits_rdev(struct bio *bi) | |||
3574 | } | 3567 | } |
3575 | 3568 | ||
3576 | 3569 | ||
3577 | static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) | 3570 | static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) |
3578 | { | 3571 | { |
3579 | raid5_conf_t *conf = mddev->private; | 3572 | struct r5conf *conf = mddev->private; |
3580 | int dd_idx; | 3573 | int dd_idx; |
3581 | struct bio* align_bi; | 3574 | struct bio* align_bi; |
3582 | mdk_rdev_t *rdev; | 3575 | struct md_rdev *rdev; |
3583 | 3576 | ||
3584 | if (!in_chunk_boundary(mddev, raid_bio)) { | 3577 | if (!in_chunk_boundary(mddev, raid_bio)) { |
3585 | pr_debug("chunk_aligned_read : non aligned\n"); | 3578 | pr_debug("chunk_aligned_read : non aligned\n"); |
@@ -3652,7 +3645,7 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) | |||
3652 | * head of the hold_list has changed, i.e. the head was promoted to the | 3645 | * head of the hold_list has changed, i.e. the head was promoted to the |
3653 | * handle_list. | 3646 | * handle_list. |
3654 | */ | 3647 | */ |
3655 | static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) | 3648 | static struct stripe_head *__get_priority_stripe(struct r5conf *conf) |
3656 | { | 3649 | { |
3657 | struct stripe_head *sh; | 3650 | struct stripe_head *sh; |
3658 | 3651 | ||
@@ -3695,9 +3688,9 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) | |||
3695 | return sh; | 3688 | return sh; |
3696 | } | 3689 | } |
3697 | 3690 | ||
3698 | static int make_request(mddev_t *mddev, struct bio * bi) | 3691 | static int make_request(struct mddev *mddev, struct bio * bi) |
3699 | { | 3692 | { |
3700 | raid5_conf_t *conf = mddev->private; | 3693 | struct r5conf *conf = mddev->private; |
3701 | int dd_idx; | 3694 | int dd_idx; |
3702 | sector_t new_sector; | 3695 | sector_t new_sector; |
3703 | sector_t logical_sector, last_sector; | 3696 | sector_t logical_sector, last_sector; |
@@ -3855,9 +3848,9 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
3855 | return 0; | 3848 | return 0; |
3856 | } | 3849 | } |
3857 | 3850 | ||
3858 | static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); | 3851 | static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); |
3859 | 3852 | ||
3860 | static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) | 3853 | static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) |
3861 | { | 3854 | { |
3862 | /* reshaping is quite different to recovery/resync so it is | 3855 | /* reshaping is quite different to recovery/resync so it is |
3863 | * handled quite separately ... here. | 3856 | * handled quite separately ... here. |
@@ -3868,7 +3861,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3868 | * As the reads complete, handle_stripe will copy the data | 3861 | * As the reads complete, handle_stripe will copy the data |
3869 | * into the destination stripe and release that stripe. | 3862 | * into the destination stripe and release that stripe. |
3870 | */ | 3863 | */ |
3871 | raid5_conf_t *conf = mddev->private; | 3864 | struct r5conf *conf = mddev->private; |
3872 | struct stripe_head *sh; | 3865 | struct stripe_head *sh; |
3873 | sector_t first_sector, last_sector; | 3866 | sector_t first_sector, last_sector; |
3874 | int raid_disks = conf->previous_raid_disks; | 3867 | int raid_disks = conf->previous_raid_disks; |
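In outline: reshape_request pins stripes from the old geometry, lets handle_stripe() copy the data into their destinations in the new geometry, and periodically checkpoints progress in the metadata so an interrupted reshape can resume. Throughput is bounded by how many stripe_heads the cache can hold at once, which is why check_stripe_cache() further down gates any reshape attempt.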
@@ -4075,9 +4068,9 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
4075 | } | 4068 | } |
4076 | 4069 | ||
4077 | /* FIXME go_faster isn't used */ | 4070 | /* FIXME go_faster isn't used */ |
4078 | static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) | 4071 | static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) |
4079 | { | 4072 | { |
4080 | raid5_conf_t *conf = mddev->private; | 4073 | struct r5conf *conf = mddev->private; |
4081 | struct stripe_head *sh; | 4074 | struct stripe_head *sh; |
4082 | sector_t max_sector = mddev->dev_sectors; | 4075 | sector_t max_sector = mddev->dev_sectors; |
4083 | sector_t sync_blocks; | 4076 | sector_t sync_blocks; |
@@ -4162,7 +4155,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
4162 | return STRIPE_SECTORS; | 4155 | return STRIPE_SECTORS; |
4163 | } | 4156 | } |
4164 | 4157 | ||
4165 | static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | 4158 | static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) |
4166 | { | 4159 | { |
4167 | /* We may not be able to submit a whole bio at once as there | 4160 | /* We may not be able to submit a whole bio at once as there |
4168 | * may not be enough stripe_heads available. | 4161 | * may not be enough stripe_heads available. |
@@ -4234,10 +4227,10 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | |||
4234 | * During the scan, completed stripes are saved for us by the interrupt | 4227 | * During the scan, completed stripes are saved for us by the interrupt |
4235 | * handler, so that they will not have to wait for our next wakeup. | 4228 | * handler, so that they will not have to wait for our next wakeup. |
4236 | */ | 4229 | */ |
4237 | static void raid5d(mddev_t *mddev) | 4230 | static void raid5d(struct mddev *mddev) |
4238 | { | 4231 | { |
4239 | struct stripe_head *sh; | 4232 | struct stripe_head *sh; |
4240 | raid5_conf_t *conf = mddev->private; | 4233 | struct r5conf *conf = mddev->private; |
4241 | int handled; | 4234 | int handled; |
4242 | struct blk_plug plug; | 4235 | struct blk_plug plug; |
4243 | 4236 | ||
@@ -4301,9 +4294,9 @@ static void raid5d(mddev_t *mddev) | |||
4301 | } | 4294 | } |
4302 | 4295 | ||
4303 | static ssize_t | 4296 | static ssize_t |
4304 | raid5_show_stripe_cache_size(mddev_t *mddev, char *page) | 4297 | raid5_show_stripe_cache_size(struct mddev *mddev, char *page) |
4305 | { | 4298 | { |
4306 | raid5_conf_t *conf = mddev->private; | 4299 | struct r5conf *conf = mddev->private; |
4307 | if (conf) | 4300 | if (conf) |
4308 | return sprintf(page, "%d\n", conf->max_nr_stripes); | 4301 | return sprintf(page, "%d\n", conf->max_nr_stripes); |
4309 | else | 4302 | else |
@@ -4311,9 +4304,9 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page) | |||
4311 | } | 4304 | } |
4312 | 4305 | ||
4313 | int | 4306 | int |
4314 | raid5_set_cache_size(mddev_t *mddev, int size) | 4307 | raid5_set_cache_size(struct mddev *mddev, int size) |
4315 | { | 4308 | { |
4316 | raid5_conf_t *conf = mddev->private; | 4309 | struct r5conf *conf = mddev->private; |
4317 | int err; | 4310 | int err; |
4318 | 4311 | ||
4319 | if (size <= 16 || size > 32768) | 4312 | if (size <= 16 || size > 32768) |
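raid5_set_cache_size() is the backend of the stripe_cache_size sysfs attribute (/sys/block/mdX/md/stripe_cache_size): values outside 17..32768 are rejected, and an accepted value is reached by shrinking or growing the cache one stripe_head at a time. A condensed sketch of that convergence loop (md_allow_write() and error handling elided):

    /* sketch: step max_nr_stripes toward the requested size, stopping
     * early if a stripe_head cannot be freed or allocated */
    while (size < conf->max_nr_stripes && drop_one_stripe(conf))
            conf->max_nr_stripes--;
    while (size > conf->max_nr_stripes && grow_one_stripe(conf))
            conf->max_nr_stripes++;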
@@ -4337,9 +4330,9 @@ raid5_set_cache_size(mddev_t *mddev, int size) | |||
4337 | EXPORT_SYMBOL(raid5_set_cache_size); | 4330 | EXPORT_SYMBOL(raid5_set_cache_size); |
4338 | 4331 | ||
4339 | static ssize_t | 4332 | static ssize_t |
4340 | raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) | 4333 | raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) |
4341 | { | 4334 | { |
4342 | raid5_conf_t *conf = mddev->private; | 4335 | struct r5conf *conf = mddev->private; |
4343 | unsigned long new; | 4336 | unsigned long new; |
4344 | int err; | 4337 | int err; |
4345 | 4338 | ||
@@ -4362,9 +4355,9 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, | |||
4362 | raid5_store_stripe_cache_size); | 4355 | raid5_store_stripe_cache_size); |
4363 | 4356 | ||
4364 | static ssize_t | 4357 | static ssize_t |
4365 | raid5_show_preread_threshold(mddev_t *mddev, char *page) | 4358 | raid5_show_preread_threshold(struct mddev *mddev, char *page) |
4366 | { | 4359 | { |
4367 | raid5_conf_t *conf = mddev->private; | 4360 | struct r5conf *conf = mddev->private; |
4368 | if (conf) | 4361 | if (conf) |
4369 | return sprintf(page, "%d\n", conf->bypass_threshold); | 4362 | return sprintf(page, "%d\n", conf->bypass_threshold); |
4370 | else | 4363 | else |
@@ -4372,9 +4365,9 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page) | |||
4372 | } | 4365 | } |
4373 | 4366 | ||
4374 | static ssize_t | 4367 | static ssize_t |
4375 | raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) | 4368 | raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) |
4376 | { | 4369 | { |
4377 | raid5_conf_t *conf = mddev->private; | 4370 | struct r5conf *conf = mddev->private; |
4378 | unsigned long new; | 4371 | unsigned long new; |
4379 | if (len >= PAGE_SIZE) | 4372 | if (len >= PAGE_SIZE) |
4380 | return -EINVAL; | 4373 | return -EINVAL; |
@@ -4396,9 +4389,9 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, | |||
4396 | raid5_store_preread_threshold); | 4389 | raid5_store_preread_threshold); |
4397 | 4390 | ||
4398 | static ssize_t | 4391 | static ssize_t |
4399 | stripe_cache_active_show(mddev_t *mddev, char *page) | 4392 | stripe_cache_active_show(struct mddev *mddev, char *page) |
4400 | { | 4393 | { |
4401 | raid5_conf_t *conf = mddev->private; | 4394 | struct r5conf *conf = mddev->private; |
4402 | if (conf) | 4395 | if (conf) |
4403 | return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); | 4396 | return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); |
4404 | else | 4397 | else |
@@ -4420,9 +4413,9 @@ static struct attribute_group raid5_attrs_group = { | |||
4420 | }; | 4413 | }; |
4421 | 4414 | ||
4422 | static sector_t | 4415 | static sector_t |
4423 | raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | 4416 | raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
4424 | { | 4417 | { |
4425 | raid5_conf_t *conf = mddev->private; | 4418 | struct r5conf *conf = mddev->private; |
4426 | 4419 | ||
4427 | if (!sectors) | 4420 | if (!sectors) |
4428 | sectors = mddev->dev_sectors; | 4421 | sectors = mddev->dev_sectors; |
@@ -4435,7 +4428,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
4435 | return sectors * (raid_disks - conf->max_degraded); | 4428 | return sectors * (raid_disks - conf->max_degraded); |
4436 | } | 4429 | } |
4437 | 4430 | ||
4438 | static void raid5_free_percpu(raid5_conf_t *conf) | 4431 | static void raid5_free_percpu(struct r5conf *conf) |
4439 | { | 4432 | { |
4440 | struct raid5_percpu *percpu; | 4433 | struct raid5_percpu *percpu; |
4441 | unsigned long cpu; | 4434 | unsigned long cpu; |
@@ -4457,7 +4450,7 @@ static void raid5_free_percpu(raid5_conf_t *conf) | |||
4457 | free_percpu(conf->percpu); | 4450 | free_percpu(conf->percpu); |
4458 | } | 4451 | } |
4459 | 4452 | ||
4460 | static void free_conf(raid5_conf_t *conf) | 4453 | static void free_conf(struct r5conf *conf) |
4461 | { | 4454 | { |
4462 | shrink_stripes(conf); | 4455 | shrink_stripes(conf); |
4463 | raid5_free_percpu(conf); | 4456 | raid5_free_percpu(conf); |
@@ -4470,7 +4463,7 @@ static void free_conf(raid5_conf_t *conf) | |||
4470 | static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | 4463 | static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, |
4471 | void *hcpu) | 4464 | void *hcpu) |
4472 | { | 4465 | { |
4473 | raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify); | 4466 | struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); |
4474 | long cpu = (long)hcpu; | 4467 | long cpu = (long)hcpu; |
4475 | struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); | 4468 | struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); |
4476 | 4469 | ||
@@ -4505,7 +4498,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
4505 | } | 4498 | } |
4506 | #endif | 4499 | #endif |
4507 | 4500 | ||
4508 | static int raid5_alloc_percpu(raid5_conf_t *conf) | 4501 | static int raid5_alloc_percpu(struct r5conf *conf) |
4509 | { | 4502 | { |
4510 | unsigned long cpu; | 4503 | unsigned long cpu; |
4511 | struct page *spare_page; | 4504 | struct page *spare_page; |
@@ -4547,11 +4540,11 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) | |||
4547 | return err; | 4540 | return err; |
4548 | } | 4541 | } |
4549 | 4542 | ||
4550 | static raid5_conf_t *setup_conf(mddev_t *mddev) | 4543 | static struct r5conf *setup_conf(struct mddev *mddev) |
4551 | { | 4544 | { |
4552 | raid5_conf_t *conf; | 4545 | struct r5conf *conf; |
4553 | int raid_disk, memory, max_disks; | 4546 | int raid_disk, memory, max_disks; |
4554 | mdk_rdev_t *rdev; | 4547 | struct md_rdev *rdev; |
4555 | struct disk_info *disk; | 4548 | struct disk_info *disk; |
4556 | 4549 | ||
4557 | if (mddev->new_level != 5 | 4550 | if (mddev->new_level != 5 |
@@ -4583,7 +4576,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4583 | return ERR_PTR(-EINVAL); | 4576 | return ERR_PTR(-EINVAL); |
4584 | } | 4577 | } |
4585 | 4578 | ||
4586 | conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); | 4579 | conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); |
4587 | if (conf == NULL) | 4580 | if (conf == NULL) |
4588 | goto abort; | 4581 | goto abort; |
4589 | spin_lock_init(&conf->device_lock); | 4582 | spin_lock_init(&conf->device_lock); |
@@ -4598,6 +4591,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4598 | atomic_set(&conf->preread_active_stripes, 0); | 4591 | atomic_set(&conf->preread_active_stripes, 0); |
4599 | atomic_set(&conf->active_aligned_reads, 0); | 4592 | atomic_set(&conf->active_aligned_reads, 0); |
4600 | conf->bypass_threshold = BYPASS_THRESHOLD; | 4593 | conf->bypass_threshold = BYPASS_THRESHOLD; |
4594 | conf->recovery_disabled = mddev->recovery_disabled - 1; | ||
4601 | 4595 | ||
4602 | conf->raid_disks = mddev->raid_disks; | 4596 | conf->raid_disks = mddev->raid_disks; |
4603 | if (mddev->reshape_position == MaxSector) | 4597 | if (mddev->reshape_position == MaxSector) |
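Seeding conf->recovery_disabled to one less than mddev->recovery_disabled is part of the recovery_disabled fixes in this merge: the two counters only become equal after an unrecoverable failure bumps the mddev value, and while they match the personality refuses to re-add spares, preventing a hopeless recovery from looping.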
@@ -4712,12 +4706,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded | |||
4712 | return 0; | 4706 | return 0; |
4713 | } | 4707 | } |
4714 | 4708 | ||
4715 | static int run(mddev_t *mddev) | 4709 | static int run(struct mddev *mddev) |
4716 | { | 4710 | { |
4717 | raid5_conf_t *conf; | 4711 | struct r5conf *conf; |
4718 | int working_disks = 0; | 4712 | int working_disks = 0; |
4719 | int dirty_parity_disks = 0; | 4713 | int dirty_parity_disks = 0; |
4720 | mdk_rdev_t *rdev; | 4714 | struct md_rdev *rdev; |
4721 | sector_t reshape_offset = 0; | 4715 | sector_t reshape_offset = 0; |
4722 | 4716 | ||
4723 | if (mddev->recovery_cp != MaxSector) | 4717 | if (mddev->recovery_cp != MaxSector) |
@@ -4942,18 +4936,16 @@ static int run(mddev_t *mddev) | |||
4942 | return 0; | 4936 | return 0; |
4943 | abort: | 4937 | abort: |
4944 | md_unregister_thread(&mddev->thread); | 4938 | md_unregister_thread(&mddev->thread); |
4945 | if (conf) { | 4939 | print_raid5_conf(conf); |
4946 | print_raid5_conf(conf); | 4940 | free_conf(conf); |
4947 | free_conf(conf); | ||
4948 | } | ||
4949 | mddev->private = NULL; | 4941 | mddev->private = NULL; |
4950 | printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); | 4942 | printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); |
4951 | return -EIO; | 4943 | return -EIO; |
4952 | } | 4944 | } |
4953 | 4945 | ||
4954 | static int stop(mddev_t *mddev) | 4946 | static int stop(struct mddev *mddev) |
4955 | { | 4947 | { |
4956 | raid5_conf_t *conf = mddev->private; | 4948 | struct r5conf *conf = mddev->private; |
4957 | 4949 | ||
4958 | md_unregister_thread(&mddev->thread); | 4950 | md_unregister_thread(&mddev->thread); |
4959 | if (mddev->queue) | 4951 | if (mddev->queue) |
@@ -4964,44 +4956,9 @@ static int stop(mddev_t *mddev) | |||
4964 | return 0; | 4956 | return 0; |
4965 | } | 4957 | } |
4966 | 4958 | ||
4967 | #ifdef DEBUG | 4959 | static void status(struct seq_file *seq, struct mddev *mddev) |
4968 | static void print_sh(struct seq_file *seq, struct stripe_head *sh) | ||
4969 | { | ||
4970 | int i; | ||
4971 | |||
4972 | seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", | ||
4973 | (unsigned long long)sh->sector, sh->pd_idx, sh->state); | ||
4974 | seq_printf(seq, "sh %llu, count %d.\n", | ||
4975 | (unsigned long long)sh->sector, atomic_read(&sh->count)); | ||
4976 | seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); | ||
4977 | for (i = 0; i < sh->disks; i++) { | ||
4978 | seq_printf(seq, "(cache%d: %p %ld) ", | ||
4979 | i, sh->dev[i].page, sh->dev[i].flags); | ||
4980 | } | ||
4981 | seq_printf(seq, "\n"); | ||
4982 | } | ||
4983 | |||
4984 | static void printall(struct seq_file *seq, raid5_conf_t *conf) | ||
4985 | { | 4960 | { |
4986 | struct stripe_head *sh; | 4961 | struct r5conf *conf = mddev->private; |
4987 | struct hlist_node *hn; | ||
4988 | int i; | ||
4989 | |||
4990 | spin_lock_irq(&conf->device_lock); | ||
4991 | for (i = 0; i < NR_HASH; i++) { | ||
4992 | hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { | ||
4993 | if (sh->raid_conf != conf) | ||
4994 | continue; | ||
4995 | print_sh(seq, sh); | ||
4996 | } | ||
4997 | } | ||
4998 | spin_unlock_irq(&conf->device_lock); | ||
4999 | } | ||
5000 | #endif | ||
5001 | |||
5002 | static void status(struct seq_file *seq, mddev_t *mddev) | ||
5003 | { | ||
5004 | raid5_conf_t *conf = mddev->private; | ||
5005 | int i; | 4962 | int i; |
5006 | 4963 | ||
5007 | seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, | 4964 | seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, |
@@ -5012,13 +4969,9 @@ static void status(struct seq_file *seq, mddev_t *mddev) | |||
5012 | conf->disks[i].rdev && | 4969 | conf->disks[i].rdev && |
5013 | test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); | 4970 | test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); |
5014 | seq_printf (seq, "]"); | 4971 | seq_printf (seq, "]"); |
5015 | #ifdef DEBUG | ||
5016 | seq_printf (seq, "\n"); | ||
5017 | printall(seq, conf); | ||
5018 | #endif | ||
5019 | } | 4972 | } |
5020 | 4973 | ||
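The print_sh()/printall() helpers removed above, together with the RAID5_PARANOIA, CHECK_DEVLOCK() and DEBUG machinery dropped earlier in this file, were compile-time debug aids; with them gone, status() no longer dumps every cached stripe_head, and the remaining pr_debug() calls, switchable at runtime via dynamic debug, are the supported tracing path.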
5021 | static void print_raid5_conf (raid5_conf_t *conf) | 4974 | static void print_raid5_conf (struct r5conf *conf) |
5022 | { | 4975 | { |
5023 | int i; | 4976 | int i; |
5024 | struct disk_info *tmp; | 4977 | struct disk_info *tmp; |
@@ -5042,10 +4995,10 @@ static void print_raid5_conf (raid5_conf_t *conf) | |||
5042 | } | 4995 | } |
5043 | } | 4996 | } |
5044 | 4997 | ||
5045 | static int raid5_spare_active(mddev_t *mddev) | 4998 | static int raid5_spare_active(struct mddev *mddev) |
5046 | { | 4999 | { |
5047 | int i; | 5000 | int i; |
5048 | raid5_conf_t *conf = mddev->private; | 5001 | struct r5conf *conf = mddev->private; |
5049 | struct disk_info *tmp; | 5002 | struct disk_info *tmp; |
5050 | int count = 0; | 5003 | int count = 0; |
5051 | unsigned long flags; | 5004 | unsigned long flags; |
@@ -5067,11 +5020,11 @@ static int raid5_spare_active(mddev_t *mddev) | |||
5067 | return count; | 5020 | return count; |
5068 | } | 5021 | } |
5069 | 5022 | ||
5070 | static int raid5_remove_disk(mddev_t *mddev, int number) | 5023 | static int raid5_remove_disk(struct mddev *mddev, int number) |
5071 | { | 5024 | { |
5072 | raid5_conf_t *conf = mddev->private; | 5025 | struct r5conf *conf = mddev->private; |
5073 | int err = 0; | 5026 | int err = 0; |
5074 | mdk_rdev_t *rdev; | 5027 | struct md_rdev *rdev; |
5075 | struct disk_info *p = conf->disks + number; | 5028 | struct disk_info *p = conf->disks + number; |
5076 | 5029 | ||
5077 | print_raid5_conf(conf); | 5030 | print_raid5_conf(conf); |
@@ -5110,9 +5063,9 @@ abort: | |||
5110 | return err; | 5063 | return err; |
5111 | } | 5064 | } |
5112 | 5065 | ||
5113 | static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 5066 | static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
5114 | { | 5067 | { |
5115 | raid5_conf_t *conf = mddev->private; | 5068 | struct r5conf *conf = mddev->private; |
5116 | int err = -EEXIST; | 5069 | int err = -EEXIST; |
5117 | int disk; | 5070 | int disk; |
5118 | struct disk_info *p; | 5071 | struct disk_info *p; |
@@ -5153,7 +5106,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
5153 | return err; | 5106 | return err; |
5154 | } | 5107 | } |
5155 | 5108 | ||
5156 | static int raid5_resize(mddev_t *mddev, sector_t sectors) | 5109 | static int raid5_resize(struct mddev *mddev, sector_t sectors) |
5157 | { | 5110 | { |
5158 | /* no resync is happening, and there is enough space | 5111 | /* no resync is happening, and there is enough space |
5159 | * on all devices, so we can resize. | 5112 | * on all devices, so we can resize. |
@@ -5180,7 +5133,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) | |||
5180 | return 0; | 5133 | return 0; |
5181 | } | 5134 | } |
5182 | 5135 | ||
5183 | static int check_stripe_cache(mddev_t *mddev) | 5136 | static int check_stripe_cache(struct mddev *mddev) |
5184 | { | 5137 | { |
5185 | /* Can only proceed if there are plenty of stripe_heads. | 5138 | /* Can only proceed if there are plenty of stripe_heads. |
5186 | * We need a minimum of one full stripe, and for sensible progress | 5139 | * We need a minimum of one full stripe, and for sensible progress |
@@ -5190,7 +5143,7 @@ static int check_stripe_cache(mddev_t *mddev) | |||
5190 | * If the chunk size is greater, user-space should request more | 5143 | * If the chunk size is greater, user-space should request more |
5191 | * stripe_heads first. | 5144 | * stripe_heads first. |
5192 | */ | 5145 | */ |
5193 | raid5_conf_t *conf = mddev->private; | 5146 | struct r5conf *conf = mddev->private; |
5194 | if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 | 5147 | if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 |
5195 | > conf->max_nr_stripes || | 5148 | > conf->max_nr_stripes || |
5196 | ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 | 5149 | ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 |
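The factor of 4 in the comparison above is easiest to see with concrete numbers. A self-contained restatement of the gate, with an illustrative chunk size (STRIPE_SIZE is PAGE_SIZE, i.e. 4096 bytes on most architectures):

    /* e.g. a 512 KiB chunk: 1024 sectors << 9 = 524288 bytes,
     * 524288 / 4096 = 128 pages per full stripe, so the cache
     * must hold at least 4 * 128 = 512 stripe_heads.
     */
    static int stripe_cache_big_enough(int chunk_sectors, int max_nr_stripes)
    {
            return ((chunk_sectors << 9) / 4096) * 4 <= max_nr_stripes;
    }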
@@ -5204,9 +5157,9 @@ static int check_stripe_cache(mddev_t *mddev) | |||
5204 | return 1; | 5157 | return 1; |
5205 | } | 5158 | } |
5206 | 5159 | ||
5207 | static int check_reshape(mddev_t *mddev) | 5160 | static int check_reshape(struct mddev *mddev) |
5208 | { | 5161 | { |
5209 | raid5_conf_t *conf = mddev->private; | 5162 | struct r5conf *conf = mddev->private; |
5210 | 5163 | ||
5211 | if (mddev->delta_disks == 0 && | 5164 | if (mddev->delta_disks == 0 && |
5212 | mddev->new_layout == mddev->layout && | 5165 | mddev->new_layout == mddev->layout && |
@@ -5236,10 +5189,10 @@ static int check_reshape(mddev_t *mddev) | |||
5236 | return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); | 5189 | return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); |
5237 | } | 5190 | } |
5238 | 5191 | ||
5239 | static int raid5_start_reshape(mddev_t *mddev) | 5192 | static int raid5_start_reshape(struct mddev *mddev) |
5240 | { | 5193 | { |
5241 | raid5_conf_t *conf = mddev->private; | 5194 | struct r5conf *conf = mddev->private; |
5242 | mdk_rdev_t *rdev; | 5195 | struct md_rdev *rdev; |
5243 | int spares = 0; | 5196 | int spares = 0; |
5244 | unsigned long flags; | 5197 | unsigned long flags; |
5245 | 5198 | ||
@@ -5353,7 +5306,7 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5353 | /* This is called from the reshape thread and should make any | 5306 | /* This is called from the reshape thread and should make any |
5354 | * changes needed in 'conf' | 5307 | * changes needed in 'conf' |
5355 | */ | 5308 | */ |
5356 | static void end_reshape(raid5_conf_t *conf) | 5309 | static void end_reshape(struct r5conf *conf) |
5357 | { | 5310 | { |
5358 | 5311 | ||
5359 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { | 5312 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { |
@@ -5380,9 +5333,9 @@ static void end_reshape(raid5_conf_t *conf) | |||
5380 | /* This is called from the raid5d thread with mddev_lock held. | 5333 | /* This is called from the raid5d thread with mddev_lock held. |
5381 | * It makes config changes to the device. | 5334 | * It makes config changes to the device. |
5382 | */ | 5335 | */ |
5383 | static void raid5_finish_reshape(mddev_t *mddev) | 5336 | static void raid5_finish_reshape(struct mddev *mddev) |
5384 | { | 5337 | { |
5385 | raid5_conf_t *conf = mddev->private; | 5338 | struct r5conf *conf = mddev->private; |
5386 | 5339 | ||
5387 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 5340 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
5388 | 5341 | ||
@@ -5401,7 +5354,7 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5401 | for (d = conf->raid_disks ; | 5354 | for (d = conf->raid_disks ; |
5402 | d < conf->raid_disks - mddev->delta_disks; | 5355 | d < conf->raid_disks - mddev->delta_disks; |
5403 | d++) { | 5356 | d++) { |
5404 | mdk_rdev_t *rdev = conf->disks[d].rdev; | 5357 | struct md_rdev *rdev = conf->disks[d].rdev; |
5405 | if (rdev && raid5_remove_disk(mddev, d) == 0) { | 5358 | if (rdev && raid5_remove_disk(mddev, d) == 0) { |
5406 | sysfs_unlink_rdev(mddev, rdev); | 5359 | sysfs_unlink_rdev(mddev, rdev); |
5407 | rdev->raid_disk = -1; | 5360 | rdev->raid_disk = -1; |
@@ -5415,9 +5368,9 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5415 | } | 5368 | } |
5416 | } | 5369 | } |
5417 | 5370 | ||
5418 | static void raid5_quiesce(mddev_t *mddev, int state) | 5371 | static void raid5_quiesce(struct mddev *mddev, int state) |
5419 | { | 5372 | { |
5420 | raid5_conf_t *conf = mddev->private; | 5373 | struct r5conf *conf = mddev->private; |
5421 | 5374 | ||
5422 | switch(state) { | 5375 | switch(state) { |
5423 | case 2: /* resume for a suspend */ | 5376 | case 2: /* resume for a suspend */ |
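The state argument encodes three transitions, of which case 2 above is the unusual one. A sketch of the contract as the personality interface of this era uses it (function name and comment wording are an interpretation, not a quote):

    static void quiesce_sketch(struct mddev *mddev, int state)
    {
            switch (state) {
            case 2: /* resume for a suspend: wake waiters so that a
                     * pending resync/reshape window can progress */
                    break;
            case 1: /* quiesce: stop new stripe activity and wait
                     * for every active stripe to drain */
                    break;
            case 0: /* fully resume normal operation */
                    break;
            }
    }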
@@ -5451,20 +5404,20 @@ static void raid5_quiesce(mddev_t *mddev, int state) | |||
5451 | } | 5404 | } |
5452 | 5405 | ||
5453 | 5406 | ||
5454 | static void *raid45_takeover_raid0(mddev_t *mddev, int level) | 5407 | static void *raid45_takeover_raid0(struct mddev *mddev, int level) |
5455 | { | 5408 | { |
5456 | struct raid0_private_data *raid0_priv = mddev->private; | 5409 | struct r0conf *raid0_conf = mddev->private; |
5457 | sector_t sectors; | 5410 | sector_t sectors; |
5458 | 5411 | ||
5459 | /* for raid0 takeover only one zone is supported */ | 5412 | /* for raid0 takeover only one zone is supported */ |
5460 | if (raid0_priv->nr_strip_zones > 1) { | 5413 | if (raid0_conf->nr_strip_zones > 1) { |
5461 | printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", | 5414 | printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", |
5462 | mdname(mddev)); | 5415 | mdname(mddev)); |
5463 | return ERR_PTR(-EINVAL); | 5416 | return ERR_PTR(-EINVAL); |
5464 | } | 5417 | } |
5465 | 5418 | ||
5466 | sectors = raid0_priv->strip_zone[0].zone_end; | 5419 | sectors = raid0_conf->strip_zone[0].zone_end; |
5467 | sector_div(sectors, raid0_priv->strip_zone[0].nb_dev); | 5420 | sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); |
5468 | mddev->dev_sectors = sectors; | 5421 | mddev->dev_sectors = sectors; |
5469 | mddev->new_level = level; | 5422 | mddev->new_level = level; |
5470 | mddev->new_layout = ALGORITHM_PARITY_N; | 5423 | mddev->new_layout = ALGORITHM_PARITY_N; |
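The per-device size above is computed with sector_div(), which divides a sector_t in place (plain 64-bit division is not available on all 32-bit targets) and returns the remainder. A worked example with made-up numbers:

    sector_t sectors = 4000000;   /* zone_end of the single zone    */
    unsigned int nb_dev = 4;      /* member devices in that zone    */
    sector_div(sectors, nb_dev);  /* sectors == 1000000 afterwards: */
                                  /* the per-device share stored in */
                                  /* mddev->dev_sectors             */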
@@ -5478,7 +5431,7 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level) | |||
5478 | } | 5431 | } |
5479 | 5432 | ||
5480 | 5433 | ||
5481 | static void *raid5_takeover_raid1(mddev_t *mddev) | 5434 | static void *raid5_takeover_raid1(struct mddev *mddev) |
5482 | { | 5435 | { |
5483 | int chunksect; | 5436 | int chunksect; |
5484 | 5437 | ||
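This hunk shows only the declaration of chunksect; the selection itself (reconstructed from the raid5.c of this period, so treat the exact bounds as an assumption) starts at 64 KiB and halves until the chunk evenly divides the array:

    int chunksect = 64 * 2;  /* 64 KiB in 512-byte sectors */

    while (chunksect && (mddev->array_sectors & (chunksect - 1)))
            chunksect >>= 1;  /* halve until it divides the array evenly */

    if ((chunksect << 9) < STRIPE_SIZE)
            return ERR_PTR(-EINVAL);  /* no usable chunk size exists */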
@@ -5505,7 +5458,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev) | |||
5505 | return setup_conf(mddev); | 5458 | return setup_conf(mddev); |
5506 | } | 5459 | } |
5507 | 5460 | ||
5508 | static void *raid5_takeover_raid6(mddev_t *mddev) | 5461 | static void *raid5_takeover_raid6(struct mddev *mddev) |
5509 | { | 5462 | { |
5510 | int new_layout; | 5463 | int new_layout; |
5511 | 5464 | ||
@@ -5539,14 +5492,14 @@ static void *raid5_takeover_raid6(mddev_t *mddev) | |||
5539 | } | 5492 | } |
5540 | 5493 | ||
5541 | 5494 | ||
5542 | static int raid5_check_reshape(mddev_t *mddev) | 5495 | static int raid5_check_reshape(struct mddev *mddev) |
5543 | { | 5496 | { |
5544 | /* For a 2-drive array, the layout and chunk size can be changed | 5497 | /* For a 2-drive array, the layout and chunk size can be changed |
5545 | * immediately, as no restriping is needed. | 5498 |
5546 | * For larger arrays we record the new value - after validation | 5499 | * For larger arrays we record the new value - after validation |
5547 | * to be used by a reshape pass. | 5500 | * to be used by a reshape pass. |
5548 | */ | 5501 | */ |
5549 | raid5_conf_t *conf = mddev->private; | 5502 | struct r5conf *conf = mddev->private; |
5550 | int new_chunk = mddev->new_chunk_sectors; | 5503 | int new_chunk = mddev->new_chunk_sectors; |
5551 | 5504 | ||
5552 | if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) | 5505 | if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) |
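The 2-drive fast path described in the comment applies the new values directly instead of deferring them; a condensed sketch, under the assumption that it mirrors the upstream body of this function:

    if (mddev->raid_disks == 2) {
            /* a 2-drive raid5 is effectively a mirror: no data
             * moves, so layout/chunk changes take effect at once */
            if (mddev->new_layout >= 0)
                    mddev->layout = mddev->new_layout;
            if (new_chunk > 0)
                    mddev->chunk_sectors = new_chunk;
    }
    /* larger arrays fall through: the new_* fields stay recorded
     * for the reshape pass vetted by check_reshape() */
    return check_reshape(mddev);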
@@ -5579,7 +5532,7 @@ static int raid5_check_reshape(mddev_t *mddev) | |||
5579 | return check_reshape(mddev); | 5532 | return check_reshape(mddev); |
5580 | } | 5533 | } |
5581 | 5534 | ||
5582 | static int raid6_check_reshape(mddev_t *mddev) | 5535 | static int raid6_check_reshape(struct mddev *mddev) |
5583 | { | 5536 | { |
5584 | int new_chunk = mddev->new_chunk_sectors; | 5537 | int new_chunk = mddev->new_chunk_sectors; |
5585 | 5538 | ||
@@ -5599,7 +5552,7 @@ static int raid6_check_reshape(mddev_t *mddev) | |||
5599 | return check_reshape(mddev); | 5552 | return check_reshape(mddev); |
5600 | } | 5553 | } |
5601 | 5554 | ||
5602 | static void *raid5_takeover(mddev_t *mddev) | 5555 | static void *raid5_takeover(struct mddev *mddev) |
5603 | { | 5556 | { |
5604 | /* raid5 can take over: | 5557 | /* raid5 can take over: |
5605 | * raid0 - if there is only one strip zone - make it a raid4 layout | 5558 | * raid0 - if there is only one strip zone - make it a raid4 layout |
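The comment goes on to enumerate raid1, raid4 and raid6 as sources; the dispatch it describes looks roughly like this (a sketch assuming the guards match the code of this period):

    if (mddev->level == 0)
            return raid45_takeover_raid0(mddev, 5);
    if (mddev->level == 1)
            return raid5_takeover_raid1(mddev);  /* insists on 2 drives */
    if (mddev->level == 4) {
            mddev->new_layout = ALGORITHM_PARITY_N;
            mddev->new_level = 5;
            return setup_conf(mddev);
    }
    if (mddev->level == 6)
            return raid5_takeover_raid6(mddev);
    return ERR_PTR(-EINVAL);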
@@ -5622,7 +5575,7 @@ static void *raid5_takeover(mddev_t *mddev) | |||
5622 | return ERR_PTR(-EINVAL); | 5575 | return ERR_PTR(-EINVAL); |
5623 | } | 5576 | } |
5624 | 5577 | ||
5625 | static void *raid4_takeover(mddev_t *mddev) | 5578 | static void *raid4_takeover(struct mddev *mddev) |
5626 | { | 5579 | { |
5627 | /* raid4 can take over: | 5580 | /* raid4 can take over: |
5628 | * raid0 - if there is only one strip zone | 5581 | * raid0 - if there is only one strip zone |
@@ -5639,9 +5592,9 @@ static void *raid4_takeover(mddev_t *mddev) | |||
5639 | return ERR_PTR(-EINVAL); | 5592 | return ERR_PTR(-EINVAL); |
5640 | } | 5593 | } |
5641 | 5594 | ||
5642 | static struct mdk_personality raid5_personality; | 5595 | static struct md_personality raid5_personality; |
5643 | 5596 | ||
5644 | static void *raid6_takeover(mddev_t *mddev) | 5597 | static void *raid6_takeover(struct mddev *mddev) |
5645 | { | 5598 | { |
5646 | /* Currently can only take over a raid5. We map the | 5599 | /* Currently can only take over a raid5. We map the |
5647 | * personality to an equivalent raid6 personality | 5600 | * personality to an equivalent raid6 personality |
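Each raid5 rotation has a raid6 twin with the Q syndrome appended, named with an _6 suffix in raid5.h; the mapping is a plain switch (abridged sketch, assuming the remaining cases follow the same pattern):

    switch (mddev->layout) {
    case ALGORITHM_LEFT_ASYMMETRIC:
            new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
            break;
    case ALGORITHM_LEFT_SYMMETRIC:
            new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
            break;
    /* ... the other rotations map the same way ... */
    default:
            return ERR_PTR(-EINVAL);
    }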
@@ -5688,7 +5641,7 @@ static void *raid6_takeover(mddev_t *mddev) | |||
5688 | } | 5641 | } |
5689 | 5642 | ||
5690 | 5643 | ||
5691 | static struct mdk_personality raid6_personality = | 5644 | static struct md_personality raid6_personality = |
5692 | { | 5645 | { |
5693 | .name = "raid6", | 5646 | .name = "raid6", |
5694 | .level = 6, | 5647 | .level = 6, |
@@ -5710,7 +5663,7 @@ static struct mdk_personality raid6_personality = | |||
5710 | .quiesce = raid5_quiesce, | 5663 | .quiesce = raid5_quiesce, |
5711 | .takeover = raid6_takeover, | 5664 | .takeover = raid6_takeover, |
5712 | }; | 5665 | }; |
5713 | static struct mdk_personality raid5_personality = | 5666 | static struct md_personality raid5_personality = |
5714 | { | 5667 | { |
5715 | .name = "raid5", | 5668 | .name = "raid5", |
5716 | .level = 5, | 5669 | .level = 5, |
@@ -5733,7 +5686,7 @@ static struct mdk_personality raid5_personality = | |||
5733 | .takeover = raid5_takeover, | 5686 | .takeover = raid5_takeover, |
5734 | }; | 5687 | }; |
5735 | 5688 | ||
5736 | static struct mdk_personality raid4_personality = | 5689 | static struct md_personality raid4_personality = |
5737 | { | 5690 | { |
5738 | .name = "raid4", | 5691 | .name = "raid4", |
5739 | .level = 4, | 5692 | .level = 4, |
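These three tables are handed to the md core at module load through register_md_personality(); a sketch of the init path (the raid5_init() of this era registers exactly these three):

    static int __init raid5_init(void)
    {
            register_md_personality(&raid6_personality);
            register_md_personality(&raid5_personality);
            register_md_personality(&raid4_personality);
            return 0;
    }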
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 11b9566184b2..e10c5531f9c5 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -197,7 +197,7 @@ enum reconstruct_states { | |||
197 | struct stripe_head { | 197 | struct stripe_head { |
198 | struct hlist_node hash; | 198 | struct hlist_node hash; |
199 | struct list_head lru; /* inactive_list or handle_list */ | 199 | struct list_head lru; /* inactive_list or handle_list */ |
200 | struct raid5_private_data *raid_conf; | 200 | struct r5conf *raid_conf; |
201 | short generation; /* increments with every | 201 | short generation; /* increments with every |
202 | * reshape */ | 202 | * reshape */ |
203 | sector_t sector; /* sector of this row */ | 203 | sector_t sector; /* sector of this row */ |
@@ -248,7 +248,7 @@ struct stripe_head_state { | |||
248 | unsigned long ops_request; | 248 | unsigned long ops_request; |
249 | 249 | ||
250 | struct bio *return_bi; | 250 | struct bio *return_bi; |
251 | mdk_rdev_t *blocked_rdev; | 251 | struct md_rdev *blocked_rdev; |
252 | int handle_bad_blocks; | 252 | int handle_bad_blocks; |
253 | }; | 253 | }; |
254 | 254 | ||
@@ -344,12 +344,12 @@ enum { | |||
344 | 344 | ||
345 | 345 | ||
346 | struct disk_info { | 346 | struct disk_info { |
347 | mdk_rdev_t *rdev; | 347 | struct md_rdev *rdev; |
348 | }; | 348 | }; |
349 | 349 | ||
350 | struct raid5_private_data { | 350 | struct r5conf { |
351 | struct hlist_head *stripe_hashtbl; | 351 | struct hlist_head *stripe_hashtbl; |
352 | mddev_t *mddev; | 352 | struct mddev *mddev; |
353 | struct disk_info *spare; | 353 | struct disk_info *spare; |
354 | int chunk_sectors; | 354 | int chunk_sectors; |
355 | int level, algorithm; | 355 | int level, algorithm; |
@@ -436,11 +436,9 @@ struct raid5_private_data { | |||
436 | /* When taking over an array from a different personality, we store | 436 | /* When taking over an array from a different personality, we store |
437 | * the new thread here until we fully activate the array. | 437 | * the new thread here until we fully activate the array. |
438 | */ | 438 | */ |
439 | struct mdk_thread_s *thread; | 439 | struct md_thread *thread; |
440 | }; | 440 | }; |
441 | 441 | ||
442 | typedef struct raid5_private_data raid5_conf_t; | ||
443 | |||
444 | /* | 442 | /* |
445 | * Our supported algorithms | 443 | * Our supported algorithms |
446 | */ | 444 | */ |
@@ -503,7 +501,7 @@ static inline int algorithm_is_DDF(int layout) | |||
503 | return layout >= 8 && layout <= 10; | 501 | return layout >= 8 && layout <= 10; |
504 | } | 502 | } |
505 | 503 | ||
506 | extern int md_raid5_congested(mddev_t *mddev, int bits); | 504 | extern int md_raid5_congested(struct mddev *mddev, int bits); |
507 | extern void md_raid5_kick_device(raid5_conf_t *conf); | 505 | extern void md_raid5_kick_device(struct r5conf *conf); |
508 | extern int raid5_set_cache_size(mddev_t *mddev, int size); | 506 | extern int raid5_set_cache_size(struct mddev *mddev, int size); |
509 | #endif | 507 | #endif |
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc index d1e276a14fab..5b50f8dfc5d2 100644 --- a/lib/raid6/int.uc +++ b/lib/raid6/int.uc | |||
@@ -11,7 +11,7 @@ | |||
11 | * ----------------------------------------------------------------------- */ | 11 | * ----------------------------------------------------------------------- */ |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * raid6int$#.c | 14 | * int$#.c |
15 | * | 15 | * |
16 | * $#-way unrolled portable integer math RAID-6 instruction set | 16 | * $#-way unrolled portable integer math RAID-6 instruction set |
17 | * | 17 | * |
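The $# token is expanded by the awk unroller into the unroll factor, so the banner now matches the name of each generated file. Illustrative expansion for the 8-way variant:

    /*
     * int8.c
     *
     * 8-way unrolled portable integer math RAID-6 instruction set
     */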