author     Dan Williams <dan.j.williams@intel.com>   2007-07-09 14:56:43 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2007-07-13 11:06:15 -0400
commit     45b4233caac05da0118b608a9fc2a40a9fc580cd (patch)
tree       ebc65304c088b6f2162c5ba4e04382bd49736040 /drivers/md/raid5.c
parent     a445685647e825c713175d180ffc8dd54d90589b (diff)
raid5: replace custom debug PRINTKs with standard pr_debug
Replaces PRINTK with pr_debug, and kills the RAID5_DEBUG definition in
favor of the global DEBUG definition. To get local debug messages, just add
'#define DEBUG' to the top of the file.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-By: NeilBrown <neilb@suse.de>
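The '#define DEBUG' hint above works because of how pr_debug() itself is built: in kernels of this era it expands to a real printk(KERN_DEBUG ...) only when DEBUG is defined, and compiles away to nothing otherwise. A minimal sketch of that mechanism, paraphrasing (not quoting verbatim) the include/linux/kernel.h of the time:

```c
/*
 * Sketch of the pr_debug() mechanism this patch relies on (a paraphrase
 * of the era's include/linux/kernel.h, not a verbatim copy).
 */
#ifdef DEBUG
#define pr_debug(fmt, arg...) \
	printk(KERN_DEBUG fmt, ##arg)
#else
#define pr_debug(fmt, arg...) \
	do { } while (0)	/* compiles away; arguments are not evaluated */
#endif

/*
 * To get raid5's debug output back after this patch, add "#define DEBUG"
 * above the #include lines at the top of drivers/md/raid5.c.
 */
```

The net effect is the same as the old PRINTK()/RAID5_DEBUG pair, but it follows the convention the rest of the tree already uses, so raid5 no longer carries a private debug macro of its own.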
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--   drivers/md/raid5.c   116
1 files changed, 58 insertions, 58 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 38232fa111a4..e372e57687ee 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -80,7 +80,6 @@
 /*
  * The following can be used to debug the driver
  */
-#define RAID5_DEBUG 0
 #define RAID5_PARANOIA 1
 #if RAID5_PARANOIA && defined(CONFIG_SMP)
 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
@@ -88,8 +87,7 @@
 # define CHECK_DEVLOCK()
 #endif
 
-#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
-#if RAID5_DEBUG
+#ifdef DEBUG
 #define inline
 #define __inline__
 #endif
@@ -169,7 +167,8 @@ static void release_stripe(struct stripe_head *sh)
 
 static inline void remove_hash(struct stripe_head *sh)
 {
-PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
+pr_debug("remove_hash(), stripe %llu\n",
+(unsigned long long)sh->sector);
 
 hlist_del_init(&sh->hash);
 }
@@ -178,7 +177,8 @@ static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
 struct hlist_head *hp = stripe_hash(conf, sh->sector);
 
-PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
+pr_debug("insert_hash(), stripe %llu\n",
+(unsigned long long)sh->sector);
 
 CHECK_DEVLOCK();
 hlist_add_head(&sh->hash, hp);
@@ -243,7 +243,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 
 CHECK_DEVLOCK();
-PRINTK("init_stripe called, stripe %llu\n",
+pr_debug("init_stripe called, stripe %llu\n",
 (unsigned long long)sh->sector);
 
 remove_hash(sh);
@@ -277,11 +277,11 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
 struct hlist_node *hn;
 
 CHECK_DEVLOCK();
-PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
+pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
 if (sh->sector == sector && sh->disks == disks)
 return sh;
-PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
+pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 return NULL;
 }
 
@@ -293,7 +293,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
 {
 struct stripe_head *sh;
 
-PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
+pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
 spin_lock_irq(&conf->device_lock);
 
@@ -554,8 +554,8 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
 if (bi == &sh->dev[i].req)
 break;
 
-PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
 uptodate);
 if (i == disks) {
 BUG();
@@ -630,7 +630,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
 if (bi == &sh->dev[i].req)
 break;
 
-PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
+pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
 uptodate);
 if (i == disks) {
@@ -675,7 +675,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 char b[BDEVNAME_SIZE];
 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
-PRINTK("raid5: error called\n");
+pr_debug("raid5: error called\n");
 
 if (!test_bit(Faulty, &rdev->flags)) {
 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -946,7 +946,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
 int i, count, disks = sh->disks;
 void *ptr[MAX_XOR_BLOCKS], *dest, *p;
 
-PRINTK("compute_block, stripe %llu, idx %d\n",
+pr_debug("compute_block, stripe %llu, idx %d\n",
 (unsigned long long)sh->sector, dd_idx);
 
 dest = page_address(sh->dev[dd_idx].page);
@@ -977,7 +977,7 @@ static void compute_parity5(struct stripe_head *sh, int method)
 void *ptr[MAX_XOR_BLOCKS], *dest;
 struct bio *chosen;
 
-PRINTK("compute_parity5, stripe %llu, method %d\n",
+pr_debug("compute_parity5, stripe %llu, method %d\n",
 (unsigned long long)sh->sector, method);
 
 count = 0;
@@ -1075,7 +1075,7 @@ static void compute_parity6(struct stripe_head *sh, int method)
 qd_idx = raid6_next_disk(pd_idx, disks);
 d0_idx = raid6_next_disk(qd_idx, disks);
 
-PRINTK("compute_parity, stripe %llu, method %d\n",
+pr_debug("compute_parity, stripe %llu, method %d\n",
 (unsigned long long)sh->sector, method);
 
 switch(method) {
@@ -1153,7 +1153,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
 int pd_idx = sh->pd_idx;
 int qd_idx = raid6_next_disk(pd_idx, disks);
 
-PRINTK("compute_block_1, stripe %llu, idx %d\n",
+pr_debug("compute_block_1, stripe %llu, idx %d\n",
 (unsigned long long)sh->sector, dd_idx);
 
 if ( dd_idx == qd_idx ) {
@@ -1200,7 +1200,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
 BUG_ON(faila == failb);
 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
 
-PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
+pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
 
 if ( failb == disks-1 ) {
@@ -1259,7 +1259,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 raid5_conf_t *conf = sh->raid_conf;
 int firstwrite=0;
 
-PRINTK("adding bh b#%llu to stripe s#%llu\n",
+pr_debug("adding bh b#%llu to stripe s#%llu\n",
 (unsigned long long)bi->bi_sector,
 (unsigned long long)sh->sector);
 
@@ -1288,7 +1288,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 spin_unlock_irq(&conf->device_lock);
 spin_unlock(&sh->lock);
 
-PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
+pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
 (unsigned long long)bi->bi_sector,
 (unsigned long long)sh->sector, dd_idx);
 
@@ -1448,14 +1448,14 @@ static void handle_issuing_new_read_requests5(struct stripe_head *sh,
  * by computing it, but we might not be able to
  */
 if (s->uptodate == disks-1) {
-PRINTK("Computing block %d\n", i);
+pr_debug("Computing block %d\n", i);
 compute_block(sh, i);
 s->uptodate++;
 } else if (test_bit(R5_Insync, &dev->flags)) {
 set_bit(R5_LOCKED, &dev->flags);
 set_bit(R5_Wantread, &dev->flags);
 s->locked++;
-PRINTK("Reading block %d (sync=%d)\n",
+pr_debug("Reading block %d (sync=%d)\n",
 i, s->syncing);
 }
 }
@@ -1485,7 +1485,7 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
  * by computing it, but we might not be able to
  */
 if (s->uptodate == disks-1) {
-PRINTK("Computing stripe %llu block %d\n",
+pr_debug("Computing stripe %llu block %d\n",
 (unsigned long long)sh->sector, i);
 compute_block_1(sh, i, 0);
 s->uptodate++;
@@ -1502,7 +1502,7 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
 break;
 }
 BUG_ON(other < 0);
-PRINTK("Computing stripe %llu blocks %d,%d\n",
+pr_debug("Computing stripe %llu blocks %d,%d\n",
 (unsigned long long)sh->sector,
 i, other);
 compute_block_2(sh, i, other);
@@ -1511,7 +1511,7 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
 set_bit(R5_LOCKED, &dev->flags);
 set_bit(R5_Wantread, &dev->flags);
 s->locked++;
-PRINTK("Reading block %d (sync=%d)\n",
+pr_debug("Reading block %d (sync=%d)\n",
 i, s->syncing);
 }
 }
@@ -1539,7 +1539,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
 /* We can return any write requests */
 struct bio *wbi, *wbi2;
 int bitmap_end = 0;
-PRINTK("Return write for disc %d\n", i);
+pr_debug("Return write for disc %d\n", i);
 spin_lock_irq(&conf->device_lock);
 wbi = dev->written;
 dev->written = NULL;
@@ -1591,7 +1591,7 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
 rcw += 2*disks;
 }
 }
-PRINTK("for sector %llu, rmw=%d rcw=%d\n",
+pr_debug("for sector %llu, rmw=%d rcw=%d\n",
 (unsigned long long)sh->sector, rmw, rcw);
 set_bit(STRIPE_HANDLE, &sh->state);
 if (rmw < rcw && rmw > 0)
@@ -1604,7 +1604,7 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
 test_bit(R5_Insync, &dev->flags)) {
 if (
 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-PRINTK("Read_old block "
+pr_debug("Read_old block "
 "%d for r-m-w\n", i);
 set_bit(R5_LOCKED, &dev->flags);
 set_bit(R5_Wantread, &dev->flags);
@@ -1626,7 +1626,7 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
 test_bit(R5_Insync, &dev->flags)) {
 if (
 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-PRINTK("Read_old block "
+pr_debug("Read_old block "
 "%d for Reconstruct\n", i);
 set_bit(R5_LOCKED, &dev->flags);
 set_bit(R5_Wantread, &dev->flags);
@@ -1642,13 +1642,13 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
  */
 if (s->locked == 0 && (rcw == 0 || rmw == 0) &&
 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
-PRINTK("Computing parity...\n");
+pr_debug("Computing parity...\n");
 compute_parity5(sh, rcw == 0 ?
 RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
 /* now every locked buffer is ready to be written */
 for (i = disks; i--; )
 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
-PRINTK("Writing block %d\n", i);
+pr_debug("Writing block %d\n", i);
 s->locked++;
 set_bit(R5_Wantwrite, &sh->dev[i].flags);
 if (!test_bit(R5_Insync, &sh->dev[i].flags)
@@ -1680,13 +1680,13 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
 !test_bit(R5_UPTODATE, &dev->flags)) {
 if (test_bit(R5_Insync, &dev->flags)) rcw++;
 else {
-PRINTK("raid6: must_compute: "
+pr_debug("raid6: must_compute: "
 "disk %d flags=%#lx\n", i, dev->flags);
 must_compute++;
 }
 }
 }
-PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
+pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
 (unsigned long long)sh->sector, rcw, must_compute);
 set_bit(STRIPE_HANDLE, &sh->state);
 
@@ -1701,14 +1701,14 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
 test_bit(R5_Insync, &dev->flags)) {
 if (
 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-PRINTK("Read_old stripe %llu "
+pr_debug("Read_old stripe %llu "
 "block %d for Reconstruct\n",
 (unsigned long long)sh->sector, i);
 set_bit(R5_LOCKED, &dev->flags);
 set_bit(R5_Wantread, &dev->flags);
 s->locked++;
 } else {
-PRINTK("Request delayed stripe %llu "
+pr_debug("Request delayed stripe %llu "
 "block %d for Reconstruct\n",
 (unsigned long long)sh->sector, i);
 set_bit(STRIPE_DELAYED, &sh->state);
@@ -1738,13 +1738,13 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
 }
 }
 
-PRINTK("Computing parity for stripe %llu\n",
+pr_debug("Computing parity for stripe %llu\n",
 (unsigned long long)sh->sector);
 compute_parity6(sh, RECONSTRUCT_WRITE);
 /* now every locked buffer is ready to be written */
 for (i = disks; i--; )
 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
-PRINTK("Writing stripe %llu block %d\n",
+pr_debug("Writing stripe %llu block %d\n",
 (unsigned long long)sh->sector, i);
 s->locked++;
 set_bit(R5_Wantwrite, &sh->dev[i].flags);
@@ -1973,7 +1973,7 @@ static void handle_stripe5(struct stripe_head *sh)
 struct r5dev *dev;
 
 memset(&s, 0, sizeof(s));
-PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
+pr_debug("handling stripe %llu, cnt=%d, pd_idx=%d\n",
 (unsigned long long)sh->sector, atomic_read(&sh->count),
 sh->pd_idx);
 
@@ -1992,12 +1992,12 @@ static void handle_stripe5(struct stripe_head *sh)
 struct r5dev *dev = &sh->dev[i];
 clear_bit(R5_Insync, &dev->flags);
 
-PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
+pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
 i, dev->flags, dev->toread, dev->towrite, dev->written);
 /* maybe we can reply to a read */
 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
 struct bio *rbi, *rbi2;
-PRINTK("Return read for disc %d\n", i);
+pr_debug("Return read for disc %d\n", i);
 spin_lock_irq(&conf->device_lock);
 rbi = dev->toread;
 dev->toread = NULL;
@@ -2044,7 +2044,7 @@ static void handle_stripe5(struct stripe_head *sh)
 set_bit(R5_Insync, &dev->flags);
 }
 rcu_read_unlock();
-PRINTK("locked=%d uptodate=%d to_read=%d"
+pr_debug("locked=%d uptodate=%d to_read=%d"
 " to_write=%d failed=%d failed_num=%d\n",
 s.locked, s.uptodate, s.to_read, s.to_write,
 s.failed, s.failed_num);
@@ -2174,7 +2174,7 @@ static void handle_stripe5(struct stripe_head *sh)
 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
 bi->bi_bdev = rdev->bdev;
-PRINTK("for %llu schedule op %ld on disc %d\n",
+pr_debug("for %llu schedule op %ld on disc %d\n",
 (unsigned long long)sh->sector, bi->bi_rw, i);
 atomic_inc(&sh->count);
 bi->bi_sector = sh->sector + rdev->data_offset;
@@ -2194,7 +2194,7 @@ static void handle_stripe5(struct stripe_head *sh)
 } else {
 if (rw == WRITE)
 set_bit(STRIPE_DEGRADED, &sh->state);
-PRINTK("skip op %ld on disc %d for sector %llu\n",
+pr_debug("skip op %ld on disc %d for sector %llu\n",
 bi->bi_rw, i, (unsigned long long)sh->sector);
 clear_bit(R5_LOCKED, &sh->dev[i].flags);
 set_bit(STRIPE_HANDLE, &sh->state);
@@ -2213,7 +2213,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 struct r5dev *dev, *pdev, *qdev;
 
 r6s.qd_idx = raid6_next_disk(pd_idx, disks);
-PRINTK("handling stripe %llu, state=%#lx cnt=%d, "
+pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
 "pd_idx=%d, qd_idx=%d\n",
 (unsigned long long)sh->sector, sh->state,
 atomic_read(&sh->count), pd_idx, r6s.qd_idx);
@@ -2234,12 +2234,12 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 dev = &sh->dev[i];
 clear_bit(R5_Insync, &dev->flags);
 
-PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
+pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
 i, dev->flags, dev->toread, dev->towrite, dev->written);
 /* maybe we can reply to a read */
 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
 struct bio *rbi, *rbi2;
-PRINTK("Return read for disc %d\n", i);
+pr_debug("Return read for disc %d\n", i);
 spin_lock_irq(&conf->device_lock);
 rbi = dev->toread;
 dev->toread = NULL;
@@ -2288,7 +2288,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 set_bit(R5_Insync, &dev->flags);
 }
 rcu_read_unlock();
-PRINTK("locked=%d uptodate=%d to_read=%d"
+pr_debug("locked=%d uptodate=%d to_read=%d"
 " to_write=%d failed=%d failed_num=%d,%d\n",
 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
 r6s.failed_num[0], r6s.failed_num[1]);
@@ -2428,7 +2428,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
 bi->bi_bdev = rdev->bdev;
-PRINTK("for %llu schedule op %ld on disc %d\n",
+pr_debug("for %llu schedule op %ld on disc %d\n",
 (unsigned long long)sh->sector, bi->bi_rw, i);
 atomic_inc(&sh->count);
 bi->bi_sector = sh->sector + rdev->data_offset;
@@ -2448,7 +2448,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 } else {
 if (rw == WRITE)
 set_bit(STRIPE_DEGRADED, &sh->state);
-PRINTK("skip op %ld on disc %d for sector %llu\n",
+pr_debug("skip op %ld on disc %d for sector %llu\n",
 bi->bi_rw, i, (unsigned long long)sh->sector);
 clear_bit(R5_LOCKED, &sh->dev[i].flags);
 set_bit(STRIPE_HANDLE, &sh->state);
@@ -2692,7 +2692,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
 }
 
 
-PRINTK("raid5_align_endio : io error...handing IO for a retry\n");
+pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
 
 add_bio_to_retry(raid_bi, conf);
 return 0;
@@ -2730,7 +2730,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 mdk_rdev_t *rdev;
 
 if (!in_chunk_boundary(mddev, raid_bio)) {
-PRINTK("chunk_aligned_read : non aligned\n");
+pr_debug("chunk_aligned_read : non aligned\n");
 return 0;
 }
 /*
@@ -2854,7 +2854,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
 
 new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
 &dd_idx, &pd_idx, conf);
-PRINTK("raid5: make_request, sector %llu logical %llu\n",
+pr_debug("raid5: make_request, sector %llu logical %llu\n",
 (unsigned long long)new_sector,
 (unsigned long long)logical_sector);
 
@@ -3227,7 +3227,7 @@ static void raid5d (mddev_t *mddev)
 raid5_conf_t *conf = mddev_to_conf(mddev);
 int handled;
 
-PRINTK("+++ raid5d active\n");
+pr_debug("+++ raid5d active\n");
 
 md_check_recovery(mddev);
 
@@ -3279,13 +3279,13 @@ static void raid5d (mddev_t *mddev)
 
 spin_lock_irq(&conf->device_lock);
 }
-PRINTK("%d stripes handled\n", handled);
+pr_debug("%d stripes handled\n", handled);
 
 spin_unlock_irq(&conf->device_lock);
 
 unplug_slaves(mddev);
 
-PRINTK("--- raid5d inactive\n");
+pr_debug("--- raid5d inactive\n");
 }
 
 static ssize_t
@@ -3461,7 +3461,7 @@ static int run(mddev_t *mddev)
 atomic_set(&conf->preread_active_stripes, 0);
 atomic_set(&conf->active_aligned_reads, 0);
 
-PRINTK("raid5: run(%s) called.\n", mdname(mddev));
+pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
 ITERATE_RDEV(mddev,rdev,tmp) {
 raid_disk = rdev->raid_disk;
@@ -3644,7 +3644,7 @@ static int stop(mddev_t *mddev)
 return 0;
 }
 
-#if RAID5_DEBUG
+#ifdef DEBUG
 static void print_sh (struct seq_file *seq, struct stripe_head *sh)
 {
 int i;
@@ -3691,7 +3691,7 @@ static void status (struct seq_file *seq, mddev_t *mddev)
 conf->disks[i].rdev &&
 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
 seq_printf (seq, "]");
-#if RAID5_DEBUG
+#ifdef DEBUG
 seq_printf (seq, "\n");
 printall(seq, conf);
 #endif