author		Lars Ellenberg <lars.ellenberg@linbit.com>	2011-01-21 04:56:44 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2011-03-10 05:48:02 -0500
commit		20ceb2b22edaf51e59e76087efdc71a16a2858de (patch)
tree		a4f267242725bac2a915e879a6b6ac259218c5fa /drivers/block/drbd/drbd_bitmap.c
parent		62b0da3a244ac33d25a77861ef1cc0080103f2ff (diff)
drbd: describe bitmap locking for bulk operation in finer detail
Now that we no longer in-place endian-swap the bitmap, we allow selected
bitmap operations (testing bits, sometimes even setting bits) during some
bulk operations.

This caused us to hit a lot of FIXME asserts similar to

	FIXME asender in drbd_bm_count_bits,
	bitmap locked for 'write from resync_finished' by worker

which is now nonsense: looking at the bitmap is perfectly legal as long as
it is not being resized.

This cosmetic patch defines some flags to describe expectations in finer
detail, so the asserts in e.g. bm_change_bits_to() can be skipped if
appropriate.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
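For context: the patch replaces the single BM_LOCKED bit with a finer-grained
flag set. The enum itself is introduced in drbd_int.h and is not part of this
file's diff; the sketch below only reflects the shape suggested by the uses in
this patch (BM_DONT_TEST, BM_DONT_SET, BM_DONT_CLEAR, BM_LOCKED_MASK,
BM_P_VMALLOCED). The numeric values and the convenience mask are assumptions
for illustration, not taken from the commit.

/* Sketch only: real definitions live in drbd_int.h, outside this diff.
 * Values and the BM_LOCKED_MASK composition are assumed for illustration. */
enum bm_flag {
	/* what the current bulk operation forbids, in finer detail */
	BM_DONT_CLEAR  = 0x1,
	BM_DONT_SET    = 0x2,
	BM_DONT_TEST   = 0x4,

	/* everything forbidden: the old "bitmap is locked" behaviour */
	BM_LOCKED_MASK = BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST,

	/* allocation detail (kfree vs vfree of bm_pages), deliberately
	 * kept outside the lock mask */
	BM_P_VMALLOCED = 0x10000,
};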
Diffstat (limited to 'drivers/block/drbd/drbd_bitmap.c')
-rw-r--r--	drivers/block/drbd/drbd_bitmap.c	48
1 file changed, 20 insertions(+), 28 deletions(-)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 25428bc28476..b62dd5f26c5d 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -104,26 +104,16 @@ struct drbd_bitmap {
 
 	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */
 
-	unsigned long bm_flags;
+	enum bm_flag bm_flags;
 
 	/* debugging aid, in case we are still racy somewhere */
 	char *bm_why;
 	struct task_struct *bm_task;
 };
 
-/* definition of bits in bm_flags */
-#define BM_LOCKED 0
-// #define BM_MD_IO_ERROR 1 unused now.
-#define BM_P_VMALLOCED 2
-
 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 			       unsigned long e, int val, const enum km_type km);
 
-static int bm_is_locked(struct drbd_bitmap *b)
-{
-	return test_bit(BM_LOCKED, &b->bm_flags);
-}
-
 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
 static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
 {
@@ -140,7 +130,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
 		b->bm_task == mdev->worker.task ? "worker" : "?");
 }
 
-void drbd_bm_lock(struct drbd_conf *mdev, char *why)
+void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	int trylock_failed;
@@ -163,8 +153,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
 			b->bm_task == mdev->worker.task ? "worker" : "?");
 		mutex_lock(&b->bm_change);
 	}
-	if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
+	if (BM_LOCKED_MASK & b->bm_flags)
 		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
+	b->bm_flags |= flags & BM_LOCKED_MASK;
 
 	b->bm_why  = why;
 	b->bm_task = current;
@@ -178,9 +169,10 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
 		return;
 	}
 
-	if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
+	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
 		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
 
+	b->bm_flags &= ~BM_LOCKED_MASK;
 	b->bm_why  = NULL;
 	b->bm_task = NULL;
 	mutex_unlock(&b->bm_change);
@@ -421,9 +413,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 	}
 
 	if (vmalloced)
-		set_bit(BM_P_VMALLOCED, &b->bm_flags);
+		b->bm_flags |= BM_P_VMALLOCED;
 	else
-		clear_bit(BM_P_VMALLOCED, &b->bm_flags);
+		b->bm_flags &= ~BM_P_VMALLOCED;
 
 	return new_pages;
 }
@@ -460,7 +452,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev)
 {
 	ERR_IF (!mdev->bitmap) return;
 	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
-	bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
+	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
 	kfree(mdev->bitmap);
 	mdev->bitmap = NULL;
 }
@@ -623,7 +615,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
 
 	ERR_IF(!b) return -ENOMEM;
 
-	drbd_bm_lock(mdev, "resize");
+	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
 
 	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
 			(unsigned long long)capacity);
@@ -631,7 +623,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
 	if (capacity == b->bm_dev_capacity)
 		goto out;
 
-	opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);
+	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
 
 	if (capacity == 0) {
 		spin_lock_irq(&b->bm_lock);
@@ -1030,7 +1022,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
 	 * as we submit copies of pages anyways.
 	 */
 	if (!ctx.flags)
-		WARN_ON(!bm_is_locked(b));
+		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
 
 	num_pages = b->bm_number_of_pages;
 
@@ -1220,7 +1212,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
 	ERR_IF(!b->bm_pages) return i;
 
 	spin_lock_irq(&b->bm_lock);
-	if (bm_is_locked(b))
+	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 
 	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
@@ -1246,13 +1238,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
  * you must take drbd_bm_lock() first */
 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
 {
-	/* WARN_ON(!bm_is_locked(mdev)); */
+	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
 	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
 }
 
 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
 {
-	/* WARN_ON(!bm_is_locked(mdev)); */
+	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
 	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
 }
 
@@ -1322,7 +1314,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 	ERR_IF(!b->bm_pages) return 0;
 
 	spin_lock_irqsave(&b->bm_lock, flags);
-	if (bm_is_locked(b))
+	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
 		bm_print_lock_info(mdev);
 
 	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
@@ -1439,7 +1431,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
 	ERR_IF(!b->bm_pages) return 0;
 
 	spin_lock_irqsave(&b->bm_lock, flags);
-	if (bm_is_locked(b))
+	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 	if (bitnr < b->bm_bits) {
 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
@@ -1474,7 +1466,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	ERR_IF(!b->bm_pages) return 1;
 
 	spin_lock_irqsave(&b->bm_lock, flags);
-	if (bm_is_locked(b))
+	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 	for (bitnr = s; bitnr <= e; bitnr++) {
 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
@@ -1522,7 +1514,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
 	ERR_IF(!b->bm_pages) return 0;
 
 	spin_lock_irqsave(&b->bm_lock, flags);
-	if (bm_is_locked(b))
+	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 
 	s = S2W(enr);
@@ -1555,7 +1547,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
 	ERR_IF(!b->bm_pages) return 0;
 
 	spin_lock_irq(&b->bm_lock);
-	if (bm_is_locked(b))
+	if (BM_DONT_SET & b->bm_flags)
 		bm_print_lock_info(mdev);
 	weight = b->bm_set;
 
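Taken together, a bulk operation that only needs to prevent bit modification
(the "write from resync_finished" case quoted in the commit message) can now
lock the bitmap while still permitting readers. The sketch below is
illustrative only; the function name and call site are assumed, not part of
this diff, and only drbd_bm_lock()/drbd_bm_unlock() and the flag names come
from the patch itself.

/* Hypothetical caller (not in this diff): a writer flushing the bitmap
 * after resync forbids set/clear but leaves testing allowed, so concurrent
 * drbd_bm_test_bit()/drbd_bm_count_bits() calls, which only check
 * BM_DONT_TEST, no longer trip the FIXME assert. */
static void example_resync_finished_flush(struct drbd_conf *mdev)
{
	drbd_bm_lock(mdev, "write from resync_finished",
		     BM_DONT_SET | BM_DONT_CLEAR);

	/* ... write out the on-disk bitmap here; readers may still test
	 * and count bits under the spinlock ... */

	drbd_bm_unlock(mdev);
}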