Diffstat (limited to 'drivers/block/drbd/drbd_int.h')
-rw-r--r--    drivers/block/drbd/drbd_int.h    90
1 file changed, 55 insertions, 35 deletions
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8d680562ba73..02f013a073a7 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -712,7 +712,6 @@ struct drbd_request {
 	struct list_head tl_requests; /* ring list in the transfer log */
 	struct bio *master_bio;       /* master bio pointer */
 	unsigned long rq_state; /* see comments above _req_mod() */
-	int seq_num;
 	unsigned long start_time;
 };
 
@@ -851,6 +850,7 @@ enum {
 	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
 	AL_SUSPENDED,		/* Activity logging is currently suspended. */
 	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
+	STATE_SENT,		/* Do not change state/UUIDs while this is set */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@ enum bm_flag {
 	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
 
 	/* currently locked for bulk operation */
-	BM_LOCKED_MASK = 0x7,
+	BM_LOCKED_MASK = 0xf,
 
 	/* in detail, that is: */
 	BM_DONT_CLEAR = 0x1,
 	BM_DONT_SET = 0x2,
 	BM_DONT_TEST = 0x4,
 
+	/* so we can mark it locked for bulk operation,
+	 * and still allow all non-bulk operations */
+	BM_IS_LOCKED = 0x8,
+
 	/* (test bit, count bit) allowed (common case) */
-	BM_LOCKED_TEST_ALLOWED = 0x3,
+	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
 
 	/* testing bits, as well as setting new bits allowed, but clearing bits
 	 * would be unexpected. Used during bitmap receive. Setting new bits
 	 * requires sending of "out-of-sync" information, though. */
-	BM_LOCKED_SET_ALLOWED = 0x1,
+	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
 
-	/* clear is not expected while bitmap is locked for bulk operation */
+	/* for drbd_bm_write_copy_pages, everything is allowed,
+	 * only concurrent bulk operations are locked out. */
+	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
 };
 
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
 struct drbd_work_queue {
 	struct list_head q;
 	struct semaphore s; /* producers up it, worker down()s it */
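The point of this hunk is the split between the per-bit BM_DONT_* bits and the new BM_IS_LOCKED bit: a bulk operation can now mark the bitmap locked (to exclude other bulk operations) while still allowing some or all single-bit traffic. A minimal standalone sketch of how the two kinds of flags are meant to be consulted; the helper names below are illustrative, not part of the patch:

    /* Values copied from the hunk above; the helpers are hypothetical. */
    enum bm_flag {
        BM_DONT_CLEAR  = 0x1,
        BM_DONT_SET    = 0x2,
        BM_DONT_TEST   = 0x4,
        BM_IS_LOCKED   = 0x8,	/* set by every bulk lock */
        BM_LOCKED_MASK = 0xf,
    };

    /* Single-bit operations check only their own BM_DONT_* bit. */
    static int bm_set_bit_allowed(enum bm_flag f)
    {
        return !(f & BM_DONT_SET);
    }

    /* A new bulk operation must wait until BM_IS_LOCKED clears.  This is
     * why BM_LOCKED_CHANGE_ALLOWED == BM_IS_LOCKED alone is enough for
     * drbd_bm_write_copy_pages(): concurrent bulk writers are excluded,
     * but normal bit traffic is not restricted at all. */
    static int bm_bulk_op_allowed(enum bm_flag f)
    {
        return !(f & BM_IS_LOCKED);
    }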
@@ -938,8 +937,7 @@ struct drbd_backing_dev {
 };
 
 struct drbd_md_io {
-	struct drbd_conf *mdev;
-	struct completion event;
+	unsigned int done;
 	int error;
 };
 
@@ -1022,6 +1020,7 @@ struct drbd_conf {
 	struct drbd_tl_epoch *newest_tle;
 	struct drbd_tl_epoch *oldest_tle;
 	struct list_head out_of_sequence_requests;
+	struct list_head barrier_acked_requests;
 	struct hlist_head *tl_hash;
 	unsigned int tl_hash_s;
 
@@ -1056,6 +1055,8 @@ struct drbd_conf {
 	struct crypto_hash *csums_tfm;
 	struct crypto_hash *verify_tfm;
 
+	unsigned long last_reattach_jif;
+	unsigned long last_reconnect_jif;
 	struct drbd_thread receiver;
 	struct drbd_thread worker;
 	struct drbd_thread asender;
@@ -1094,7 +1095,8 @@ struct drbd_conf {
 	wait_queue_head_t ee_wait;
 	struct page *md_io_page;	/* one page buffer for md_io */
 	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
-	struct mutex md_io_mutex;	/* protects the md_io_buffer */
+	struct drbd_md_io md_io;
+	atomic_t md_io_in_use;		/* protects the md_io, md_io_page and md_io_tmpp */
 	spinlock_t al_lock;
 	wait_queue_head_t al_wait;
 	struct lru_cache *act_log;	/* activity log */
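Replacing md_io_mutex with a refcount-style atomic_t changes the locking discipline for the single metadata IO page: instead of an uninterruptible mutex_lock(), the buffer is claimed with a cmpxchg try-lock that can also give up when the local disk fails. A hedged sketch of the claim/release pair this enables (the real bodies live elsewhere in the driver and may differ in detail):

    /* Hedged sketch, assuming the fields declared in this header. */
    void *drbd_md_get_buffer(struct drbd_conf *mdev)
    {
        int r;

        /* Claim exclusive use of md_io_page, but stop waiting if the
         * disk goes away rather than blocking forever. */
        wait_event(mdev->misc_wait,
                   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
                   mdev->state.disk <= D_FAILED);

        return r == 0 ? page_address(mdev->md_io_page) : NULL;
    }

    void drbd_md_put_buffer(struct drbd_conf *mdev)
    {
        if (atomic_dec_and_test(&mdev->md_io_in_use))
            wake_up(&mdev->misc_wait);
    }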
@@ -1228,8 +1230,8 @@ extern int drbd_send_uuids(struct drbd_conf *mdev);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
 extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
 extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
 			enum drbd_packets cmd, struct p_header80 *h,
 			size_t size, unsigned msg_flags);
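The send-state split makes the state being sent an explicit argument, so a caller can transmit a value it has already computed instead of re-sampling mdev->state while building the packet. Illustrative call sites (hedged; the variable name is ours):

    union drbd_state ns = mdev->state;  /* or a state about to be committed */
    drbd_send_state(mdev, ns);          /* send exactly this state */
    drbd_send_current_state(mdev);      /* convenience: sample and send now */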
@@ -1461,6 +1463,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
 extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
 extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
 extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
 		unsigned long al_enr);
 extern size_t drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@ extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
 extern mempool_t *drbd_request_mempool;
 extern mempool_t *drbd_ee_mempool;
 
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, it first takes pages from the pool.
+ * Only if the pool is depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
 extern spinlock_t drbd_pp_lock;
 extern int drbd_pp_vacant;
 extern wait_queue_head_t drbd_pp_wait;
 
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty, currently we probably can get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES	128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
 extern rwlock_t global_state_lock;
 
 extern struct drbd_conf *drbd_new_device(unsigned int minor);
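A hedged sketch of how these new globals would typically be created at module init; the function name and error-handling shape here are illustrative (the real setup is in drbd_main.c), but mempool_create_page_pool(), bioset_create() and bio_alloc_bioset() are the standard kernel APIs of this era:

    #include <linux/bio.h>
    #include <linux/mempool.h>

    mempool_t *drbd_md_io_page_pool;
    struct bio_set *drbd_md_io_bio_set;

    /* Illustrative init: a page mempool with an emergency reserve of
     * DRBD_MIN_POOL_PAGES pages, plus a bio_set so meta-data IO can
     * always obtain a bio even under memory pressure. */
    static int __init drbd_md_pools_init(void)
    {
        drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
        if (!drbd_md_io_page_pool)
            return -ENOMEM;

        drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
        if (!drbd_md_io_bio_set) {
            mempool_destroy(drbd_md_io_page_pool);
            return -ENOMEM;
        }
        return 0;
    }

    struct bio *bio_alloc_drbd(gfp_t gfp_mask)
    {
        /* fall back to the generic allocator if the set is not up yet */
        if (!drbd_md_io_bio_set)
            return bio_alloc(gfp_mask, 1);
        return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
    }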
@@ -1536,8 +1566,12 @@ extern void resume_next_sg(struct drbd_conf *mdev);
 extern void suspend_other_sg(struct drbd_conf *mdev);
 extern int drbd_resync_finished(struct drbd_conf *mdev);
 /* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
 		struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+					    unsigned int *done);
 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
 extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
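Taken together with the md_io / md_io_in_use changes above, the intended caller pattern looks roughly like this (hedged sketch; the real drbd_md_sync_page_io differs in detail and lives in drbd_actlog.c):

    /* Hedged usage sketch, assuming the declarations above. */
    static int md_sync_io_sketch(struct drbd_conf *mdev,
                                 struct drbd_backing_dev *bdev,
                                 sector_t sector, int rw)
    {
        void *buf = drbd_md_get_buffer(mdev);	/* NULL if disk failed */
        int err;

        if (!buf)
            return -ENODEV;

        /* ... fill buf, submit a bio from drbd_md_io_bio_set whose
         * completion handler sets mdev->md_io.done and .error ... */

        /* Do not hang on a dead local disk: return when either the
         * completion handler fires or the disk is declared failed. */
        wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
        err = mdev->md_io.error;

        drbd_md_put_buffer(mdev);
        return err;
    }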
@@ -1754,19 +1788,6 @@ static inline struct page *page_chain_next(struct page *page)
 #define page_chain_for_each_safe(page, n) \
 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
 
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
-	struct bio_vec *bvec;
-	int i;
-
-	__bio_for_each_segment(bvec, bio, i, 0) {
-		if (page_count(bvec->bv_page) > 1)
-			return 1;
-	}
-
-	return 0;
-}
-
 static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
 {
 	struct page *page = e->pages;
@@ -1777,7 +1798,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
 	return 0;
 }
 
-
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
 	wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
 	 * Note: currently we don't support such large bitmaps on 32bit
 	 * arch anyways, but no harm done to be prepared for it here.
 	 */
-	unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+	unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
 	unsigned long left = *bits_left >> shift;
 	unsigned long total = 1UL + (mdev->rs_total >> shift);
 	unsigned long tmp = 1000UL - left * 1000UL/total;
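The rewritten condition is equivalent (on 64-bit, UINT_MAX == 2^32 - 1, so `> UINT_MAX` is `>= 1ULL << 32`) but avoids the shift constant. The reason for pre-scaling by 10 or 16 bits is that `left * 1000UL / total` must fit in an unsigned long on 32-bit. A standalone userspace demo of the same per-mille computation (illustrative, not from the patch):

    #include <limits.h>
    #include <stdio.h>

    static unsigned long permille_done(unsigned long long rs_total,
                                       unsigned long long bits_left)
    {
        /* Scale both values down so "left * 1000" cannot overflow an
         * unsigned long even on 32-bit: 10 bits normally, 16 bits once
         * rs_total no longer fits in 32 bits (the UINT_MAX test above). */
        unsigned int shift = rs_total > UINT_MAX ? 16 : 10;
        unsigned long left = bits_left >> shift;
        unsigned long total = 1UL + (rs_total >> shift);

        return 1000UL - left * 1000UL / total;
    }

    int main(void)
    {
        /* half of an 8-TiB-scale bitmap still out of sync: prints ~500 */
        printf("%lu\n", permille_done(1ULL << 33, 1ULL << 32));
        return 0;
    }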
@@ -2306,12 +2326,12 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
 	case D_OUTDATED:
 	case D_CONSISTENT:
 	case D_UP_TO_DATE:
+	case D_FAILED:
 		/* disk state is stable as well. */
 		break;
 
 	/* no new io accepted during transitional states */
 	case D_ATTACHING:
-	case D_FAILED:
 	case D_NEGOTIATING:
 	case D_UNKNOWN:
 	case D_MASK: