Diffstat (limited to 'fs/ocfs2/dlm/dlmcommon.h')
 fs/ocfs2/dlm/dlmcommon.h (-rw-r--r--) | 63
 1 file changed, 53 insertions(+), 10 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 88cc43df18f1..9bdc9cf65991 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -37,7 +37,17 @@
 #define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
 #define DLM_THREAD_MS 200 // flush at least every 200 ms
 
-#define DLM_HASH_BUCKETS (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_SIZE_DEFAULT (1 << 14)
+#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
+# define DLM_HASH_PAGES 1
+#else
+# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
+#endif
+#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
+
+/* Intended to make it easier for us to switch out hash functions */
+#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
 
 enum dlm_ast_type {
         DLM_AST = 0,
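The bucket arithmetic above is easiest to see with concrete numbers. A worked example only, assuming a 4 KB PAGE_SIZE and an 8-byte struct hlist_head (typical of 64-bit builds, but architecture dependent):

/*
 * Worked example only; the real values depend on the architecture.
 *
 *   DLM_HASH_SIZE_DEFAULT = 1 << 14      = 16384 bytes of hash table
 *   DLM_HASH_PAGES        = 16384 / 4096 = 4 pages
 *   DLM_BUCKETS_PER_PAGE  = 4096 / 8     = 512 buckets per page
 *   DLM_HASH_BUCKETS      = 4 * 512      = 2048 buckets in total
 */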
@@ -61,7 +71,8 @@ static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
         return 0;
 }
 
 #define DLM_RECO_STATE_ACTIVE 0x0001
+#define DLM_RECO_STATE_FINALIZE 0x0002
 
 struct dlm_recovery_ctxt
 {
@@ -85,7 +96,7 @@ enum dlm_ctxt_state {
 struct dlm_ctxt
 {
         struct list_head list;
-        struct hlist_head *lockres_hash;
+        struct hlist_head **lockres_hash;
         struct list_head dirty_list;
         struct list_head purge_list;
         struct list_head pending_asts;
@@ -120,6 +131,7 @@ struct dlm_ctxt
         struct o2hb_callback_func dlm_hb_down;
         struct task_struct *dlm_thread_task;
         struct task_struct *dlm_reco_thread_task;
+        struct workqueue_struct *dlm_worker;
         wait_queue_head_t dlm_thread_wq;
         wait_queue_head_t dlm_reco_thread_wq;
         wait_queue_head_t ast_wq;
@@ -132,6 +144,11 @@ struct dlm_ctxt
         struct list_head dlm_eviction_callbacks;
 };
 
+static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
+{
+        return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
+}
+
 /* these keventd work queue items are for less-frequently
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
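With lockres_hash now an array of per-page bucket arrays, dlm_lockres_hash() maps a flat bucket index onto the right page and the offset within it. A minimal sketch of how a caller might walk every bucket, assuming the usual hlist helpers from <linux/list.h>; locking and reference handling are deliberately left out:

static void example_walk_lockres_hash(struct dlm_ctxt *dlm)
{
        struct dlm_lock_resource *res;
        struct hlist_node *node;
        struct hlist_head *bucket;
        unsigned int i;

        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each(node, bucket) {
                        res = hlist_entry(node, struct dlm_lock_resource,
                                          hash_node);
                        printk("lockres: %.*s\n", res->lockname.len,
                               res->lockname.name);
                }
        }
}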
@@ -216,20 +233,29 @@ struct dlm_lock_resource
         /* WARNING: Please see the comment in dlm_init_lockres before
          * adding fields here. */
         struct hlist_node hash_node;
+        struct qstr lockname;
         struct kref refs;
 
-        /* please keep these next 3 in this order
-         * some funcs want to iterate over all lists */
+        /*
+         * Please keep granted, converting, and blocked in this order,
+         * as some funcs want to iterate over all lists.
+         *
+         * All four lists are protected by the hash's reference.
+         */
         struct list_head granted;
         struct list_head converting;
         struct list_head blocked;
+        struct list_head purge;
 
+        /*
+         * These two lists require you to hold an additional reference
+         * while they are on the list.
+         */
         struct list_head dirty;
         struct list_head recovering; // dlm_recovery_ctxt.resources list
 
         /* unused lock resources have their last_used stamped and are
          * put on a list for the dlm thread to run. */
-        struct list_head purge;
         unsigned long last_used;
 
         unsigned migration_pending:1;
@@ -238,7 +264,6 @@ struct dlm_lock_resource
         wait_queue_head_t wq;
         u8 owner; //node which owns the lock resource, or unknown
         u16 state;
-        struct qstr lockname;
         char lvb[DLM_LVB_LEN];
 };
 
@@ -300,6 +325,15 @@ enum dlm_lockres_list {
         DLM_BLOCKED_LIST
 };
 
+static inline int dlm_lvb_is_empty(char *lvb)
+{
+        int i;
+        for (i=0; i<DLM_LVB_LEN; i++)
+                if (lvb[i])
+                        return 0;
+        return 1;
+}
+
 static inline struct list_head *
 dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
 {
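dlm_lvb_is_empty() reports whether a lock value block has ever been written. A small illustrative caller (the function name is hypothetical, not part of this header) that copies an LVB only when it actually carries data and otherwise clears the destination:

static void example_copy_lvb(struct dlm_lock_resource *res, char *dest)
{
        if (!dlm_lvb_is_empty(res->lvb))
                memcpy(dest, res->lvb, DLM_LVB_LEN);
        else
                memset(dest, 0, DLM_LVB_LEN);
}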
@@ -609,7 +643,8 @@ struct dlm_finalize_reco
 {
         u8 node_idx;
         u8 dead_node;
-        __be16 pad1;
+        u8 flags;
+        u8 pad1;
         __be32 pad2;
 };
 
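Swapping the __be16 pad for a u8 flags plus a u8 pad keeps struct dlm_finalize_reco at the same 8-byte size on the wire. A sketch of a compile-time check, not part of the header itself, assuming BUILD_BUG_ON() from <linux/kernel.h> is usable here:

static inline void example_finalize_reco_size_check(void)
{
        /* u8 node_idx + u8 dead_node + u8 flags + u8 pad1 + __be32 pad2 */
        BUILD_BUG_ON(sizeof(struct dlm_finalize_reco) != 8);
}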
@@ -676,6 +711,7 @@ void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
 
 void dlm_put(struct dlm_ctxt *dlm);
 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -687,14 +723,20 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res);
 void dlm_purge_lockres(struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *lockres);
-void dlm_lockres_get(struct dlm_lock_resource *res);
+static inline void dlm_lockres_get(struct dlm_lock_resource *res)
+{
+        /* This is called on every lookup, so it might be worth
+         * inlining. */
+        kref_get(&res->refs);
+}
 void dlm_lockres_put(struct dlm_lock_resource *res);
 void __dlm_unhash_lockres(struct dlm_lock_resource *res);
 void __dlm_insert_lockres(struct dlm_ctxt *dlm,
                           struct dlm_lock_resource *res);
 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                                 const char *name,
-                                                unsigned int len);
+                                                unsigned int len,
+                                                unsigned int hash);
 struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                               const char *name,
                                               unsigned int len);
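Taking a precomputed hash in __dlm_lookup_lockres() lets callers run dlm_lockid_hash() over the name once and reuse the result for both lookup and insertion. An illustrative calling pattern, assuming struct dlm_ctxt's spinlock field (not shown in these hunks) protects the hash; reference counting is left out of the sketch:

static struct dlm_lock_resource *example_find_lockres(struct dlm_ctxt *dlm,
                                                      const char *name,
                                                      unsigned int len)
{
        struct dlm_lock_resource *res;
        unsigned int hash = dlm_lockid_hash(name, len);

        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, name, len, hash);
        spin_unlock(&dlm->spinlock);

        return res;
}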
@@ -819,6 +861,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm,
                            u8 dead_node);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 
+int __dlm_lockres_unused(struct dlm_lock_resource *res);
 
 static inline const char * dlm_lock_mode_name(int mode)
 {