Diffstat (limited to 'fs')
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 126
1 file changed, 51 insertions(+), 75 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 58e45795a6c6..53a9e6093e6f 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -5,7 +5,7 @@
  *
  * debug functionality for the dlm
  *
- * Copyright (C) 2004 Oracle. All rights reserved.
+ * Copyright (C) 2004, 2008 Oracle. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -44,11 +44,10 @@
 #define MLOG_MASK_PREFIX ML_DLM
 #include "cluster/masklog.h"
 
+int stringify_lockname(const char *lockname, int locklen, char *buf, int len);
+
 void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
-	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
-	     res->lockname.len, res->lockname.name,
-	     res->owner, res->state);
 	spin_lock(&res->spinlock);
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
@@ -59,75 +58,78 @@ static void dlm_print_lockres_refmap(struct dlm_lock_resource *res)
 	int bit;
 	assert_spin_locked(&res->spinlock);
 
-	mlog(ML_NOTICE, "  refmap nodes: [ ");
+	printk(KERN_NOTICE "  refmap nodes: [ ");
 	bit = 0;
 	while (1) {
 		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
 		if (bit >= O2NM_MAX_NODES)
 			break;
-		printk("%u ", bit);
+		printk(KERN_NOTICE "%u ", bit);
 		bit++;
 	}
-	printk("], inflight=%u\n", res->inflight_locks);
+	printk(KERN_NOTICE "], inflight=%u\n", res->inflight_locks);
+}
+
+static void __dlm_print_lock(struct dlm_lock *lock)
+{
+	spin_lock(&lock->spinlock);
+
+	printk(KERN_NOTICE "    type=%d, conv=%d, node=%u, cookie=%u:%llu, "
+	       "ref=%u, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c), "
+	       "pending=(conv=%c,lock=%c,cancel=%c,unlock=%c)\n",
+	       lock->ml.type, lock->ml.convert_type, lock->ml.node,
+	       dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	       dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+	       atomic_read(&lock->lock_refs.refcount),
+	       (list_empty(&lock->ast_list) ? 'y' : 'n'),
+	       (lock->ast_pending ? 'y' : 'n'),
+	       (list_empty(&lock->bast_list) ? 'y' : 'n'),
+	       (lock->bast_pending ? 'y' : 'n'),
+	       (lock->convert_pending ? 'y' : 'n'),
+	       (lock->lock_pending ? 'y' : 'n'),
+	       (lock->cancel_pending ? 'y' : 'n'),
+	       (lock->unlock_pending ? 'y' : 'n'));
+
+	spin_unlock(&lock->spinlock);
 }
 
 void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
 	struct list_head *iter2;
 	struct dlm_lock *lock;
+	char buf[DLM_LOCKID_NAME_MAX];
 
 	assert_spin_locked(&res->spinlock);
 
-	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
-	     res->lockname.len, res->lockname.name,
-	     res->owner, res->state);
-	mlog(ML_NOTICE, "  last used: %lu, on purge list: %s\n",
-	     res->last_used, list_empty(&res->purge) ? "no" : "yes");
+	stringify_lockname(res->lockname.name, res->lockname.len,
+			   buf, sizeof(buf) - 1);
+	printk(KERN_NOTICE "lockres: %s, owner=%u, state=%u\n",
+	       buf, res->owner, res->state);
+	printk(KERN_NOTICE "  last used: %lu, refcnt: %u, on purge list: %s\n",
+	       res->last_used, atomic_read(&res->refs.refcount),
+	       list_empty(&res->purge) ? "no" : "yes");
+	printk(KERN_NOTICE "  on dirty list: %s, on reco list: %s, "
+	       "migrating pending: %s\n",
+	       list_empty(&res->dirty) ? "no" : "yes",
+	       list_empty(&res->recovering) ? "no" : "yes",
+	       res->migration_pending ? "yes" : "no");
+	printk(KERN_NOTICE "  inflight locks: %d, asts reserved: %d\n",
+	       res->inflight_locks, atomic_read(&res->asts_reserved));
 	dlm_print_lockres_refmap(res);
-	mlog(ML_NOTICE, "  granted queue: \n");
+	printk(KERN_NOTICE "  granted queue:\n");
 	list_for_each(iter2, &res->granted) {
 		lock = list_entry(iter2, struct dlm_lock, list);
-		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
-		     "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
-		     lock->ml.type, lock->ml.convert_type, lock->ml.node,
-		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-		     list_empty(&lock->ast_list) ? 'y' : 'n',
-		     lock->ast_pending ? 'y' : 'n',
-		     list_empty(&lock->bast_list) ? 'y' : 'n',
-		     lock->bast_pending ? 'y' : 'n');
-		spin_unlock(&lock->spinlock);
+		__dlm_print_lock(lock);
 	}
-	mlog(ML_NOTICE, "  converting queue: \n");
+	printk(KERN_NOTICE "  converting queue:\n");
 	list_for_each(iter2, &res->converting) {
 		lock = list_entry(iter2, struct dlm_lock, list);
-		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
-		     "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
-		     lock->ml.type, lock->ml.convert_type, lock->ml.node,
-		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-		     list_empty(&lock->ast_list) ? 'y' : 'n',
-		     lock->ast_pending ? 'y' : 'n',
-		     list_empty(&lock->bast_list) ? 'y' : 'n',
-		     lock->bast_pending ? 'y' : 'n');
-		spin_unlock(&lock->spinlock);
+		__dlm_print_lock(lock);
 	}
-	mlog(ML_NOTICE, "  blocked queue: \n");
+	printk(KERN_NOTICE "  blocked queue:\n");
 	list_for_each(iter2, &res->blocked) {
 		lock = list_entry(iter2, struct dlm_lock, list);
-		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
-		     "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
-		     lock->ml.type, lock->ml.convert_type, lock->ml.node,
-		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-		     list_empty(&lock->ast_list) ? 'y' : 'n',
-		     lock->ast_pending ? 'y' : 'n',
-		     list_empty(&lock->bast_list) ? 'y' : 'n',
-		     lock->bast_pending ? 'y' : 'n');
-		spin_unlock(&lock->spinlock);
+		__dlm_print_lock(lock);
 	}
 }
 
@@ -137,31 +139,6 @@ void dlm_print_one_lock(struct dlm_lock *lockid)
 }
 EXPORT_SYMBOL_GPL(dlm_print_one_lock);
 
-#if 0
-void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
-{
-	struct dlm_lock_resource *res;
-	struct hlist_node *iter;
-	struct hlist_head *bucket;
-	int i;
-
-	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
-	     dlm->name, dlm->node_num, dlm->key);
-	if (!dlm || !dlm->name) {
-		mlog(ML_ERROR, "dlm=%p\n", dlm);
-		return;
-	}
-
-	spin_lock(&dlm->spinlock);
-	for (i=0; i<DLM_HASH_BUCKETS; i++) {
-		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, iter, bucket, hash_node)
-			dlm_print_one_lock_resource(res);
-	}
-	spin_unlock(&dlm->spinlock);
-}
-#endif /* 0 */
-
 static const char *dlm_errnames[] = {
 	[DLM_NORMAL] =			"DLM_NORMAL",
 	[DLM_GRANTED] =			"DLM_GRANTED",
@@ -274,8 +251,7 @@ EXPORT_SYMBOL_GPL(dlm_errname);
  *
  * For more on lockname formats, please refer to dlmglue.c and ocfs2_lockid.h.
  */
-static int stringify_lockname(const char *lockname, int locklen,
-			      char *buf, int len)
+int stringify_lockname(const char *lockname, int locklen, char *buf, int len)
 {
 	int out = 0;
 	__be64 inode_blkno_be;
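
The last hunk drops the "static" from stringify_lockname(), and the second hunk adds a forward declaration so __dlm_print_one_lock_resource() can render the raw lockname into a printable buffer before printing it. As a minimal sketch (not part of the patch), the same call pattern could be reused by another dlm source file; the function name example_dump_lockres and the local prototype below are illustrative only, and the usual dlm headers are assumed to be available:

	/* Hypothetical caller, mirroring the buffer handling used in
	 * __dlm_print_one_lock_resource() above.  Assumes "dlmcommon.h"
	 * (struct dlm_lock_resource, DLM_LOCKID_NAME_MAX) is included. */
	#include "dlmcommon.h"

	/* The patch declares this in dlmdebug.c only, so an out-of-file
	 * caller would need its own declaration (or a shared header). */
	int stringify_lockname(const char *lockname, int locklen,
			       char *buf, int len);

	static void example_dump_lockres(struct dlm_lock_resource *res)
	{
		char buf[DLM_LOCKID_NAME_MAX];

		/* Convert the binary lockname into printable form first. */
		stringify_lockname(res->lockname.name, res->lockname.len,
				   buf, sizeof(buf) - 1);
		printk(KERN_NOTICE "dumping lockres %s\n", buf);

		/* dlm_print_one_lock_resource() takes res->spinlock itself
		 * and walks the granted/converting/blocked queues via the
		 * new __dlm_print_lock() helper. */
		dlm_print_one_lock_resource(res);
	}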