Diffstat (limited to 'fs/dlm/ast.c')
-rw-r--r--	fs/dlm/ast.c	257
1 file changed, 198 insertions(+), 59 deletions(-)
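
This change replaces the single pending cast/bast state kept on each lkb with a small per-lkb queue of callback records, each stamped with a global sequence number taken under ast_queue_lock. The hunks below refer to struct dlm_callback, DLM_CALLBACKS_SIZE, the DLM_CB_* flags and several new lkb fields that are declared elsewhere in the same patch (fs/dlm/dlm_internal.h, not shown on this page). As a reading aid, here is a rough sketch of what those declarations are assumed to look like, inferred only from how the fields are used in this file; the exact flag values, field widths and array size are assumptions, not a copy of the real header.

/* Sketch only: assumed shape of the dlm_internal.h additions, inferred
 * from the usage in fs/dlm/ast.c below.  Flag values, widths and the
 * queue depth are guesses, not taken from the actual patch.
 */
#include <linux/ktime.h>
#include <linux/types.h>

#define DLM_CB_CAST		0x00000001	/* completion ast */
#define DLM_CB_BAST		0x00000002	/* blocking ast */
#define DLM_CB_SKIP		0x00000004	/* marked redundant on removal */

#define DLM_CALLBACKS_SIZE	6		/* assumed per-lkb queue depth */

struct dlm_callback {
	uint64_t	seq;		/* ast_seq_count value when queued */
	uint32_t	flags;		/* DLM_CB_* */
	int		mode;		/* gr mode of cast, rq mode of bast */
	int		sb_status;	/* copied to lksb when a cast is delivered */
	uint8_t		sb_flags;	/* low byte of sbflags */
};

/* fields assumed added to struct dlm_lkb and referenced below */
struct dlm_lkb_cb_state {
	struct dlm_callback	lkb_callbacks[DLM_CALLBACKS_SIZE];
	struct dlm_callback	lkb_last_cast;
	struct dlm_callback	lkb_last_bast;
	ktime_t			lkb_last_cast_time;
	ktime_t			lkb_last_bast_time;
};
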
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 4314f0d48d8..abc49f29245 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -18,6 +18,7 @@
 
 #define WAKE_ASTS 0
 
+static uint64_t ast_seq_count;
 static struct list_head ast_queue;
 static spinlock_t ast_queue_lock;
 static struct task_struct * astd_task;
@@ -25,40 +26,186 @@ static unsigned long astd_wakeflags;
 static struct mutex astd_running;
 
 
+static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
+{
+	int i;
+
+	log_print("last_bast %x %llu flags %x mode %d sb %d %x",
+		  lkb->lkb_id,
+		  (unsigned long long)lkb->lkb_last_bast.seq,
+		  lkb->lkb_last_bast.flags,
+		  lkb->lkb_last_bast.mode,
+		  lkb->lkb_last_bast.sb_status,
+		  lkb->lkb_last_bast.sb_flags);
+
+	log_print("last_cast %x %llu flags %x mode %d sb %d %x",
+		  lkb->lkb_id,
+		  (unsigned long long)lkb->lkb_last_cast.seq,
+		  lkb->lkb_last_cast.flags,
+		  lkb->lkb_last_cast.mode,
+		  lkb->lkb_last_cast.sb_status,
+		  lkb->lkb_last_cast.sb_flags);
+
+	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
+		log_print("cb %x %llu flags %x mode %d sb %d %x",
+			  lkb->lkb_id,
+			  (unsigned long long)lkb->lkb_callbacks[i].seq,
+			  lkb->lkb_callbacks[i].flags,
+			  lkb->lkb_callbacks[i].mode,
+			  lkb->lkb_callbacks[i].sb_status,
+			  lkb->lkb_callbacks[i].sb_flags);
+	}
+}
+
 void dlm_del_ast(struct dlm_lkb *lkb)
 {
 	spin_lock(&ast_queue_lock);
-	if (lkb->lkb_ast_type & (AST_COMP | AST_BAST))
-		list_del(&lkb->lkb_astqueue);
+	if (!list_empty(&lkb->lkb_astqueue))
+		list_del_init(&lkb->lkb_astqueue);
 	spin_unlock(&ast_queue_lock);
 }
 
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode)
+int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			 int status, uint32_t sbflags, uint64_t seq)
 {
+	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+	uint64_t prev_seq;
+	int prev_mode;
+	int i;
+
+	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
+		if (lkb->lkb_callbacks[i].seq)
+			continue;
+
+		/*
+		 * Suppress some redundant basts here, do more on removal.
+		 * Don't even add a bast if the callback just before it
+		 * is a bast for the same mode or a more restrictive mode.
+		 * (the addional > PR check is needed for PR/CW inversion)
+		 */
+
+		if ((i > 0) && (flags & DLM_CB_BAST) &&
+		    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {
+
+			prev_seq = lkb->lkb_callbacks[i-1].seq;
+			prev_mode = lkb->lkb_callbacks[i-1].mode;
+
+			if ((prev_mode == mode) ||
+			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
+
+				log_debug(ls, "skip %x add bast %llu mode %d "
+					  "for bast %llu mode %d",
+					  lkb->lkb_id,
+					  (unsigned long long)seq,
+					  mode,
+					  (unsigned long long)prev_seq,
+					  prev_mode);
+				return 0;
+			}
+		}
+
+		lkb->lkb_callbacks[i].seq = seq;
+		lkb->lkb_callbacks[i].flags = flags;
+		lkb->lkb_callbacks[i].mode = mode;
+		lkb->lkb_callbacks[i].sb_status = status;
+		lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
+		break;
+	}
+
+	if (i == DLM_CALLBACKS_SIZE) {
+		log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
+			  lkb->lkb_id, (unsigned long long)seq,
+			  flags, mode, status, sbflags);
+		dlm_dump_lkb_callbacks(lkb);
+		return -1;
+	}
+
+	return 0;
+}
+
+int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
+			 struct dlm_callback *cb, int *resid)
+{
+	int i;
+
+	*resid = 0;
+
+	if (!lkb->lkb_callbacks[0].seq)
+		return -ENOENT;
+
+	/* oldest undelivered cb is callbacks[0] */
+
+	memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
+	memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));
+
+	/* shift others down */
+
+	for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
+		if (!lkb->lkb_callbacks[i].seq)
+			break;
+		memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
+		       sizeof(struct dlm_callback));
+		memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
+		(*resid)++;
+	}
+
+	/* if cb is a bast, it should be skipped if the blocking mode is
+	   compatible with the last granted mode */
+
+	if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
+		if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
+			cb->flags |= DLM_CB_SKIP;
+
+			log_debug(ls, "skip %x bast %llu mode %d "
+				  "for cast %llu mode %d",
+				  lkb->lkb_id,
+				  (unsigned long long)cb->seq,
+				  cb->mode,
+				  (unsigned long long)lkb->lkb_last_cast.seq,
+				  lkb->lkb_last_cast.mode);
+			return 0;
+		}
+	}
+
+	if (cb->flags & DLM_CB_CAST) {
+		memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
+		lkb->lkb_last_cast_time = ktime_get();
+	}
+
+	if (cb->flags & DLM_CB_BAST) {
+		memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
+		lkb->lkb_last_bast_time = ktime_get();
+	}
+
+	return 0;
+}
+
+void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
+		 uint32_t sbflags)
+{
+	uint64_t seq;
+	int rv;
+
+	spin_lock(&ast_queue_lock);
+
+	seq = ++ast_seq_count;
+
 	if (lkb->lkb_flags & DLM_IFL_USER) {
-		dlm_user_add_ast(lkb, type, mode);
+		spin_unlock(&ast_queue_lock);
+		dlm_user_add_ast(lkb, flags, mode, status, sbflags, seq);
 		return;
 	}
 
-	spin_lock(&ast_queue_lock);
-	if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
+	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
+	if (rv < 0) {
+		spin_unlock(&ast_queue_lock);
+		return;
+	}
+
+	if (list_empty(&lkb->lkb_astqueue)) {
 		kref_get(&lkb->lkb_ref);
 		list_add_tail(&lkb->lkb_astqueue, &ast_queue);
-		lkb->lkb_ast_first = type;
 	}
-
-	/* sanity check, this should not happen */
-
-	if ((type == AST_COMP) && (lkb->lkb_ast_type & AST_COMP))
-		log_print("repeat cast %d castmode %d lock %x %s",
-			  mode, lkb->lkb_castmode,
-			  lkb->lkb_id, lkb->lkb_resource->res_name);
-
-	lkb->lkb_ast_type |= type;
-	if (type == AST_BAST)
-		lkb->lkb_bastmode = mode;
-	else
-		lkb->lkb_castmode = mode;
 	spin_unlock(&ast_queue_lock);
 
 	set_bit(WAKE_ASTS, &astd_wakeflags);
@@ -72,7 +219,8 @@ static void process_asts(void)
 	struct dlm_lkb *lkb;
 	void (*castfn) (void *astparam);
 	void (*bastfn) (void *astparam, int mode);
-	int type, first, bastmode, castmode, do_bast, do_cast, last_castmode;
+	struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
+	int i, rv, resid;
 
 repeat:
 	spin_lock(&ast_queue_lock);
@@ -83,54 +231,45 @@ repeat:
 		if (dlm_locking_stopped(ls))
 			continue;
 
-		list_del(&lkb->lkb_astqueue);
-		type = lkb->lkb_ast_type;
-		lkb->lkb_ast_type = 0;
-		first = lkb->lkb_ast_first;
-		lkb->lkb_ast_first = 0;
-		bastmode = lkb->lkb_bastmode;
-		castmode = lkb->lkb_castmode;
+		/* we remove from astqueue list and remove everything in
+		   lkb_callbacks before releasing the spinlock so empty
+		   lkb_astqueue is always consistent with empty lkb_callbacks */
+
+		list_del_init(&lkb->lkb_astqueue);
+
 		castfn = lkb->lkb_astfn;
 		bastfn = lkb->lkb_bastfn;
-		spin_unlock(&ast_queue_lock);
 
-		do_cast = (type & AST_COMP) && castfn;
-		do_bast = (type & AST_BAST) && bastfn;
+		memset(&callbacks, 0, sizeof(callbacks));
 
-		/* Skip a bast if its blocking mode is compatible with the
-		   granted mode of the preceding cast. */
+		for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
+			rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
+			if (rv < 0)
+				break;
+		}
+		spin_unlock(&ast_queue_lock);
 
-		if (do_bast) {
-			if (first == AST_COMP)
-				last_castmode = castmode;
-			else
-				last_castmode = lkb->lkb_castmode_done;
-			if (dlm_modes_compat(bastmode, last_castmode))
-				do_bast = 0;
+		if (resid) {
+			/* shouldn't happen, for loop should have removed all */
+			log_error(ls, "callback resid %d lkb %x",
+				  resid, lkb->lkb_id);
 		}
 
-		if (first == AST_COMP) {
-			if (do_cast)
-				castfn(lkb->lkb_astparam);
-			if (do_bast)
-				bastfn(lkb->lkb_astparam, bastmode);
-		} else if (first == AST_BAST) {
-			if (do_bast)
-				bastfn(lkb->lkb_astparam, bastmode);
-			if (do_cast)
+		for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
+			if (!callbacks[i].seq)
+				break;
+			if (callbacks[i].flags & DLM_CB_SKIP) {
+				continue;
+			} else if (callbacks[i].flags & DLM_CB_BAST) {
+				bastfn(lkb->lkb_astparam, callbacks[i].mode);
+			} else if (callbacks[i].flags & DLM_CB_CAST) {
+				lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
+				lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
 				castfn(lkb->lkb_astparam);
-		} else {
-			log_error(ls, "bad ast_first %d ast_type %d",
-				  first, type);
+			}
 		}
 
-		if (do_cast)
-			lkb->lkb_castmode_done = castmode;
-		if (do_bast)
-			lkb->lkb_bastmode_done = bastmode;
-
-		/* this removes the reference added by dlm_add_ast
-		   and may result in the lkb being freed */
+		/* removes ref for ast_queue, may cause lkb to be freed */
 		dlm_put_lkb(lkb);
 
 		cond_resched();
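
The per-lkb lkb_callbacks[] array added above is a small in-place FIFO: dlm_add_lkb_callback() fills the first empty slot (seq == 0 marks a slot free), and dlm_rem_lkb_callback() hands back slot 0 and shifts the survivors down, all under ast_queue_lock. The standalone userspace sketch below (hypothetical names cb_add, cb_rem, CB_QUEUE_SIZE) mirrors only that add/remove discipline; it is an editorial illustration, not part of the patch.

/* Illustration only: the fixed-size callback queue discipline used by
 * dlm_add_lkb_callback()/dlm_rem_lkb_callback() above, as plain userspace C.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CB_QUEUE_SIZE 6

struct cb {
	uint64_t seq;	/* 0 means "slot empty", as in the patch */
	int mode;
};

static struct cb queue[CB_QUEUE_SIZE];

static int cb_add(uint64_t seq, int mode)
{
	int i;

	for (i = 0; i < CB_QUEUE_SIZE; i++) {
		if (queue[i].seq)
			continue;	/* slot in use, try the next one */
		queue[i].seq = seq;
		queue[i].mode = mode;
		return 0;
	}
	return -1;			/* queue full, caller logs and drops */
}

static int cb_rem(struct cb *out)
{
	int i;

	if (!queue[0].seq)
		return -1;		/* nothing queued */

	*out = queue[0];		/* oldest undelivered entry */
	memset(&queue[0], 0, sizeof(queue[0]));

	for (i = 1; i < CB_QUEUE_SIZE; i++) {	/* shift survivors down */
		if (!queue[i].seq)
			break;
		queue[i - 1] = queue[i];
		memset(&queue[i], 0, sizeof(queue[i]));
	}
	return 0;
}

int main(void)
{
	struct cb c;

	cb_add(1, 5);
	cb_add(2, 3);
	while (!cb_rem(&c))
		printf("deliver seq %llu mode %d\n",
		       (unsigned long long)c.seq, c.mode);
	return 0;
}

Keeping the queue as a fixed array avoids any allocation on the ast path; overflow is treated as an error, which is what the patch does with log_error() and dlm_dump_lkb_callbacks().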