Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r-- | fs/gfs2/glops.c | 615
1 file changed, 615 insertions, 0 deletions
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
new file mode 100644
index 000000000000..41a6b6818a50
--- /dev/null
+++ b/fs/gfs2/glops.c
@@ -0,0 +1,615 @@
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

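/*
 * This file defines the glock operation vectors (struct
 * gfs2_glock_operations) for each glock type, together with the
 * shared helpers they use to sync and invalidate cached pages,
 * metadata and PTEs when a glock changes state.
 */
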
/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int blocks;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        u64 blkno;
        int error;

        blocks = atomic_read(&gl->gl_ail_count);
        if (!blocks)
                return;

        error = gfs2_trans_begin(sdp, 0, blocks);
        if (gfs2_assert_withdraw(sdp, !error))
                return;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata,
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                blkno = bh->b_blocknr;
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));

                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&gl->gl_ail_count);
                brelse(bh);
                gfs2_log_unlock(sdp);

                gfs2_trans_add_revoke(sdp, blkno);

                gfs2_log_lock(sdp);
        }
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        gfs2_log_unlock(sdp);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;
        struct inode *inode;

        ip = gl->gl_object;
        if (!ip || !S_ISREG(ip->i_di.di_mode))
                return;
        inode = &ip->i_inode;

        if (!test_bit(GIF_PAGED, &ip->i_flags))
                return;

        unmap_shared_mapping_range(inode->i_mapping, 0, 0);

        if (test_bit(GIF_SW_PAGED, &ip->i_flags))
                set_bit(GLF_DIRTY, &gl->gl_flags);

        clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_page_inval(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;
        struct inode *inode;

        ip = gl->gl_object;
        if (!ip || !S_ISREG(ip->i_di.di_mode))
                return;
        inode = &ip->i_inode;

        truncate_inode_pages(inode->i_mapping, 0);
        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
        clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_wait - Wait for writeback of data
 * @gl: the glock
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

static void gfs2_page_wait(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;
        int error;

        if (!S_ISREG(ip->i_di.di_mode))
                return;

        error = filemap_fdatawait(mapping);

        /* Put back any errors cleared by filemap_fdatawait()
           so they can be caught by someone who can pass them
           up to user space. */

        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else if (error)
                set_bit(AS_EIO, &mapping->flags);
}

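/**
 * gfs2_page_writeback - Start writeback of a glock's data pages
 * @gl: the glock
 *
 * Kicks off writeback of the data pages for a regular file.
 * No-op for all other types.
 */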
static void gfs2_page_writeback(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;

        if (!S_ISREG(ip->i_di.di_mode))
                return;

        filemap_fdatawrite(mapping);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 * @flags: DIO_*
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl, int flags)
{
        if (!(flags & DIO_METADATA))
                return;

        if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
                gfs2_log_flush(gl->gl_sbd, gl);
                gfs2_meta_sync(gl);
                if (flags & DIO_RELEASE)
                        gfs2_ail_empty_gl(gl);
        }
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
        if (!(flags & DIO_METADATA))
                return;

        gfs2_meta_inval(gl);
        gl->gl_vn++;
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
                              int flags)
{
        if (gl->gl_state != LM_ST_UNLOCKED)
                gfs2_pte_inval(gl);
        gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh = gl->gl_req_gh;
        struct buffer_head *bh;
        int error;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            (!gh || !(gh->gh_flags & GL_SKIP))) {
                error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
                if (!error)
                        brelse(bh);
        }
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock (unused for too
 * long) is being purged from our node's glock cache; we're dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
        gfs2_pte_inval(gl);
        gfs2_glock_drop_th(gl);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags:
 *
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
        int meta = (flags & DIO_METADATA);
        int data = (flags & DIO_DATA);

        if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
                if (meta && data) {
                        gfs2_page_writeback(gl);
                        gfs2_log_flush(gl->gl_sbd, gl);
                        gfs2_meta_sync(gl);
                        gfs2_page_wait(gl);
                        clear_bit(GLF_DIRTY, &gl->gl_flags);
                } else if (meta) {
                        gfs2_log_flush(gl->gl_sbd, gl);
                        gfs2_meta_sync(gl);
                } else if (data) {
                        gfs2_page_writeback(gl);
                        gfs2_page_wait(gl);
                }
                if (flags & DIO_RELEASE)
                        gfs2_ail_empty_gl(gl);
        }
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        int meta = (flags & DIO_METADATA);
        int data = (flags & DIO_DATA);

        if (meta) {
                gfs2_meta_inval(gl);
                gl->gl_vn++;
        }
        if (data)
                gfs2_page_inval(gl);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int demote = 0;

        if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
                demote = 1;
        else if (!sdp->sd_args.ar_localcaching &&
                 time_after_eq(jiffies, gl->gl_stamp +
                               gfs2_tune_get(sdp, gt_demote_secs) * HZ))
                demote = 1;

        return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip)
                return 0;

        if (ip->i_vn != gl->gl_vn) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
                gfs2_inode_attr_in(ip);
        }

        if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_flags & GL_LOCAL_EXCL))
                error = gfs2_truncatei_resume(ip);

        return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by
 *                   a process
 * @gh: the glock holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;

        if (ip == NULL)
                return;
        if (test_bit(GLF_DIRTY, &gl->gl_flags))
                gfs2_inode_attr_in(ip);
        gfs2_meta_cache_flush(ip);
}

/**
 * inode_greedy - adjust an inode's "greedy" glock hold time
 * @gl: the glock
 *
 * Lengthen the hold time (up to gt_greedy_max) if the inode has taken a
 * page fault within the current quantum; otherwise shorten it.
 */

static void inode_greedy(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
        unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
        unsigned int new_time;

        spin_lock(&ip->i_spin);

        if (time_after(ip->i_last_pfault + quantum, jiffies)) {
                new_time = ip->i_greedy + quantum;
                if (new_time > max)
                        new_time = max;
        } else {
                new_time = ip->i_greedy - quantum;
                if (!new_time || new_time > max)
                        new_time = 1;
        }

        ip->i_greedy = new_time;

        spin_unlock(&ip->i_spin);

        iput(&ip->i_inode);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
        return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *                the first holder on this node.
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
        return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *                  the last holder on this node.
 * @gh: the glock holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
        gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
                              int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_syncfs(sdp);
                gfs2_log_shutdown(sdp);
        }

        gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header head;
        int error;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Initialize the head of the log */
                if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_syncfs(sdp);
                gfs2_log_shutdown(sdp);
        }

        gfs2_glock_drop_th(gl);
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
        return !atomic_read(&gl->gl_lvb_count);
}

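/*
 * Glock operation vectors, one per lock type.  Types with no
 * specialised behaviour fall back to the generic state-change
 * routines gfs2_glock_xmote_th() and gfs2_glock_drop_th().  Callers
 * reach these hooks through gl->gl_ops, e.g. the
 * gl->gl_ops->go_inval() call in trans_go_xmote_bh() above.
 */
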
const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_xmote_th = inode_go_xmote_th,
        .go_xmote_bh = inode_go_xmote_bh,
        .go_drop_th = inode_go_drop_th,
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_unlock = inode_go_unlock,
        .go_greedy = inode_greedy,
        .go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_sync = meta_go_sync,
        .go_inval = meta_go_inval,
        .go_demote_ok = rgrp_go_demote_ok,
        .go_lock = rgrp_go_lock,
        .go_unlock = rgrp_go_unlock,
        .go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
        .go_xmote_th = trans_go_xmote_th,
        .go_xmote_bh = trans_go_xmote_bh,
        .go_drop_th = trans_go_drop_th,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_demote_ok = quota_go_demote_ok,
        .go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_JOURNAL,
};