Diffstat (limited to 'fs/gfs2')
 -rw-r--r--  fs/gfs2/incore.h |   2
 -rw-r--r--  fs/gfs2/log.c    |  57
 -rw-r--r--  fs/gfs2/log.h    |   2
 -rw-r--r--  fs/gfs2/lops.c   | 353
 -rw-r--r--  fs/gfs2/lops.h   |   2
 -rw-r--r--  fs/gfs2/main.c   |  16
 -rw-r--r--  fs/gfs2/util.c   |   2
 -rw-r--r--  fs/gfs2/util.h   |   2
 8 files changed, 264 insertions, 172 deletions
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 47d0bda5ac2b..dd97f64a8bd4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -716,7 +716,9 @@ struct gfs2_sbd {
 
 	struct rw_semaphore sd_log_flush_lock;
 	atomic_t sd_log_in_flight;
+	struct bio *sd_log_bio;
 	wait_queue_head_t sd_log_flush_wait;
+	int sd_log_error;
 
 	unsigned int sd_log_flush_head;
 	u64 sd_log_flush_wrapped;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index d886a17f671a..f5eacb3589ba 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -357,18 +357,6 @@ retry:
 	return 0;
 }
 
-u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
-{
-	struct gfs2_journal_extent *je;
-
-	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
-		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
-			return je->dblock + lbn - je->lblock;
-	}
-
-	return -1;
-}
-
 /**
  * log_distance - Compute distance between two journal blocks
  * @sdp: The GFS2 superblock
@@ -464,17 +452,6 @@ static unsigned int current_tail(struct gfs2_sbd *sdp)
 	return tail;
 }
 
-void gfs2_log_incr_head(struct gfs2_sbd *sdp)
-{
-	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
-	       (sdp->sd_log_flush_head != sdp->sd_log_head));
-
-	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
-		sdp->sd_log_flush_head = 0;
-		sdp->sd_log_flush_wrapped = 1;
-	}
-}
-
 static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
 {
 	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
@@ -580,23 +557,17 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
 
 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 {
-	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
-	struct buffer_head *bh;
 	struct gfs2_log_header *lh;
 	unsigned int tail;
 	u32 hash;
-
-	bh = sb_getblk(sdp->sd_vfs, blkno);
-	lock_buffer(bh);
-	memset(bh->b_data, 0, bh->b_size);
-	set_buffer_uptodate(bh);
-	clear_buffer_dirty(bh);
+	int rw = WRITE_FLUSH_FUA | REQ_META;
+	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+	lh = page_address(page);
+	clear_page(lh);
 
 	gfs2_ail1_empty(sdp);
 	tail = current_tail(sdp);
 
-	lh = (struct gfs2_log_header *)bh->b_data;
-	memset(lh, 0, sizeof(struct gfs2_log_header));
 	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
 	lh->lh_header.__pad0 = cpu_to_be64(0);
@@ -606,29 +577,22 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	lh->lh_flags = cpu_to_be32(flags);
 	lh->lh_tail = cpu_to_be32(tail);
 	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
-	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
+	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
 	lh->lh_hash = cpu_to_be32(hash);
 
-	bh->b_end_io = end_buffer_write_sync;
-	get_bh(bh);
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
-		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
-	} else {
-		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+		rw = WRITE_SYNC | REQ_META | REQ_PRIO;
 	}
-	wait_on_buffer(bh);
 
-	if (!buffer_uptodate(bh))
-		gfs2_io_error_bh(sdp, bh);
-	brelse(bh);
+	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
+	gfs2_log_write_page(sdp, page);
+	gfs2_log_flush_bio(sdp, rw);
+	log_flush_wait(sdp);
 
 	if (sdp->sd_log_tail != tail)
 		log_pull_tail(sdp, tail);
-
-	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
-	gfs2_log_incr_head(sdp);
 }
 
 /**
@@ -674,6 +638,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 
 	gfs2_ordered_write(sdp);
 	lops_before_commit(sdp);
+	gfs2_log_flush_bio(sdp, WRITE);
 
 	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
 		log_write_header(sdp, 0);
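To summarize the log.c side of the change above: log_write_header() no longer builds a buffer_head and calls submit_bh(); it fills a page drawn from gfs2_page_pool and hands it to the new lops.c helpers. The following is a condensed sketch of that flow, assuming only the helpers shown in this patch (field initialisation and the SDF_NOBARRIERS branch are elided; the _sketch name is not part of the patch):

/* Sketch of the new header write path in log_write_header(), per the hunks above. */
static void log_write_header_sketch(struct gfs2_sbd *sdp, u32 flags)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); /* pool page backs the header */
	struct gfs2_log_header *lh = page_address(page);
	int rw = WRITE_FLUSH_FUA | REQ_META; /* becomes WRITE_SYNC | REQ_META | REQ_PRIO when barriers are off */

	clear_page(lh);
	/* ... magic, type, sequence, tail, blkno and the header hash are filled in as in the diff ... */

	gfs2_log_write_page(sdp, page);  /* ownership of the page passes to the pending log bio */
	gfs2_log_flush_bio(sdp, rw);     /* submit the pending bio with the chosen flush/FUA flags */
	log_flush_wait(sdp);             /* wait for sd_log_in_flight to drain back to zero */
}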
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index ff07454b582c..3fd5215ea25f 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -52,8 +52,6 @@ extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
 				    unsigned int ssize);
 
 extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
-extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
-extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn);
 extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
 extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
 extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index a5937b3c9913..872d3e6ae05e 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -127,118 +127,256 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	atomic_dec(&sdp->sd_log_pinned);
 }
 
-
-static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
+static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
 {
-	return (struct gfs2_log_descriptor *)bh->b_data;
+	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
+	       (sdp->sd_log_flush_head != sdp->sd_log_head));
+
+	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
+		sdp->sd_log_flush_head = 0;
+		sdp->sd_log_flush_wrapped = 1;
+	}
 }
 
-static inline __be64 *bh_log_ptr(struct buffer_head *bh)
+static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
 {
-	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
-	return (__force __be64 *)(ld + 1);
+	unsigned int lbn = sdp->sd_log_flush_head;
+	struct gfs2_journal_extent *je;
+	u64 block;
+
+	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
+		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
+			block = je->dblock + lbn - je->lblock;
+			gfs2_log_incr_head(sdp);
+			return block;
+		}
+	}
+
+	return -1;
 }
 
-static inline __be64 *bh_ptr_end(struct buffer_head *bh)
+/**
+ * gfs2_end_log_write_bh - end log write of pagecache data with buffers
+ * @sdp: The superblock
+ * @bvec: The bio_vec
+ * @error: The i/o status
+ *
+ * This finds the relavent buffers and unlocks then and sets the
+ * error flag according to the status of the i/o request. This is
+ * used when the log is writing data which has an in-place version
+ * that is pinned in the pagecache.
+ */
+
+static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
+				  int error)
 {
-	return (__force __be64 *)(bh->b_data + bh->b_size);
+	struct buffer_head *bh, *next;
+	struct page *page = bvec->bv_page;
+	unsigned size;
+
+	bh = page_buffers(page);
+	size = bvec->bv_len;
+	while (bh_offset(bh) < bvec->bv_offset)
+		bh = bh->b_this_page;
+	do {
+		if (error)
+			set_buffer_write_io_error(bh);
+		unlock_buffer(bh);
+		next = bh->b_this_page;
+		size -= bh->b_size;
+		brelse(bh);
+		bh = next;
+	} while(bh && size);
 }
 
 /**
- * gfs2_log_write_endio - End of I/O for a log buffer
- * @bh: The buffer head
- * @uptodate: I/O Status
+ * gfs2_end_log_write - end of i/o to the log
+ * @bio: The bio
+ * @error: Status of i/o request
+ *
+ * Each bio_vec contains either data from the pagecache or data
+ * relating to the log itself. Here we iterate over the bio_vec
+ * array, processing both kinds of data.
  *
  */
 
-static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
+static void gfs2_end_log_write(struct bio *bio, int error)
 {
-	struct gfs2_sbd *sdp = bh->b_private;
-	bh->b_private = NULL;
+	struct gfs2_sbd *sdp = bio->bi_private;
+	struct bio_vec *bvec;
+	struct page *page;
+	int i;
+
+	if (error) {
+		sdp->sd_log_error = error;
+		fs_err(sdp, "Error %d writing to log\n", error);
+	}
+
+	bio_for_each_segment(bvec, bio, i) {
+		page = bvec->bv_page;
+		if (page_has_buffers(page))
+			gfs2_end_log_write_bh(sdp, bvec, error);
+		else
+			mempool_free(page, gfs2_page_pool);
+	}
 
-	end_buffer_write_sync(bh, uptodate);
+	bio_put(bio);
 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
 		wake_up(&sdp->sd_log_flush_wait);
 }
 
 /**
- * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
- * @sdp: The GFS2 superblock
+ * gfs2_log_flush_bio - Submit any pending log bio
+ * @sdp: The superblock
+ * @rw: The rw flags
  *
- * tReturns: the buffer_head
+ * Submit any pending part-built or full bio to the block device. If
+ * there is no pending bio, then this is a no-op.
  */
 
-static struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
+void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
 {
-	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
-	struct buffer_head *bh;
+	if (sdp->sd_log_bio) {
+		atomic_inc(&sdp->sd_log_in_flight);
+		submit_bio(rw, sdp->sd_log_bio);
+		sdp->sd_log_bio = NULL;
+	}
+}
 
-	bh = sb_getblk(sdp->sd_vfs, blkno);
-	lock_buffer(bh);
-	memset(bh->b_data, 0, bh->b_size);
-	set_buffer_uptodate(bh);
-	clear_buffer_dirty(bh);
-	gfs2_log_incr_head(sdp);
-	atomic_inc(&sdp->sd_log_in_flight);
-	bh->b_private = sdp;
-	bh->b_end_io = gfs2_log_write_endio;
+/**
+ * gfs2_log_alloc_bio - Allocate a new bio for log writing
+ * @sdp: The superblock
+ * @blkno: The next device block number we want to write to
+ *
+ * This should never be called when there is a cached bio in the
+ * super block. When it returns, there will be a cached bio in the
+ * super block which will have as many bio_vecs as the device is
+ * happy to handle.
+ *
+ * Returns: Newly allocated bio
+ */
 
-	return bh;
+static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
+{
+	struct super_block *sb = sdp->sd_vfs;
+	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
+	struct bio *bio;
+
+	BUG_ON(sdp->sd_log_bio);
+
+	while (1) {
+		bio = bio_alloc(GFP_NOIO, nrvecs);
+		if (likely(bio))
+			break;
+		nrvecs = max(nrvecs/2, 1U);
+	}
+
+	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+	bio->bi_bdev = sb->s_bdev;
+	bio->bi_end_io = gfs2_end_log_write;
+	bio->bi_private = sdp;
+
+	sdp->sd_log_bio = bio;
+
+	return bio;
 }
 
 /**
- * gfs2_fake_write_endio -
- * @bh: The buffer head
- * @uptodate: The I/O Status
+ * gfs2_log_get_bio - Get cached log bio, or allocate a new one
+ * @sdp: The superblock
+ * @blkno: The device block number we want to write to
+ *
+ * If there is a cached bio, then if the next block number is sequential
+ * with the previous one, return it, otherwise flush the bio to the
+ * device. If there is not a cached bio, or we just flushed it, then
+ * allocate a new one.
  *
+ * Returns: The bio to use for log writes
  */
 
-static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
+static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
 {
-	struct buffer_head *real_bh = bh->b_private;
-	struct gfs2_bufdata *bd = real_bh->b_private;
-	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;
+	struct bio *bio = sdp->sd_log_bio;
+	u64 nblk;
+
+	if (bio) {
+		nblk = bio->bi_sector + bio_sectors(bio);
+		nblk >>= sdp->sd_fsb2bb_shift;
+		if (blkno == nblk)
+			return bio;
+		gfs2_log_flush_bio(sdp, WRITE);
+	}
 
-	end_buffer_write_sync(bh, uptodate);
-	mempool_free(bh, gfs2_bh_pool);
-	unlock_buffer(real_bh);
-	brelse(real_bh);
-	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
-		wake_up(&sdp->sd_log_flush_wait);
+	return gfs2_log_alloc_bio(sdp, blkno);
 }
 
+
 /**
- * gfs2_log_write_buf - write metadata buffer to log
+ * gfs2_log_write - write to log
  * @sdp: the filesystem
- * @real: the in-place buffer head
+ * @page: the page to write
+ * @size: the size of the data to write
+ * @offset: the offset within the page
  *
+ * Try and add the page segment to the current bio. If that fails,
+ * submit the current bio to the device and create a new one, and
+ * then add the page segment to that.
  */
 
-static void gfs2_log_write_buf(struct gfs2_sbd *sdp, struct buffer_head *real)
+static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
+			   unsigned size, unsigned offset)
 {
-	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
-	struct buffer_head *bh;
+	u64 blkno = gfs2_log_bmap(sdp);
+	struct bio *bio;
+	int ret;
+
+	bio = gfs2_log_get_bio(sdp, blkno);
+	ret = bio_add_page(bio, page, size, offset);
+	if (ret == 0) {
+		gfs2_log_flush_bio(sdp, WRITE);
+		bio = gfs2_log_alloc_bio(sdp, blkno);
+		ret = bio_add_page(bio, page, size, offset);
+		WARN_ON(ret == 0);
+	}
+}
 
-	bh = mempool_alloc(gfs2_bh_pool, GFP_NOFS);
-	atomic_set(&bh->b_count, 1);
-	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
-	set_bh_page(bh, real->b_page, bh_offset(real));
-	bh->b_blocknr = blkno;
-	bh->b_size = sdp->sd_sb.sb_bsize;
-	bh->b_bdev = sdp->sd_vfs->s_bdev;
-	bh->b_private = real;
-	bh->b_end_io = gfs2_fake_write_endio;
+/**
+ * gfs2_log_write_bh - write a buffer's content to the log
+ * @sdp: The super block
+ * @bh: The buffer pointing to the in-place location
+ *
+ * This writes the content of the buffer to the next available location
+ * in the log. The buffer will be unlocked once the i/o to the log has
+ * completed.
+ */
+
+static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
+{
+	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
+}
 
-	gfs2_log_incr_head(sdp);
-	atomic_inc(&sdp->sd_log_in_flight);
+/**
+ * gfs2_log_write_page - write one block stored in a page, into the log
+ * @sdp: The superblock
+ * @page: The struct page
+ *
+ * This writes the first block-sized part of the page into the log. Note
+ * that the page must have been allocated from the gfs2_page_pool mempool
+ * and that after this has been called, ownership has been transferred and
+ * the page may be freed at any time.
+ */
 
-	submit_bh(WRITE, bh);
+void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
+{
+	struct super_block *sb = sdp->sd_vfs;
+	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
 }
 
-static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
+static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
 {
-	struct buffer_head *bh = gfs2_log_get_buf(sdp);
-	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+	void *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+	struct gfs2_log_descriptor *ld = page_address(page);
+	clear_page(ld);
 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
@@ -246,8 +384,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
 	ld->ld_length = 0;
 	ld->ld_data1 = 0;
 	ld->ld_data2 = 0;
-	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
-	return bh;
+	return page;
 }
 
 static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
@@ -283,9 +420,9 @@ out:
 
 static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 {
-	struct buffer_head *bh;
 	struct gfs2_log_descriptor *ld;
 	struct gfs2_bufdata *bd1 = NULL, *bd2;
+	struct page *page;
 	unsigned int total;
 	unsigned int limit;
 	unsigned int num;
@@ -303,10 +440,10 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		if (total > limit)
 			num = limit;
 		gfs2_log_unlock(sdp);
-		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
+		page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
+		ld = page_address(page);
 		gfs2_log_lock(sdp);
-		ld = bh_log_desc(bh);
-		ptr = bh_log_ptr(bh);
+		ptr = (__be64 *)(ld + 1);
 		ld->ld_length = cpu_to_be32(num + 1);
 		ld->ld_data1 = cpu_to_be32(num);
 
@@ -319,7 +456,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		}
 
 		gfs2_log_unlock(sdp);
-		submit_bh(WRITE, bh);
+		gfs2_log_write_page(sdp, page);
 		gfs2_log_lock(sdp);
 
 		n = 0;
@@ -328,7 +465,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 			get_bh(bd2->bd_bh);
 			gfs2_log_unlock(sdp);
 			lock_buffer(bd2->bd_bh);
-			gfs2_log_write_buf(sdp, bd2->bd_bh);
+			gfs2_log_write_bh(sdp, bd2->bd_bh);
 			gfs2_log_lock(sdp);
 			if (++n >= num)
 				break;
@@ -453,16 +590,16 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 {
 	struct gfs2_log_descriptor *ld;
 	struct gfs2_meta_header *mh;
-	struct buffer_head *bh;
 	unsigned int offset;
 	struct list_head *head = &sdp->sd_log_le_revoke;
 	struct gfs2_bufdata *bd;
+	struct page *page;
 
 	if (!sdp->sd_log_num_revoke)
 		return;
 
-	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
-	ld = bh_log_desc(bh);
+	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
+	ld = page_address(page);
 	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
 						    sizeof(u64)));
 	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
@@ -472,22 +609,23 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-			submit_bh(WRITE, bh);
 
-			bh = gfs2_log_get_buf(sdp);
-			mh = (struct gfs2_meta_header *)bh->b_data;
+			gfs2_log_write_page(sdp, page);
+			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+			mh = page_address(page);
+			clear_page(mh);
 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
 			offset = sizeof(struct gfs2_meta_header);
 		}
 
-		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
+		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
 		offset += sizeof(u64);
 	}
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 
-	submit_bh(WRITE, bh);
+	gfs2_log_write_page(sdp, page);
 }
 
 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
@@ -650,57 +788,51 @@ static void gfs2_check_magic(struct buffer_head *bh)
 	kunmap_atomic(kaddr);
 }
 
-static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
+static void gfs2_write_blocks(struct gfs2_sbd *sdp,
+			      struct gfs2_log_descriptor *ld,
+			      struct page *page,
 			      struct list_head *list, struct list_head *done,
 			      unsigned int n)
 {
-	struct buffer_head *bh1;
-	struct gfs2_log_descriptor *ld;
 	struct gfs2_bufdata *bd;
 	__be64 *ptr;
 
-	if (!bh)
+	if (!ld)
 		return;
 
-	ld = bh_log_desc(bh);
 	ld->ld_length = cpu_to_be32(n + 1);
 	ld->ld_data1 = cpu_to_be32(n);
-
-	ptr = bh_log_ptr(bh);
+	ptr = (__force __be64 *)(ld + 1);
 
-	get_bh(bh);
-	submit_bh(WRITE, bh);
+	gfs2_log_write_page(sdp, page);
 	gfs2_log_lock(sdp);
-	while(!list_empty(list)) {
+	while (!list_empty(list)) {
 		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
 		list_move_tail(&bd->bd_le.le_list, done);
 		get_bh(bd->bd_bh);
-		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
-			gfs2_log_incr_head(sdp);
-			ptr += 2;
-		}
 		gfs2_log_unlock(sdp);
 		lock_buffer(bd->bd_bh);
 		if (buffer_escaped(bd->bd_bh)) {
 			void *kaddr;
-			bh1 = gfs2_log_get_buf(sdp);
+			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+			ptr = page_address(page);
 			kaddr = kmap_atomic(bd->bd_bh->b_page);
-			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
-			       bh1->b_size);
+			memcpy(ptr, kaddr + bh_offset(bd->bd_bh),
+			       bd->bd_bh->b_size);
 			kunmap_atomic(kaddr);
-			*(__be32 *)bh1->b_data = 0;
+			*(__be32 *)ptr = 0;
 			clear_buffer_escaped(bd->bd_bh);
 			unlock_buffer(bd->bd_bh);
 			brelse(bd->bd_bh);
-			submit_bh(WRITE, bh1);
+			gfs2_log_write_page(sdp, page);
 		} else {
-			gfs2_log_write_buf(sdp, bd->bd_bh);
+			gfs2_log_write_bh(sdp, bd->bd_bh);
 		}
+		n--;
 		gfs2_log_lock(sdp);
-		ptr += 2;
 	}
 	gfs2_log_unlock(sdp);
-	brelse(bh);
+	BUG_ON(n != 0);
 }
 
 /**
@@ -711,7 +843,8 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
 static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 {
 	struct gfs2_bufdata *bd = NULL;
-	struct buffer_head *bh = NULL;
+	struct gfs2_log_descriptor *ld = NULL;
+	struct page *page = NULL;
 	unsigned int n = 0;
 	__be64 *ptr = NULL, *end = NULL;
 	LIST_HEAD(processed);
@@ -721,11 +854,13 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 	while (!list_empty(&sdp->sd_log_le_databuf)) {
 		if (ptr == end) {
 			gfs2_log_unlock(sdp);
-			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+			gfs2_write_blocks(sdp, ld, page, &in_progress, &processed, n);
 			n = 0;
-			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
-			ptr = bh_log_ptr(bh);
-			end = bh_ptr_end(bh) - 1;
+			page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
+			ld = page_address(page);
+			ptr = (__force __be64 *)(ld + 1);
+			end = (__force __be64 *)(page_address(page) + sdp->sd_vfs->s_blocksize);
+			end--;
 			gfs2_log_lock(sdp);
 			continue;
 		}
@@ -733,11 +868,11 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 		list_move_tail(&bd->bd_le.le_list, &in_progress);
 		gfs2_check_magic(bd->bd_bh);
 		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
-		*ptr++ = cpu_to_be64(buffer_escaped(bh) ? 1 : 0);
+		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
 		n++;
 	}
 	gfs2_log_unlock(sdp);
-	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+	gfs2_write_blocks(sdp, ld, page, &in_progress, &processed, n);
 	gfs2_log_lock(sdp);
 	list_splice(&processed, &sdp->sd_log_le_databuf);
 	gfs2_log_unlock(sdp);
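The core of the lops.c rewrite above is a cached-bio pattern: each log write is appended to sdp->sd_log_bio for as long as the journal blocks stay physically contiguous, and the bio is only submitted when the run breaks, when bio_add_page() reports the bio is full, or when a caller forces it out via gfs2_log_flush_bio(). The sketch below condenses that decision logic from gfs2_log_get_bio() and gfs2_log_write() in the hunk above (locking and the journal-extent lookup inside gfs2_log_bmap() are elided; the _sketch name is not part of the patch):

/* Condensed sketch of the append-or-flush logic used by the new log write path. */
static void log_write_sketch(struct gfs2_sbd *sdp, struct page *page,
			     unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);	/* next journal block; also advances the flush head */
	struct bio *bio = sdp->sd_log_bio;

	/* Reuse the cached bio only if this block directly follows its last sector. */
	if (!bio ||
	    blkno != ((bio->bi_sector + bio_sectors(bio)) >> sdp->sd_fsb2bb_shift)) {
		gfs2_log_flush_bio(sdp, WRITE);		/* no-op when nothing is pending */
		bio = gfs2_log_alloc_bio(sdp, blkno);	/* becomes the new sdp->sd_log_bio */
	}

	if (bio_add_page(bio, page, size, offset) == 0) {
		/* Bio full: submit it and retry once on a fresh bio. */
		gfs2_log_flush_bio(sdp, WRITE);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		WARN_ON(bio_add_page(bio, page, size, offset) == 0);
	}
}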
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 3c0b2737658a..825356d9dc14 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -27,6 +27,8 @@ extern const struct gfs2_log_operations gfs2_rg_lops;
 extern const struct gfs2_log_operations gfs2_databuf_lops;
 
 extern const struct gfs2_log_operations *gfs2_log_ops[];
+extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
+extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw);
 
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index ce1794428ee4..6cdb0f2a1b09 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -70,16 +70,6 @@ static void gfs2_init_gl_aspace_once(void *foo)
 	address_space_init_once(mapping);
 }
 
-static void *gfs2_bh_alloc(gfp_t mask, void *data)
-{
-	return alloc_buffer_head(mask);
-}
-
-static void gfs2_bh_free(void *ptr, void *data)
-{
-	return free_buffer_head(ptr);
-}
-
 /**
  * init_gfs2_fs - Register GFS2 as a filesystem
  *
@@ -170,8 +160,8 @@ static int __init init_gfs2_fs(void)
 	if (!gfs2_control_wq)
 		goto fail_recovery;
 
-	gfs2_bh_pool = mempool_create(1024, gfs2_bh_alloc, gfs2_bh_free, NULL);
-	if (!gfs2_bh_pool)
+	gfs2_page_pool = mempool_create_page_pool(64, 0);
+	if (!gfs2_page_pool)
 		goto fail_control;
 
 	gfs2_register_debugfs();
@@ -234,7 +224,7 @@ static void __exit exit_gfs2_fs(void)
 
 	rcu_barrier();
 
-	mempool_destroy(gfs2_bh_pool);
+	mempool_destroy(gfs2_page_pool);
 	kmem_cache_destroy(gfs2_rsrv_cachep);
 	kmem_cache_destroy(gfs2_quotad_cachep);
 	kmem_cache_destroy(gfs2_rgrpd_cachep);
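The main.c hunks above swap the old buffer_head mempool for a page mempool: mempool_create_page_pool(64, 0) keeps a reserve of 64 order-0 pages so log descriptors and headers can still be written under memory pressure. A minimal illustration of how the pool is used elsewhere in this patch follows; the helper names here are hypothetical, while the mempool and page calls match the ones in the diff:

/* Illustrative pairing of allocation and release for gfs2_page_pool pages. */
static struct gfs2_log_descriptor *get_scratch_descriptor(void)
{
	/* GFP_NOIO, as in gfs2_get_log_desc(): this runs on the log flush path. */
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);

	clear_page(ld);
	return ld;
}

static void put_scratch_page(struct page *page)
{
	/* gfs2_end_log_write() frees pool pages this way once the bio completes. */
	mempool_free(page, gfs2_page_pool);
}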
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 3afc6ac6fe0d..f00d7c5744f6 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -26,7 +26,7 @@ struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
 struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
 struct kmem_cache *gfs2_quotad_cachep __read_mostly;
 struct kmem_cache *gfs2_rsrv_cachep __read_mostly;
-mempool_t *gfs2_bh_pool __read_mostly;
+mempool_t *gfs2_page_pool __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 8fbe6cffc118..3586b0dd6aa7 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -153,7 +153,7 @@ extern struct kmem_cache *gfs2_bufdata_cachep;
 extern struct kmem_cache *gfs2_rgrpd_cachep;
 extern struct kmem_cache *gfs2_quotad_cachep;
 extern struct kmem_cache *gfs2_rsrv_cachep;
-extern mempool_t *gfs2_bh_pool;
+extern mempool_t *gfs2_page_pool;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 					   unsigned int *p)