aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2/unlinked.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/gfs2/unlinked.c')
-rw-r--r--fs/gfs2/unlinked.c453
1 files changed, 453 insertions, 0 deletions
diff --git a/fs/gfs2/unlinked.c b/fs/gfs2/unlinked.c
new file mode 100644
index 000000000000..4a993af58c1a
--- /dev/null
+++ b/fs/gfs2/unlinked.c
@@ -0,0 +1,453 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/kthread.h>
16#include <asm/semaphore.h>
17
18#include "gfs2.h"
19#include "bmap.h"
20#include "inode.h"
21#include "meta_io.h"
22#include "trans.h"
23#include "unlinked.h"
24
25static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
26 struct gfs2_unlinked_tag *ut)
27{
28 struct gfs2_inode *ip = sdp->sd_ut_inode;
29 unsigned int block, offset;
30 uint64_t dblock;
31 int new = 0;
32 struct buffer_head *bh;
33 int error;
34
35 block = slot / sdp->sd_ut_per_block;
36 offset = slot % sdp->sd_ut_per_block;
37
38 error = gfs2_block_map(ip, block, &new, &dblock, NULL);
39 if (error)
40 return error;
41 error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
42 if (error)
43 return error;
44 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
45 error = -EIO;
46 goto out;
47 }
48
49 down(&sdp->sd_unlinked_mutex);
50 gfs2_trans_add_bh(ip->i_gl, bh);
51 gfs2_unlinked_tag_out(ut, bh->b_data +
52 sizeof(struct gfs2_meta_header) +
53 offset * sizeof(struct gfs2_unlinked_tag));
54 up(&sdp->sd_unlinked_mutex);
55
56 out:
57 brelse(bh);
58
59 return error;
60}
61
62static void ul_hash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
63{
64 spin_lock(&sdp->sd_unlinked_spin);
65 list_add(&ul->ul_list, &sdp->sd_unlinked_list);
66 gfs2_assert(sdp, ul->ul_count);
67 ul->ul_count++;
68 atomic_inc(&sdp->sd_unlinked_count);
69 spin_unlock(&sdp->sd_unlinked_spin);
70}
71
72static void ul_unhash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
73{
74 spin_lock(&sdp->sd_unlinked_spin);
75 list_del_init(&ul->ul_list);
76 gfs2_assert(sdp, ul->ul_count > 1);
77 ul->ul_count--;
78 gfs2_assert_warn(sdp, atomic_read(&sdp->sd_unlinked_count) > 0);
79 atomic_dec(&sdp->sd_unlinked_count);
80 spin_unlock(&sdp->sd_unlinked_spin);
81}
82
83static struct gfs2_unlinked *ul_fish(struct gfs2_sbd *sdp)
84{
85 struct list_head *head;
86 struct gfs2_unlinked *ul;
87 int found = 0;
88
89 if (sdp->sd_vfs->s_flags & MS_RDONLY)
90 return NULL;
91
92 spin_lock(&sdp->sd_unlinked_spin);
93
94 head = &sdp->sd_unlinked_list;
95
96 list_for_each_entry(ul, head, ul_list) {
97 if (test_bit(ULF_LOCKED, &ul->ul_flags))
98 continue;
99
100 list_move_tail(&ul->ul_list, head);
101 ul->ul_count++;
102 set_bit(ULF_LOCKED, &ul->ul_flags);
103 found = 1;
104
105 break;
106 }
107
108 if (!found)
109 ul = NULL;
110
111 spin_unlock(&sdp->sd_unlinked_spin);
112
113 return ul;
114}
115
/**
 * enforce_limit - limit the number of inodes waiting to be deallocated
 * @sdp: the filesystem
 *
 * If the number of cached unlinked inodes has reached gt_ilimit, make up
 * to gt_ilimit_tries attempts to deallocate one, stopping early once
 * gt_ilimit_min deallocations have succeeded.
 */

static void enforce_limit(struct gfs2_sbd *sdp)
{
	unsigned int tries = 0, min = 0;
	int error;

	if (atomic_read(&sdp->sd_unlinked_count) >=
	    gfs2_tune_get(sdp, gt_ilimit)) {
		tries = gfs2_tune_get(sdp, gt_ilimit_tries);
		min = gfs2_tune_get(sdp, gt_ilimit_min);
	}

	while (tries--) {
		struct gfs2_unlinked *ul = ul_fish(sdp);
		if (!ul)
			break;
		error = gfs2_inode_dealloc(sdp, ul);
		gfs2_unlinked_put(sdp, ul);

		if (!error) {
			/* success; quit after "min" of them */
			if (!--min)
				break;
		} else if (error != 1)
			/* NOTE(review): error == 1 appears to mean "can't
			   deallocate this one right now, try another" (see
			   the strike handling in gfs2_unlinked_dealloc);
			   any other non-zero value is a hard error.
			   NOTE(review): if gt_ilimit_min is tuned to 0, the
			   --min above wraps and never hits zero — confirm
			   whether that is intended. */
			break;
	}
}
148
149static struct gfs2_unlinked *ul_alloc(struct gfs2_sbd *sdp)
150{
151 struct gfs2_unlinked *ul;
152
153 ul = kzalloc(sizeof(struct gfs2_unlinked), GFP_KERNEL);
154 if (ul) {
155 INIT_LIST_HEAD(&ul->ul_list);
156 ul->ul_count = 1;
157 set_bit(ULF_LOCKED, &ul->ul_flags);
158 }
159
160 return ul;
161}
162
/**
 * gfs2_unlinked_get - allocate an unlinked structure and reserve a slot
 * @sdp: the filesystem
 * @ul: returns the new structure (ULF_LOCKED set, one reference held)
 *
 * Scans the in-core bitmap for a free slot in the on-disk unlinked file
 * and marks it in use.  Calls enforce_limit() first so the list of
 * pending deallocations cannot grow without bound.
 *
 * Returns: errno (-ENOMEM on allocation failure, -ENOSPC if no slot free)
 */

int gfs2_unlinked_get(struct gfs2_sbd *sdp, struct gfs2_unlinked **ul)
{
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	enforce_limit(sdp);

	*ul = ul_alloc(sdp);
	if (!*ul)
		return -ENOMEM;

	spin_lock(&sdp->sd_unlinked_spin);

	/* find the first bitmap byte that still has a clear bit */
	for (c = 0; c < sdp->sd_unlinked_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_unlinked_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

 found:
	/* find the first clear bit within that byte */
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	(*ul)->ul_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	/* the last chunk may cover more bits than there are real slots */
	if ((*ul)->ul_slot >= sdp->sd_unlinked_slots)
		goto fail;

	sdp->sd_unlinked_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_unlinked_spin);

	return 0;

 fail:
	spin_unlock(&sdp->sd_unlinked_spin);
	kfree(*ul);
	return -ENOSPC;
}
205
206void gfs2_unlinked_put(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
207{
208 gfs2_assert_warn(sdp, test_and_clear_bit(ULF_LOCKED, &ul->ul_flags));
209
210 spin_lock(&sdp->sd_unlinked_spin);
211 gfs2_assert(sdp, ul->ul_count);
212 ul->ul_count--;
213 if (!ul->ul_count) {
214 gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, ul->ul_slot, 0);
215 spin_unlock(&sdp->sd_unlinked_spin);
216 kfree(ul);
217 } else
218 spin_unlock(&sdp->sd_unlinked_spin);
219}
220
221int gfs2_unlinked_ondisk_add(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
222{
223 int error;
224
225 gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
226 gfs2_assert_warn(sdp, list_empty(&ul->ul_list));
227
228 error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
229 if (!error)
230 ul_hash(sdp, ul);
231
232 return error;
233}
234
235int gfs2_unlinked_ondisk_munge(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
236{
237 int error;
238
239 gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
240 gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));
241
242 error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
243
244 return error;
245}
246
247int gfs2_unlinked_ondisk_rm(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
248{
249 struct gfs2_unlinked_tag ut;
250 int error;
251
252 gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
253 gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));
254
255 memset(&ut, 0, sizeof(struct gfs2_unlinked_tag));
256
257 error = munge_ondisk(sdp, ul->ul_slot, &ut);
258 if (error)
259 return error;
260
261 ul_unhash(sdp, ul);
262
263 return 0;
264}
265
/**
 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
 * @sdp: the filesystem
 *
 * Fishes entries off the list and tries to deallocate each one.  A
 * return of 1 from gfs2_inode_dealloc() counts as a "strike" (can't do
 * this one right now); a success counts as a "hit" and forgives one
 * strike.  The inner loop gives up once strikes reach the number of
 * cached entries (everything remaining is presumed stuck); the outer
 * loop retries as long as the previous pass made some progress and the
 * thread hasn't been asked to stop.
 *
 * Returns: errno
 */

int gfs2_unlinked_dealloc(struct gfs2_sbd *sdp)
{
	unsigned int hits, strikes;
	int error;

	for (;;) {
		hits = 0;
		strikes = 0;

		for (;;) {
			struct gfs2_unlinked *ul = ul_fish(sdp);
			if (!ul)
				return 0;	/* list empty or all busy: done */
			error = gfs2_inode_dealloc(sdp, ul);
			gfs2_unlinked_put(sdp, ul);

			if (!error) {
				hits++;
				if (strikes)
					strikes--;
			} else if (error == 1) {
				strikes++;
				if (strikes >=
				    atomic_read(&sdp->sd_unlinked_count)) {
					error = 0;
					break;
				}
			} else
				return error;	/* hard error */
		}

		if (!hits || kthread_should_stop())
			break;

		cond_resched();
	}

	return 0;
}
312
/**
 * gfs2_unlinked_init - read the on-disk unlinked file at mount time
 * @sdp: the filesystem
 *
 * Sizes and allocates the in-core slot bitmap, then walks every block of
 * the unlinked file, building and hashing a struct gfs2_unlinked for
 * each tag that still names an inode (ut_inum.no_addr != 0).  On any
 * failure all partial state is torn down via gfs2_unlinked_cleanup().
 *
 * Returns: errno
 */

int gfs2_unlinked_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = sdp->sd_ut_inode;
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	uint64_t dblock;
	uint32_t extlen = 0;
	int error;

	/* sanity: the file must be non-empty, at most 64 MB, and an exact
	   multiple of the block size */
	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_unlinked_slots = blocks * sdp->sd_ut_per_block;
	sdp->sd_unlinked_chunks = DIV_RU(sdp->sd_unlinked_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	/* bitmap is an array of page-sized chunks, one bit per slot */
	sdp->sd_unlinked_bitmap = kcalloc(sdp->sd_unlinked_chunks,
					  sizeof(unsigned char *),
					  GFP_KERNEL);
	if (!sdp->sd_unlinked_bitmap)
		return error;

	for (x = 0; x < sdp->sd_unlinked_chunks; x++) {
		sdp->sd_unlinked_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_unlinked_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		/* re-map only when the previous extent is exhausted */
		if (!extlen) {
			int new = 0;
			error = gfs2_block_map(ip, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
				       &bh);
		if (error)
			goto fail;
		error = -EIO;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0;
		     y < sdp->sd_ut_per_block && slot < sdp->sd_unlinked_slots;
		     y++, slot++) {
			struct gfs2_unlinked_tag ut;
			struct gfs2_unlinked *ul;

			gfs2_unlinked_tag_in(&ut, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_unlinked_tag));
			/* a zero inode address means the slot is unused */
			if (!ut.ut_inum.no_addr)
				continue;

			error = -ENOMEM;
			ul = ul_alloc(sdp);
			if (!ul) {
				brelse(bh);
				goto fail;
			}
			ul->ul_ut = ut;
			ul->ul_slot = slot;

			spin_lock(&sdp->sd_unlinked_spin);
			gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, slot, 1);
			spin_unlock(&sdp->sd_unlinked_spin);
			ul_hash(sdp, ul);

			/* drop the allocation reference; the list keeps its
			   own, so ul survives with ul_count == 1 */
			gfs2_unlinked_put(sdp, ul);
			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u unlinked inodes\n", found);

	return 0;

 fail:
	gfs2_unlinked_cleanup(sdp);
	return error;
}
411
/**
 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
 * @sdp: the filesystem
 *
 * Frees every list entry and the slot bitmap.  Entries still referenced
 * elsewhere (ul_count > 1) are rotated to the tail and waited on by
 * dropping the lock and scheduling, until their other holders let go.
 */

void gfs2_unlinked_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_unlinked_list;
	struct gfs2_unlinked *ul;
	unsigned int x;

	spin_lock(&sdp->sd_unlinked_spin);
	while (!list_empty(head)) {
		ul = list_entry(head->next, struct gfs2_unlinked, ul_list);

		if (ul->ul_count > 1) {
			/* someone else still holds a reference: move it to
			   the back, give them a chance to run, and retry */
			list_move_tail(&ul->ul_list, head);
			spin_unlock(&sdp->sd_unlinked_spin);
			schedule();
			spin_lock(&sdp->sd_unlinked_spin);
			continue;
		}

		list_del_init(&ul->ul_list);
		atomic_dec(&sdp->sd_unlinked_count);

		/* only the list's own reference should remain */
		gfs2_assert_warn(sdp, ul->ul_count == 1);
		gfs2_assert_warn(sdp, !test_bit(ULF_LOCKED, &ul->ul_flags));
		kfree(ul);
	}
	spin_unlock(&sdp->sd_unlinked_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_unlinked_count));

	/* bitmap may be absent if gfs2_unlinked_init failed early */
	if (sdp->sd_unlinked_bitmap) {
		for (x = 0; x < sdp->sd_unlinked_chunks; x++)
			kfree(sdp->sd_unlinked_bitmap[x]);
		kfree(sdp->sd_unlinked_bitmap);
	}
}
453