/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "inode.h"
#include "meta_io.h"
#include "trans.h"
#include "unlinked.h"
#include "util.h"
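
/*
 * munge_ondisk - write an unlinked tag into its slot in the on-disk
 * unlinked-tag file (sd_ut_inode).  The slot is mapped to a
 * (block, offset) pair, the metadata block is read, and the tag is
 * copied out under sd_unlinked_mutex as part of the current transaction.
 */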
static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
			struct gfs2_unlinked_tag *ut)
{
	struct gfs2_inode *ip = sdp->sd_ut_inode->u.generic_ip;
	unsigned int block, offset;
	uint64_t dblock;
	int new = 0, boundary, error;
	struct buffer_head *bh;

	block = slot / sdp->sd_ut_per_block;
	offset = slot % sdp->sd_ut_per_block;

	error = gfs2_block_map(ip->i_vnode, block, &new, &dblock, &boundary);
	if (error)
		return error;

	error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
		error = -EIO;
		goto out;
	}

	mutex_lock(&sdp->sd_unlinked_mutex);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_unlinked_tag_out(ut, bh->b_data +
			      sizeof(struct gfs2_meta_header) +
			      offset * sizeof(struct gfs2_unlinked_tag));
	mutex_unlock(&sdp->sd_unlinked_mutex);

out:
	brelse(bh);
	return error;
}
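
/*
 * ul_hash - add an entry to the in-core unlinked list and bump the
 * count of unlinked inodes awaiting deallocation.
 */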
static void ul_hash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	spin_lock(&sdp->sd_unlinked_spin);
	list_add(&ul->ul_list, &sdp->sd_unlinked_list);
	gfs2_assert(sdp, ul->ul_count);
	ul->ul_count++;
	atomic_inc(&sdp->sd_unlinked_count);
	spin_unlock(&sdp->sd_unlinked_spin);
}
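
/*
 * ul_unhash - remove an entry from the in-core unlinked list and drop
 * the count; the caller is expected to hold its own reference in
 * addition to the list's.
 */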
static void ul_unhash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	spin_lock(&sdp->sd_unlinked_spin);
	list_del_init(&ul->ul_list);
	gfs2_assert(sdp, ul->ul_count > 1);
	ul->ul_count--;
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_unlinked_count) > 0);
	atomic_dec(&sdp->sd_unlinked_count);
	spin_unlock(&sdp->sd_unlinked_spin);
}
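
/*
 * ul_fish - pull the next unlocked entry off the unlinked list, lock it
 * (ULF_LOCKED) and take a reference for the caller.  The entry is moved
 * to the tail so repeated calls cycle through the list.  Returns NULL on
 * a read-only mount or when no unlocked entry is available.
 */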
static struct gfs2_unlinked *ul_fish(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_unlinked *ul;
	int found = 0;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return NULL;

	spin_lock(&sdp->sd_unlinked_spin);

	head = &sdp->sd_unlinked_list;

	list_for_each_entry(ul, head, ul_list) {
		if (test_bit(ULF_LOCKED, &ul->ul_flags))
			continue;

		list_move_tail(&ul->ul_list, head);
		ul->ul_count++;
		set_bit(ULF_LOCKED, &ul->ul_flags);
		found = 1;
		break;
	}

	if (!found)
		ul = NULL;

	spin_unlock(&sdp->sd_unlinked_spin);

	return ul;
}

/**
 * enforce_limit - limit the number of inodes waiting to be deallocated
 * @sdp: the filesystem
 */
static void enforce_limit(struct gfs2_sbd *sdp)
{
	unsigned int tries = 0, min = 0;
	int error;

	if (atomic_read(&sdp->sd_unlinked_count) <
	    gfs2_tune_get(sdp, gt_ilimit))
		return;

	tries = gfs2_tune_get(sdp, gt_ilimit_tries);
	min = gfs2_tune_get(sdp, gt_ilimit_min);

	while (tries--) {
		struct gfs2_unlinked *ul = ul_fish(sdp);
		if (!ul)
			break;
		error = gfs2_inode_dealloc(sdp, ul);
		gfs2_unlinked_put(sdp, ul);

		if (!error) {
			if (!--min)
				break;
		} else if (error != 1)
			break;
	}
}
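
/*
 * ul_alloc - allocate a new in-core unlinked structure, returned with
 * one reference held and ULF_LOCKED set, or NULL on allocation failure.
 */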
static struct gfs2_unlinked *ul_alloc(struct gfs2_sbd *sdp)
{
	struct gfs2_unlinked *ul;

	ul = kzalloc(sizeof(struct gfs2_unlinked), GFP_KERNEL);
	if (ul) {
		INIT_LIST_HEAD(&ul->ul_list);
		ul->ul_count = 1;
		set_bit(ULF_LOCKED, &ul->ul_flags);
	}

	return ul;
}
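
/**
 * gfs2_unlinked_get - find a free slot in the unlinked-tag bitmap
 * @sdp: the filesystem
 * @ul: the new unlinked structure is returned here
 *
 * Each bitmap chunk is one page, so a slot number decomposes as
 * chunk * (8 * PAGE_SIZE) + byte * 8 + bit.  The scan and the bitmap
 * update are both done under sd_unlinked_spin.
 */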
int gfs2_unlinked_get(struct gfs2_sbd *sdp, struct gfs2_unlinked **ul)
{
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_unlinked_spin);

	for (c = 0; c < sdp->sd_unlinked_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_unlinked_bitmap[c][o];

	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;

	(*ul)->ul_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if ((*ul)->ul_slot >= sdp->sd_unlinked_slots)
		goto fail;

	sdp->sd_unlinked_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_unlinked_spin);

	return 0;

fail:
	spin_unlock(&sdp->sd_unlinked_spin);
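
/**
 * gfs2_unlinked_put - release a reference on an unlinked structure
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Clears ULF_LOCKED and drops a reference; when the last reference is
 * gone the slot bit is cleared and the structure is freed.
 */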
void gfs2_unlinked_put(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	gfs2_assert_warn(sdp, test_and_clear_bit(ULF_LOCKED, &ul->ul_flags));

	spin_lock(&sdp->sd_unlinked_spin);
	gfs2_assert(sdp, ul->ul_count);
	ul->ul_count--;
	if (!ul->ul_count) {
		gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, ul->ul_slot, 0);
		spin_unlock(&sdp->sd_unlinked_spin);
		kfree(ul);
	} else
		spin_unlock(&sdp->sd_unlinked_spin);
}
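
/**
 * gfs2_unlinked_ondisk_add - write a new tag to disk and hash it
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Writes the tag into its slot in the unlinked-tag file and, on success,
 * adds the structure to the in-core unlinked list.
 *
 * Returns: errno
 */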
int gfs2_unlinked_ondisk_add(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, list_empty(&ul->ul_list));

	error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
	if (!error)
		ul_hash(sdp, ul);

	return error;
}
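
/**
 * gfs2_unlinked_ondisk_munge - rewrite the on-disk tag of a hashed entry
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Returns: errno
 */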
int gfs2_unlinked_ondisk_munge(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

	error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);

	return error;
}
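
/**
 * gfs2_unlinked_ondisk_rm - clear an entry's on-disk tag and unhash it
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Returns: errno
 */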
int gfs2_unlinked_ondisk_rm(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_unlinked_tag ut;
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

	memset(&ut, 0, sizeof(struct gfs2_unlinked_tag));

	error = munge_ondisk(sdp, ul->ul_slot, &ut);
	if (error)
		return error;

	ul_unhash(sdp, ul);

	return 0;
}

/**
 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
 * @sdp: the filesystem
 */
int gfs2_unlinked_dealloc(struct gfs2_sbd *sdp)
{
	unsigned int hits, strikes;
	int error;

		struct gfs2_unlinked *ul = ul_fish(sdp);

		error = gfs2_inode_dealloc(sdp, ul);
		gfs2_unlinked_put(sdp, ul);

		} else if (error == 1) {
			atomic_read(&sdp->sd_unlinked_count)) {

		if (!hits || kthread_should_stop())
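
/**
 * gfs2_unlinked_init - read in the unlinked tags at mount time
 * @sdp: the filesystem
 *
 * Sizes the slot bitmap from the unlinked-tag file and builds an in-core
 * entry (and bitmap bit) for every tag that still references an inode.
 *
 * Returns: errno
 */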
int gfs2_unlinked_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = sdp->sd_ut_inode->u.generic_ip;
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int y;
	uint64_t dblock;
	uint32_t extlen = 0;
	int new = 0;
	int error;
	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_unlinked_slots = blocks * sdp->sd_ut_per_block;
	sdp->sd_unlinked_chunks = DIV_ROUND_UP(sdp->sd_unlinked_slots,
					       8 * PAGE_SIZE);

	sdp->sd_unlinked_bitmap = kcalloc(sdp->sd_unlinked_chunks,
					  sizeof(unsigned char *),
					  GFP_KERNEL);
	if (!sdp->sd_unlinked_bitmap)
		return -ENOMEM;
	for (x = 0; x < sdp->sd_unlinked_chunks; x++) {
		sdp->sd_unlinked_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_unlinked_bitmap[x]) {
			error = -ENOMEM;
			goto fail;
		}
	}
	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;

		error = gfs2_extent_map(ip->i_vnode, x, &new, &dblock, &extlen);
		if (error)
			goto fail;

		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
				       &bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
			brelse(bh);
			error = -EIO;
			goto fail;
		}
		for (y = 0;
		     y < sdp->sd_ut_per_block && slot < sdp->sd_unlinked_slots;
		     y++, slot++) {
			struct gfs2_unlinked_tag ut;
			struct gfs2_unlinked *ul;

			gfs2_unlinked_tag_in(&ut, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_unlinked_tag));
			if (!ut.ut_inum.no_addr)
				continue;
			spin_lock(&sdp->sd_unlinked_spin);
			gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, slot, 1);
			spin_unlock(&sdp->sd_unlinked_spin);

			gfs2_unlinked_put(sdp, ul);
	fs_info(sdp, "found %u unlinked inodes\n", found);

	return 0;

fail:
	gfs2_unlinked_cleanup(sdp);
	return error;
}

/**
 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
 * @sdp: the filesystem
 */
void gfs2_unlinked_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_unlinked_list;
	struct gfs2_unlinked *ul;
	unsigned int x;
	spin_lock(&sdp->sd_unlinked_spin);
	while (!list_empty(head)) {
		ul = list_entry(head->next, struct gfs2_unlinked, ul_list);

		if (ul->ul_count > 1) {
			list_move_tail(&ul->ul_list, head);
			spin_unlock(&sdp->sd_unlinked_spin);
			spin_lock(&sdp->sd_unlinked_spin);
			continue;
		}
		list_del_init(&ul->ul_list);
		atomic_dec(&sdp->sd_unlinked_count);

		gfs2_assert_warn(sdp, ul->ul_count == 1);
		gfs2_assert_warn(sdp, !test_bit(ULF_LOCKED, &ul->ul_flags));
		kfree(ul);
	}
	spin_unlock(&sdp->sd_unlinked_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_unlinked_count));
	if (sdp->sd_unlinked_bitmap) {
		for (x = 0; x < sdp->sd_unlinked_chunks; x++)
			kfree(sdp->sd_unlinked_bitmap[x]);
		kfree(sdp->sd_unlinked_bitmap);
		sdp->sd_unlinked_bitmap = NULL;
	}
}