2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
/* Forward declarations for the allocator helpers defined later in this file. */
24 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
25 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
26 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
27 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
/* Lookup tables mapping a free-fragment bitmap byte to free-run information. */
28 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
29 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
32 * Free 'count' fragments from fragment number 'fragment'
/*
 * ufs_free_fragments - give 'count' fragments, starting at fragment number
 * 'fragment', back to their cylinder group: set the bits in the cg fragment
 * bitmap, return the blocks to the quota, bump the nffree summaries, and if
 * the enclosing block becomes entirely free, re-account it as one free block.
 * NOTE(review): this excerpt elides some lines (braces, error-path exits);
 * comments below describe only the visible statements.
 */
34 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
36 	struct super_block * sb;
37 	struct ufs_sb_private_info * uspi;
38 	struct ufs_super_block_first * usb1;
39 	struct ufs_cg_private_info * ucpi;
40 	struct ufs_cylinder_group * ucg;
41 	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
44 	uspi = UFS_SB(sb)->s_uspi;
45 	usb1 = ubh_get_usb_first(uspi);
47 	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/* Sanity: the run to free must not cross past the cylinder-group boundary. */
49 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
50 		ufs_error (sb, "ufs_free_fragments", "internal error");
/* Map the device fragment number to a cylinder group and a bit offset. */
54 	cgno = ufs_dtog(fragment);
55 	bit = ufs_dtogd(fragment);
56 	if (cgno >= uspi->s_ncg) {
57 		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
61 	ucpi = ufs_load_cylinder (sb, cgno);
64 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
65 	if (!ufs_cg_chkmagic(sb, ucg)) {
66 		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
70 	end_bit = bit + count;
71 	bbase = ufs_blknum (bit);
/* Remove the old fragment pattern of this block from cg_frsum before edit. */
72 	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
73 	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* Set each fragment's free bit; a bit already set means a double free. */
74 	for (i = bit; i < end_bit; i++) {
75 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
76 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
78 			ufs_error (sb, "ufs_free_fragments",
79 				   "bit already cleared for fragment %u", i);
82 	DQUOT_FREE_BLOCK (inode, count);
/* Bump free-fragment counters: this cg, the fs total, the in-core summary. */
85 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
86 	fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
87 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* Re-add the block's new fragment pattern to cg_frsum. */
88 	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
89 	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
/*
 * If the whole block containing these fragments is now free, convert the
 * accounting from s_fpb loose fragments into one free block.
 */
94 	blkno = ufs_fragstoblks (bbase);
95 	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
96 		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
97 		fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
98 		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
/* 4.4BSD-style groups also track contiguous-cluster summaries. */
99 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
100 			ufs_clusteracct (sb, ucpi, blkno, 1);
101 		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
102 		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
103 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* Update the per-cylinder rotational position and block totals. */
104 		cylno = ufs_cbtocylno (bbase);
105 		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
106 		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Mark the superblock and cg buffers dirty; write synchronously for -o sync. */
109 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
110 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
111 	if (sb->s_flags & MS_SYNCHRONOUS) {
112 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
113 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
123 	UFSD("EXIT (FAILED)\n");
128 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * ufs_free_blocks - free 'count' fragments that form whole blocks, starting
 * at fragment number 'fragment'.  Both arguments must be block-aligned.
 * NOTE(review): some lines (loop over overflow into the next cg, error-path
 * exits) are elided in this excerpt; comments cover visible statements only.
 */
130 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
132 	struct super_block * sb;
133 	struct ufs_sb_private_info * uspi;
134 	struct ufs_super_block_first * usb1;
135 	struct ufs_cg_private_info * ucpi;
136 	struct ufs_cylinder_group * ucg;
137 	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
140 	uspi = UFS_SB(sb)->s_uspi;
141 	usb1 = ubh_get_usb_first(uspi);
143 	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/* Reject requests that are not aligned to whole blocks. */
145 	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
146 		ufs_error (sb, "ufs_free_blocks", "internal error, "
147 			   "fragment %u, count %u\n", fragment, count);
155 	cgno = ufs_dtog (fragment);
156 	bit = ufs_dtogd (fragment);
157 	if (cgno >= uspi->s_ncg) {
158 		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
161 	end_bit = bit + count;
/* Run crosses into the next cylinder group: remember the overflow part
 * (handling of the remainder is on lines elided from this excerpt). */
162 	if (end_bit > uspi->s_fpg) {
163 		overflow = bit + count - uspi->s_fpg;
168 	ucpi = ufs_load_cylinder (sb, cgno);
171 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
172 	if (!ufs_cg_chkmagic(sb, ucg)) {
173 		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one whole block per iteration; a block already marked free is a bug. */
177 	for (i = bit; i < end_bit; i += uspi->s_fpb) {
178 		blkno = ufs_fragstoblks(i);
179 		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
180 			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
182 		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
183 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
184 			ufs_clusteracct (sb, ucpi, blkno, 1);
185 		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
/* One more free block in this cg, the fs total, and the in-core summary. */
187 		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
188 		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
189 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
190 		cylno = ufs_cbtocylno(i);
191 		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
192 		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Push the metadata out; synchronously when the fs is mounted -o sync. */
195 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
196 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
197 	if (sb->s_flags & MS_SYNCHRONOUS) {
198 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
199 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
215 	UFSD("EXIT (FAILED)\n");
/*
 * ufs_get_locked_page - return the page at 'index' of 'mapping', locked.
 * Tries find_lock_page() first; if the page is not cached, reads it in via
 * the mapping's ->readpage.  On read failure the page is released and an
 * ERR_PTR is returned.  If the locked page has lost its mapping or its
 * buffers, it is released and the lookup is retried (goto try_again), since
 * the caller requires attached buffer_heads.
 * NOTE(review): several lines (lock_page after read, return paths) are
 * elided in this excerpt.
 */
219 static struct page *ufs_get_locked_page(struct address_space *mapping,
225 	page = find_lock_page(mapping, index);
/* Not in the page cache: read it in through the address_space readpage op. */
227 		page = read_cache_page(mapping, index,
228 				       (filler_t*)mapping->a_ops->readpage,
231 			printk(KERN_ERR "ufs_change_blocknr: "
232 			       "read_cache_page error: ino %lu, index: %lu\n",
233 			       mapping->host->i_ino, index);
/* The read completed but the page is bad: drop it and report -EIO. */
239 		if (!PageUptodate(page) || PageError(page)) {
241 			page_cache_release(page);
243 			printk(KERN_ERR "ufs_change_blocknr: "
244 			       "can not read page: ino %lu, index: %lu\n",
245 			       mapping->host->i_ino, index);
247 			page = ERR_PTR(-EIO);
/* Page was truncated or lost its buffers while we slept: start over. */
252 	if (unlikely(!page->mapping || !page_has_buffers(page))) {
254 		page_cache_release(page);
255 		goto try_again;/*we really need these buffers*/
262 * Modify inode page cache in such way:
263 * have - blocks with b_blocknr equal to oldb...oldb+count-1
264 * get - blocks with b_blocknr equal to newb...newb+count-1
265 * also we suppose that oldb...oldb+count-1 blocks
266 * situated at the end of file.
268 * We can come here from ufs_writepage or ufs_prepare_write;
269 * locked_page is an argument of these functions, so it is already locked.
/*
 * ufs_change_blocknr - rewire the page-cache buffers of the last 'count'
 * blocks of the inode from device blocks oldb..oldb+count-1 to
 * newb..newb+count-1 (used when a partially-filled block is reallocated).
 * 'locked_page' is already locked by the caller (ufs_writepage /
 * ufs_prepare_write path, per the comment above); pages other than it are
 * taken and released via ufs_get_locked_page().
 */
271 static void ufs_change_blocknr(struct inode *inode, unsigned int count,
272 			       unsigned int oldb, unsigned int newb,
273 			       struct page *locked_page)
275 	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
277 	struct address_space *mapping = inode->i_mapping;
278 	pgoff_t index, cur_index = locked_page->index;
281 	struct buffer_head *head, *bh;
/* First file block of the moved run: the blocks sit at the end of the file. */
283 	baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
285 	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
286 	     inode->i_ino, count, oldb, newb);
288 	BUG_ON(!PageLocked(locked_page));
/* Walk the affected pages, one page (blk_per_page blocks) per iteration. */
290 	for (i = 0; i < count; i += blk_per_page) {
291 		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* Only lock pages we don't already hold; cur_index is the caller's page. */
293 		if (likely(cur_index != index)) {
294 			page = ufs_get_locked_page(mapping, index);
301 		head = page_buffers(page);
/* Retarget each buffer that still points at the old run; 'j' counts how
 * many blocks have been switched so far across pages. */
304 			if (likely(bh->b_blocknr == j + oldb && j < count)) {
305 				unmap_underlying_metadata(bh->b_bdev,
307 				bh->b_blocknr = newb + j++;
308 				mark_buffer_dirty(bh);
311 			bh = bh->b_this_page;
312 		} while (bh != head);
314 		set_page_dirty(page);
/* Release only the pages this function locked itself. */
316 		if (likely(cur_index != index)) {
318 			page_cache_release(page);
/*
 * ufs_new_fragments - central allocation entry point: obtain 'count'
 * fragments for file offset 'fragment', preferring 'goal'.  Three cases:
 *   1) the block pointer *p is empty -> allocate fresh fragments;
 *   2) the existing allocation at *p can be extended in place
 *      (ufs_add_fragments);
 *   3) otherwise allocate a (possibly full) new block, move the cached
 *      data with ufs_change_blocknr(), and free the old fragments.
 * Returns the new fragment number (0 on failure); *err carries the errno.
 * NOTE(review): this excerpt elides lines (lock_super/unlock, some branch
 * bodies); comments describe visible statements only.
 */
324 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
325 			   unsigned goal, unsigned count, int * err, struct page *locked_page)
327 	struct super_block * sb;
328 	struct ufs_sb_private_info * uspi;
329 	struct ufs_super_block_first * usb1;
330 	unsigned cgno, oldcount, newcount, tmp, request, result;
332 	UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);
335 	uspi = UFS_SB(sb)->s_uspi;
336 	usb1 = ubh_get_usb_first(uspi);
/* tmp = fragment number currently stored in the block pointer (0 = none). */
341 	tmp = fs32_to_cpu(sb, *p);
/* Clamp a request that would spill past the block boundary. */
342 	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
343 		ufs_warning (sb, "ufs_new_fragments", "internal warning"
344 			     " fragment %u, count %u", fragment, count);
345 		count = uspi->s_fpb - ufs_fragnum(fragment);
347 	oldcount = ufs_fragnum (fragment);
348 	newcount = oldcount + count;
/* Race check: someone else filled in this block pointer meanwhile. */
355 		ufs_error (sb, "ufs_new_fragments", "internal error, "
356 			   "fragment %u, tmp %u\n", fragment, tmp);
360 		if (fragment < UFS_I(inode)->i_lastfrag) {
361 			UFSD("EXIT (ALREADY ALLOCATED)\n");
368 		UFSD("EXIT (ALREADY ALLOCATED)\n");
/* Reserve the UFS_MINFREE percentage for root, as in classic FFS. */
377 	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
379 		UFSD("EXIT (FAILED)\n");
/* Pick a starting cylinder group: from the goal, or from the inode's cg. */
383 	if (goal >= uspi->s_size)
386 		cgno = ufs_inotocg (inode->i_ino);
388 		cgno = ufs_dtog (goal);
/*
 * Case 1: nothing allocated yet at *p -- grab fresh fragments.
 */
394 		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
396 			*p = cpu_to_fs32(sb, result);
398 			inode->i_blocks += count << uspi->s_nspfshift;
399 			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
402 		UFSD("EXIT, result %u\n", result);
/*
 * Case 2: try to extend the existing partial allocation in place.
 */
409 	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
412 		inode->i_blocks += count << uspi->s_nspfshift;
413 		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
415 		UFSD("EXIT, result %u\n", result);
/*
 * Case 3: allocate a new area and move the data there.  The fs_optim
 * policy switches between optimizing for space and for time, mirroring
 * the classic FFS allocator.
 */
422 	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
425 		if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
426 		    > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
428 		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
431 		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
/* Optimize for time: request a whole block. */
434 		request = uspi->s_fpb;
435 		if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
436 		    (uspi->s_minfree - 2) / 100)
438 		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
441 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* Relocate cached buffers from the old fragments to the new ones. */
443 		ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
445 		*p = cpu_to_fs32(sb, result);
447 		inode->i_blocks += count << uspi->s_nspfshift;
448 		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Trim the over-allocation, then free the old location. */
450 		if (newcount < request)
451 			ufs_free_fragments (inode, result + newcount, request - newcount);
452 		ufs_free_fragments (inode, tmp, oldcount);
453 		UFSD("EXIT, result %u\n", result);
458 	UFSD("EXIT (FAILED)\n");
/*
 * ufs_add_fragments - try to grow an existing allocation at 'fragment' from
 * 'oldcount' to 'newcount' fragments in place.  Succeeds only if the
 * fragments immediately following the current run are free in the cg bitmap.
 * Returns the (unchanged) fragment number on success, 0 on failure; *err is
 * set by the quota charge.  NOTE(review): some lines (return statements,
 * closing braces) are elided in this excerpt.
 */
463 ufs_add_fragments (struct inode * inode, unsigned fragment,
464 		   unsigned oldcount, unsigned newcount, int * err)
466 	struct super_block * sb;
467 	struct ufs_sb_private_info * uspi;
468 	struct ufs_super_block_first * usb1;
469 	struct ufs_cg_private_info * ucpi;
470 	struct ufs_cylinder_group * ucg;
471 	unsigned cgno, fragno, fragoff, count, fragsize, i;
473 	UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
476 	uspi = UFS_SB(sb)->s_uspi;
477 	usb1 = ubh_get_usb_first (uspi);
478 	count = newcount - oldcount;
/* Quick rejects: not enough free fragments in the cg, or the grown run
 * would cross the block boundary. */
480 	cgno = ufs_dtog(fragment);
481 	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
483 	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
485 	ucpi = ufs_load_cylinder (sb, cgno);
488 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
489 	if (!ufs_cg_chkmagic(sb, ucg)) {
490 		ufs_panic (sb, "ufs_add_fragments",
491 			   "internal error, bad magic number on cg %u", cgno);
495 	fragno = ufs_dtogd (fragment);
496 	fragoff = ufs_fragnum (fragno);
/* All fragments we want to append must currently be free. */
497 	for (i = oldcount; i < newcount; i++)
498 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
503 	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run following the allocation to fix up cg_frsum:
 * the old free-run size loses one entry, the shrunken size gains one. */
504 	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
505 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
507 	fragsize = i - oldcount;
508 	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
509 		ufs_panic (sb, "ufs_add_fragments",
510 			   "internal error or corrupted bitmap on cg %u", cgno);
511 	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
512 	if (fragsize != count)
513 		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the new fragments: clear their free bits and charge the quota. */
514 	for (i = oldcount; i < newcount; i++)
515 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
516 	if(DQUOT_ALLOC_BLOCK(inode, count)) {
521 	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
522 	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
523 	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
/* Flush metadata; synchronous write when mounted -o sync. */
525 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
526 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
527 	if (sb->s_flags & MS_SYNCHRONOUS) {
528 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
529 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
533 	UFSD("EXIT, fragment %u\n", fragment);
/*
 * UFS_TEST_FREE_SPACE_CG - textual helper for ufs_alloc_fragments(): tests
 * whether cylinder group 'cgno' has a whole free block, or a free fragment
 * run of at least 'count' (scanning cg_frsum[count..s_fpb-1]).  Presumably
 * it branches to a label in the enclosing function on success -- the lines
 * carrying that branch are elided in this excerpt; verify against the full
 * source.  Do not insert comments inside the backslash-continued body.
 */
538 #define UFS_TEST_FREE_SPACE_CG \
539 	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
540 	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
542 	for (k = count; k < uspi->s_fpb; k++) \
543 		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments - allocate 'count' fragments near 'goal', searching
 * for a cylinder group with space in three passes (preferred cg, quadratic
 * rehash, brute force), then carving the fragments out of that group.
 * Returns the allocated fragment number relative to the whole device, or 0
 * on failure.  NOTE(review): some lines (labels, returns, braces) are
 * elided in this excerpt; comments cover visible statements only.
 */
546 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
547 				     unsigned goal, unsigned count, int * err)
549 	struct super_block * sb;
550 	struct ufs_sb_private_info * uspi;
551 	struct ufs_super_block_first * usb1;
552 	struct ufs_cg_private_info * ucpi;
553 	struct ufs_cylinder_group * ucg;
554 	unsigned oldcg, i, j, k, result, allocsize;
556 	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);
559 	uspi = UFS_SB(sb)->s_uspi;
560 	usb1 = ubh_get_usb_first(uspi);
/* Pass 1: the preferred cylinder group. */
566 	UFS_TEST_FREE_SPACE_CG
/* Pass 2: quadratic rehash over the groups. */
571 	for (j = 1; j < uspi->s_ncg; j *= 2) {
573 		if (cgno >= uspi->s_ncg)
575 		UFS_TEST_FREE_SPACE_CG
/* Pass 3: brute-force linear scan of the remaining groups
 * (cg 0 was pass 1, the first rehash step covered one more). */
582 	cgno = (oldcg + 1) % uspi->s_ncg;
583 	for (j = 2; j < uspi->s_ncg; j++) {
585 		if (cgno >= uspi->s_ncg)
587 		UFS_TEST_FREE_SPACE_CG
590 	UFSD("EXIT (FAILED)\n");
/* A group with space was found: load it and sanity-check the magic. */
594 	ucpi = ufs_load_cylinder (sb, cgno);
597 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
598 	if (!ufs_cg_chkmagic(sb, ucg))
599 		ufs_panic (sb, "ufs_alloc_fragments",
600 			   "internal error, bad magic number on cg %u", cgno);
601 	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Whole-block request: allocate a block directly. */
603 	if (count == uspi->s_fpb) {
604 		result = ufs_alloccg_block (inode, ucpi, goal, err);
605 		if (result == (unsigned)-1)
/* Find the smallest available free-run size >= count. */
610 	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
611 		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No suitable run: take a full block and give back the unused tail
 * as free fragments (quota refund plus nffree/frsum bookkeeping). */
614 	if (allocsize == uspi->s_fpb) {
615 		result = ufs_alloccg_block (inode, ucpi, goal, err);
616 		if (result == (unsigned)-1)
618 		goal = ufs_dtogd (result);
619 		for (i = count; i < uspi->s_fpb; i++)
620 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
621 		i = uspi->s_fpb - count;
622 		DQUOT_FREE_BLOCK(inode, i);
624 		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
625 		fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
626 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
627 		fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Normal case: locate a free run of 'allocsize' in the bitmap and claim
 * the first 'count' fragments of it. */
631 	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
632 	if (result == (unsigned)-1)
634 	if(DQUOT_ALLOC_BLOCK(inode, count)) {
638 	for (i = 0; i < count; i++)
639 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
641 	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
642 	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
643 	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
644 	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
/* The run shrank rather than vanished: record the leftover size. */
646 	if (count != allocsize)
647 		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
650 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
651 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
652 	if (sb->s_flags & MS_SYNCHRONOUS) {
653 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
654 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* Convert the cg-relative result into a device-wide fragment number. */
658 	result += cgno * uspi->s_fpg;
659 	UFSD("EXIT3, result %u\n", result);
/*
 * ufs_alloccg_block - allocate one whole block inside cylinder group
 * 'ucpi', as near 'goal' as possible (falling back to the group's rotor
 * when no goal is given).  Clears the block in the free map, charges the
 * quota, and updates block/cylinder summaries.  Returns the cg-relative
 * fragment number of the block, or (unsigned)-1 on bitmap-search failure.
 * NOTE(review): some branch/return lines are elided in this excerpt.
 */
663 static unsigned ufs_alloccg_block (struct inode * inode,
664 	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
666 	struct super_block * sb;
667 	struct ufs_sb_private_info * uspi;
668 	struct ufs_super_block_first * usb1;
669 	struct ufs_cylinder_group * ucg;
670 	unsigned result, cylno, blkno;
672 	UFSD("ENTER, goal %u\n", goal);
675 	uspi = UFS_SB(sb)->s_uspi;
676 	usb1 = ubh_get_usb_first(uspi);
677 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* No goal: continue from this group's allocation rotor. */
680 		goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, cg-relative. */
683 	goal = ufs_blknum (goal);
684 	goal = ufs_dtogd (goal);
/* Fast path: the goal block itself is free. */
689 	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Otherwise scan the bitmap for a run of s_fpb free fragments. */
695 	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
696 	if (result == (unsigned)-1)
698 	ucpi->c_rotor = result;
/* Claim the block: clear its bit, adjust cluster accounting (44BSD),
 * charge the quota, and decrement the free-block summaries. */
700 	blkno = ufs_fragstoblks(result);
701 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
702 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
703 		ufs_clusteracct (sb, ucpi, blkno, -1);
704 	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
709 	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
710 	fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
711 	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
712 	cylno = ufs_cbtocylno(result);
713 	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
714 	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
716 	UFSD("EXIT, result %u\n", result);
/*
 * ubh_scanc - scan 'size' bytes of the buffer-head chain 'ubh', starting at
 * byte offset 'begin', for the first byte b where (table[b] & mask) != 0.
 * The ufs_buffer_head spans multiple fs-sized buffers, so the offset is
 * split into a buffer index (begin >> s_fshift) and an in-buffer offset.
 * Returns the number of bytes left unscanned (0 means no match found),
 * mirroring the BSD scanc() convention.  NOTE(review): the loop that
 * advances across buffers is partly elided in this excerpt.
 */
721 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
722 			  struct ufs_buffer_head *ubh,
723 			  unsigned begin, unsigned size,
724 			  unsigned char *table, unsigned char mask)
726 	unsigned rest, offset;
730 	offset = begin & ~uspi->s_fmask;
731 	begin >>= uspi->s_fshift;
/* Scan either the whole request or up to the end of this buffer. */
733 		if ((offset + size) < uspi->s_fsize)
736 			rest = uspi->s_fsize - offset;
738 		cp = ubh->bh[begin]->b_data + offset;
739 		while ((table[*cp++] & mask) == 0 && --rest)
746 	return (size + rest);
750 * Find a block of the specified size in the specified cylinder group.
751 * @sb: pointer to super block
752 * @ucpi: pointer to cylinder group info
753 * @goal: near which block we want to find a new one
754 * @count: specified size
/*
 * ufs_bitmap_search - find a run of 'count' free fragments in cylinder
 * group 'ucpi', near 'goal' (or from the group's fragment rotor when no
 * goal is given).  First a byte-granular scan (ubh_scanc with the fragment
 * table) locates a candidate byte, then the bit patterns below pin down the
 * exact position inside the surrounding blocks.  Returns the cg-relative
 * fragment number, or (unsigned)-1 on failure.  NOTE(review): some lines
 * (second-pass setup, returns) are elided in this excerpt.
 */
756 static unsigned ufs_bitmap_search(struct super_block *sb,
757 				  struct ufs_cg_private_info *ucpi,
758 				  unsigned goal, unsigned count)
/*
 * Bit patterns for identifying fragments in the block map
 * used as ((map & mask_arr) == want_arr): mask/want pairs select a run
 * of 'count' free bits bounded by allocated (or edge) bits.
 */
764 	static const int mask_arr[9] = {
765 		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
767 	static const int want_arr[9] = {
768 		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
770 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
771 	struct ufs_super_block_first *usb1;
772 	struct ufs_cylinder_group *ucg;
773 	unsigned start, length, loc, result;
774 	unsigned pos, want, blockmap, mask, end;
776 	UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);
778 	usb1 = ubh_get_usb_first (uspi);
779 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Start the byte scan at the goal, or at the group's fragment rotor. */
782 		start = ufs_dtogd(goal) >> 3;
784 		start = ucpi->c_frotor >> 3;
/* Pass 1: from 'start' to the end of the group's fragment bitmap. */
786 	length = ((uspi->s_fpg + 7) >> 3) - start;
787 	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
788 			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
789 			1 << (count - 1 + (uspi->s_fpb & 7)));
/* Pass 2 (wrap-around): scan the bitmap from its beginning. */
792 		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
793 				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
795 				1 << (count - 1 + (uspi->s_fpb & 7)));
797 			ufs_error(sb, "ufs_bitmap_search",
798 				  "bitmap corrupted on cg %u, start %u,"
799 				  " length %u, count %u, freeoff %u\n",
800 				  ucpi->c_cgx, start, length, count,
/* loc = bytes left unscanned, so this recovers the matching bit index. */
806 	result = (start + length - loc) << 3;
807 	ucpi->c_frotor = result;
/*
 * The matching byte is found; now test each block overlapping it with
 * the mask/want patterns to locate the exact free run.
 */
813 	for (end = result + 8; result < end; result += uspi->s_fpb) {
814 		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
816 		mask = mask_arr[count];
817 		want = want_arr[count];
818 		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
819 			if ((blockmap & mask) == want) {
820 				UFSD("EXIT, result %u\n", result);
/* The byte scan promised a run, but the bit scan found none: corruption. */
828 	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
830 	UFSD("EXIT (FAILED)\n");
/*
 * ufs_clusteracct - maintain the 44BSD contiguous-cluster summaries when
 * block 'blkno' is freed (cnt > 0) or allocated (cnt < 0): flip the block's
 * bit in the cluster map, measure the free cluster extending forward and
 * backward from it, and adjust the per-size cluster counters (capped at
 * s_contigsumsize).  No-op when cluster summaries are disabled.
 * NOTE(review): the lines computing 'forw'/'back' from the scan results are
 * elided in this excerpt.
 */
834 static void ufs_clusteracct(struct super_block * sb,
835 	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
837 	struct ufs_sb_private_info * uspi;
838 	int i, start, end, forw, back;
840 	uspi = UFS_SB(sb)->s_uspi;
/* Cluster summaries not in use on this filesystem. */
841 	if (uspi->s_contigsumsize <= 0)
/* Record the block's new state in the cluster bitmap. */
845 		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
847 		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
/*
 * Find the size of the cluster going forward (bounded by the summary
 * size and the number of cluster blocks in this group).
 */
853 	end = start + uspi->s_contigsumsize;
854 	if ( end >= ucpi->c_nclusterblks)
855 		end = ucpi->c_nclusterblks;
856 	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
/*
 * Find the size of the cluster going backward.
 */
865 	end = start - uspi->s_contigsumsize;
868 	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
/*
 * Account for the old cluster and the possibly new forward and
 * backward clusters: combined size is capped at s_contigsumsize.
 */
878 	if (i > uspi->s_contigsumsize)
879 		i = uspi->s_contigsumsize;
880 	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
882 		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
884 		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Fragment-run lookup table for filesystems with 8 fragments per block.
 * Indexed by one byte of the free-fragment bitmap; presumably bit i of the
 * entry indicates that a free run of length i+1 exists in that byte (the
 * classic FFS fragtbl encoding, consumed here by ubh_scanc's
 * 'table[*cp] & mask' test) -- confirm against the full source, since the
 * accounting code using it is partly elided in this excerpt.
 */
888 static unsigned char ufs_fragtable_8fpb[] = {
889 	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
890 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
891 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
892 	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
893 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
894 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
895 	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
896 	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
897 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
898 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
899 	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
900 	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
901 	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
902 	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
903 	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
904 	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
907 static unsigned char ufs_fragtable_other[] = {
908 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
909 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
910 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
911 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
912 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
913 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
914 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
915 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
916 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
917 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
918 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
919 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
920 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
921 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
922 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
923 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,