/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, bit, end_bit, bbase, blkmap, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");

        lock_super(sb);

        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
                goto failed;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
                goto failed;
        }

        end_bit = bit + count;
        bbase = ufs_blknum (bit);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
                else
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }

        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

        /*
         * Trying to reassemble free fragments into block
         */
        blkno = ufs_fragstoblks (bbase);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                uspi->cs_total.cs_nffree -= uspi->s_fpb;
                fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno (bbase);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(bbase)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        unlock_super (sb);
        UFSD("EXIT\n");
        return;

failed:
        unlock_super (sb);
        UFSD("EXIT (FAILED)\n");
        return;
}
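
/*
 * A rough worked example of the addressing math used above, assuming a
 * layout with 8 fragments per block (s_fpb == 8) and 32768 fragments per
 * cylinder group (s_fpg == 32768); the real values come from the
 * superblock:
 *
 *      fragment = 98307
 *      cgno  = ufs_dtog(uspi, fragment)   ->  98307 / 32768 = 3
 *      bit   = ufs_dtogd(uspi, fragment)  ->  98307 % 32768 = 3
 *      ufs_fragnum(fragment)              ->  98307 & 7     = 3
 *      bbase = ufs_blknum(bit)            ->  3 & ~7        = 0
 *      blkno = ufs_fragstoblks(bbase)     ->  0 >> 3        = 0
 *
 * i.e. the fragment lives in cylinder group 3, occupies bit 3 of that
 * group's free-fragment bitmap, and is the fourth fragment of the
 * group's first block.
 */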

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned overflow, cgno, bit, end_bit, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
                ufs_error (sb, "ufs_free_blocks", "internal error, "
                           "fragment %llu, count %u\n",
                           (unsigned long long)fragment, count);
                goto failed;
        }

        lock_super(sb);

do_more:
        overflow = 0;
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
                goto failed_unlock;
        }
        end_bit = bit + count;
        if (end_bit > uspi->s_fpg) {
                overflow = bit + count - uspi->s_fpg;
                count -= overflow;
                end_bit -= overflow;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed_unlock;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
                goto failed_unlock;
        }

        for (i = bit; i < end_bit; i += uspi->s_fpb) {
                blkno = ufs_fragstoblks(i);
                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);

                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno(i);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(i)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }

        if (overflow) {
                fragment += count;
                count = overflow;
                goto do_more;
        }

        sb->s_dirt = 1;
        unlock_super (sb);
        UFSD("EXIT\n");
        return;

failed_unlock:
        unlock_super (sb);
failed:
        UFSD("EXIT (FAILED)\n");
        return;
}
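
/*
 * Sketch of how the do_more loop above splits a run of blocks that
 * crosses a cylinder group boundary (figures are only illustrative,
 * assuming s_fpg == 32768 and s_fpb == 8):
 *
 *      ufs_free_blocks(inode, 32760, 16)
 *        pass 1: cgno 0, bit 32760, end_bit 32776 > s_fpg,
 *                so overflow = 8, count = 8: free the last block of cg 0
 *        pass 2: fragment = 32768, count = 8,
 *                cgno 1, bit 0: free the first block of cg 1
 */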

/*
 * Modify the inode page cache so that buffers currently mapped to
 * blocks oldb...oldb+count-1 become mapped to newb...newb+count-1.
 * We also assume that the blocks oldb...oldb+count-1 are situated at
 * the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page
 * is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               unsigned int count, sector_t oldb,
                               sector_t newb, struct page *locked_page)
{
        const unsigned blks_per_page =
                1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
        unsigned pos, j, lblock;
        sector_t end, i;
        struct page *page;
        struct buffer_head *head, *bh;

        UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
              inode->i_ino, count,
             (unsigned long long)oldb, (unsigned long long)newb);

        BUG_ON(!locked_page);
        BUG_ON(!PageLocked(locked_page));

        cur_index = locked_page->index;
        end = count + beg;
        last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
        for (i = beg; i < end; i = (i | mask) + 1) {
                index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
                        if (!page)/* it was truncated */
                                continue;
                        if (IS_ERR(page)) {/* or EIO */
                                ufs_error(inode->i_sb, __func__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
                        }
                } else
                        page = locked_page;

                head = page_buffers(page);
                bh = head;
                pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;

                if (unlikely(index == last_index))
                        lblock = end & mask;
                else
                        lblock = blks_per_page;

                do {
                        if (j >= lblock)
                                break;
                        pos = (i - beg) + j;

                        if (!buffer_mapped(bh))
                                map_bh(bh, inode->i_sb, oldb + pos);
                        if (!buffer_uptodate(bh)) {
                                ll_rw_block(READ, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        ufs_error(inode->i_sb, __func__,
                                                  "read of block failed\n");
                                        break;
                                }
                        }

                        UFSD(" change from %llu to %llu, pos %u\n",
                             (unsigned long long)(pos + oldb),
                             (unsigned long long)(pos + newb), pos);

                        bh->b_blocknr = newb + pos;
                        unmap_underlying_metadata(bh->b_bdev,
                                                  bh->b_blocknr);
                        mark_buffer_dirty(bh);
                        ++j;
                        bh = bh->b_this_page;
                } while (bh != head);

                if (likely(cur_index != index))
                        ufs_put_locked_page(page);
        }
        UFSD("EXIT\n");
}
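
/*
 * Illustration of the index arithmetic in ufs_change_blocknr(), assuming
 * 4K pages and a 1K block size (so blks_per_page == 4, mask == 3):
 *
 *      beg = 5, count = 6  ->  end = 11, last_index = 2
 *      i = 5: index = 1, pos = 1, lblock = 4  ->  buffers 1..3 of page 1
 *      i = 8: index = 2, pos = 0, lblock = 3  ->  buffers 0..2 of page 2
 *
 * Each visited buffer_head has its b_blocknr rebased from the old run
 * (oldb + pos) to the new one (newb + pos) and is marked dirty.
 */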

static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
        struct buffer_head *bh;
        sector_t end = beg + n;

        for (; beg < end; ++beg) {
                bh = sb_getblk(inode->i_sb, beg);
                lock_buffer(bh);
                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                if (IS_SYNC(inode) || sync)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
}

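/*
 * ufs_new_fragments() below is the entry point for data allocation.
 * In rough pseudo-code (a sketch of the cases handled, not an exact
 * restatement of the code):
 *
 *      if (oldcount == 0)
 *              allocate `count' fresh fragments in a suitable group;
 *      else if (the existing tail can grow in place)
 *              extend it via ufs_add_fragments();
 *      else
 *              allocate a larger run elsewhere, move the cached data
 *              with ufs_change_blocknr(), then free the old fragments.
 *
 * `p' points at the block pointer to be updated and `*err' is cleared
 * only on success.
 */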
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                           u64 goal, unsigned count, int *err,
                           struct page *locked_page)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        unsigned cgno, oldcount, newcount;
        u64 tmp, request, result;

        UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;

        lock_super (sb);
        tmp = ufs_data_ptr_to_cpu(sb, p);

        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
                ufs_warning(sb, "ufs_new_fragments", "internal warning"
                            " fragment %llu, count %u",
                            (unsigned long long)fragment, count);
                count = uspi->s_fpb - ufs_fragnum(fragment);
        }
        oldcount = ufs_fragnum (fragment);
        newcount = oldcount + count;

        /*
         * Somebody else has just allocated our fragments
         */
        if (oldcount) {
                if (!tmp) {
                        ufs_error(sb, "ufs_new_fragments", "internal error, "
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
                        unlock_super(sb);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        unlock_super (sb);
                        return 0;
                }
        }
        else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        unlock_super(sb);
                        return 0;
                }
        }

        /*
         * There is not enough space left on the device for an
         * unprivileged user
         */
        if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
                unlock_super (sb);
                UFSD("EXIT (FAILED)\n");
                return 0;
        }

        if (goal >= uspi->s_size)
                goal = 0;
        if (goal == 0)
                cgno = ufs_inotocg (inode->i_ino);
        else
                cgno = ufs_dtog(uspi, goal);

        /*
         * allocate new fragments
         */
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
                        ufs_cpu_to_data_ptr(sb, p, result);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max_t(u32, UFS_I(inode)->i_lastfrag,
                                      fragment + count);
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                }
                unlock_super(sb);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * resize block
         */
        result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
        if (result) {
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                unlock_super(sb);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * allocate new block and move data
         */
        switch (fs32_to_cpu(sb, usb1->fs_optim)) {
            case UFS_OPTSPACE:
                request = newcount;
                if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
                    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
            default:
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                /* fall through */
            case UFS_OPTTIME:
                request = uspi->s_fpb;
                if (uspi->cs_total.cs_nffree < uspi->s_dsize *
                    (uspi->s_minfree - 2) / 100)
                        break;
                /* enough free fragments again: go back to space optimization */
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
                break;
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
                ufs_cpu_to_data_ptr(sb, p, result);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
                unlock_super(sb);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        unlock_super(sb);
        UFSD("EXIT (FAILED)\n");
        return 0;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, fragno, fragoff, count, fragsize, i;

        UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
             (unsigned long long)fragment, oldcount, newcount);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first (uspi);
        count = newcount - oldcount;

        cgno = ufs_dtog(uspi, fragment);
        if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
                return 0;
        if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
                return 0;
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error, bad magic number on cg %u", cgno);
                return 0;
        }

        fragno = ufs_dtogd(uspi, fragment);
        fragoff = ufs_fragnum (fragno);
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;
        /*
         * Block can be extended
         */
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());
        for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        break;
        fragsize = i - oldcount;
        if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error or corrupted bitmap on cg %u", cgno);
        fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
        if (fragsize != count)
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

        return fragment;
}
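
/*
 * Worked example of the cg_frsum bookkeeping in ufs_add_fragments()
 * (illustrative, assuming s_fpb == 8): a file tail starting on a block
 * boundary owns 2 fragments and grows by one, so oldcount == 2,
 * newcount == 3, count == 1.  If the remaining 6 fragments of the block
 * are free, the scan stops at i == 8 and fragsize == i - oldcount == 6:
 * the free run of 6 is consumed (cg_frsum[6]--), a run of 5 is left
 * behind (cg_frsum[5]++), and cs_nffree drops by 1.
 */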

#define UFS_TEST_FREE_SPACE_CG \
        ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
        if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
                goto cg_found; \
        for (k = count; k < uspi->s_fpb; k++) \
                if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
                        goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned oldcg, i, j, k, allocsize;
        u64 result;

        UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
             inode->i_ino, cgno, (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        oldcg = cgno;

        /*
         * 1. searching on preferred cylinder group
         */
        UFS_TEST_FREE_SPACE_CG

        /*
         * 2. quadratic rehash
         */
        for (j = 1; j < uspi->s_ncg; j *= 2) {
                cgno += j;
                if (cgno >= uspi->s_ncg)
                        cgno -= uspi->s_ncg;
                UFS_TEST_FREE_SPACE_CG
        }

        /*
         * 3. brute force search
         * We start at j = 2 (the preferred group was probed in step 1,
         * its successor in step 2)
         */
        cgno = (oldcg + 1) % uspi->s_ncg;
        for (j = 2; j < uspi->s_ncg; j++) {
                cgno++;
                if (cgno >= uspi->s_ncg)
                        cgno = 0;
                UFS_TEST_FREE_SPACE_CG
        }

        UFSD("EXIT (FAILED)\n");
        return 0;

cg_found:
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg))
                ufs_panic (sb, "ufs_alloc_fragments",
                        "internal error, bad magic number on cg %u", cgno);
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());

        if (count == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goto succed;
        }

        for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
                if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
                        break;

        if (allocsize == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goal = ufs_dtogd(uspi, result);
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;

                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
                fs32_add(sb, &ucg->cg_frsum[i], 1);
                goto succed;
        }

        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

        if (count != allocsize)
                fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        result += cgno * uspi->s_fpg;
        UFSD("EXIT3, result %llu\n", (unsigned long long)result);
        return result;
}
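
/*
 * Example of the cylinder group search order in ufs_alloc_fragments(),
 * assuming s_ncg == 16 and a preferred group of 5: step 1 probes cg 5,
 * the quadratic rehash then probes 6, 8, 12 and 4 (i.e. 5 + 1, 3, 7, 15
 * modulo 16), and if all of those are full the brute force pass walks
 * 7, 8, 9, ... wrapping around until the remaining groups have been
 * examined.
 */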

static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cylinder_group * ucg;
        u64 result, blkno;

        UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal == 0) {
                goal = ucpi->c_rotor;
                goto norot;
        }
        goal = ufs_blknum (goal);
        goal = ufs_dtogd(uspi, goal);

        /*
         * If the requested block is available, use it.
         */
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
                result = goal;
                goto gotit;
        }

norot:
        result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
        if (result == INVBLOCK)
                return INVBLOCK;
        ucpi->c_rotor = result;
gotit:
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);

        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree--;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

        if (uspi->fs_magic != UFS2_MAGIC) {
                unsigned cylno = ufs_cbtocylno((unsigned)result);

                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                          ufs_cbtorpos((unsigned)result)), 1);
                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }

        UFSD("EXIT, result %llu\n", (unsigned long long)result);

        return result;
}
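
/*
 * Goal handling above, by example (figures assume s_fpb == 8 and
 * s_fpg == 32768): a caller goal of 98333 is rounded down to its block
 * boundary by ufs_blknum() (98328) and made group-relative by
 * ufs_dtogd() (98328 - 3 * 32768 == 24), so ufs_fragstoblks() checks
 * whether block 3 of the group is still free before falling back to
 * the rotor and a bitmap search.
 */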

static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
        unsigned rest, offset;
        unsigned char *cp;

        offset = begin & ~uspi->s_fmask;
        begin >>= uspi->s_fshift;
        for (;;) {
                if ((offset + size) < uspi->s_fsize)
                        rest = size;
                else
                        rest = uspi->s_fsize - offset;
                size -= rest;
                cp = ubh->bh[begin]->b_data + offset;
                while ((table[*cp++] & mask) == 0 && --rest)
                        ;
                if (rest || !size)
                        break;
                begin++;
                offset = 0;
        }
        return (size + rest);
}
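
/*
 * Note on the return value: ubh_scanc() returns the number of bytes left
 * in the scanned range, counted from the first byte whose table entry
 * matches `mask'.  A return of 0 therefore means "no matching byte",
 * which is how ufs_bitmap_search() below interprets `loc', and
 * (start + length - loc) recovers the offset of the matching byte.
 */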

/*
 * Find a free run of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: block near which to search for the new run
 * @count: size of the run, in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct ufs_super_block_first *usb1;
        struct ufs_cylinder_group *ucg;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        usb1 = ubh_get_usb_first (uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;

        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }
        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * found the byte in the map
         */

        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}
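
/*
 * Worked example of the mask/want matching above, for count == 2:
 * mask_arr[2] == 0xf and want_arr[2] == 0x6, so after the block map is
 * shifted left by one the test looks for the bit pattern `0110' -- two
 * free fragments with an allocated fragment (or the block edge) on
 * either side, i.e. a free run of exactly two.  Shifting mask and want
 * left by one walks that window across the block.
 */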

static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;

        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if ( end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;
        forw = i - start;

        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0 )
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if ( i < end)
                i = end;
        back = start - i;

        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

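/*
 * The tables below are indexed by one byte of the free-fragment bitmap
 * and yield a bitmask describing the lengths of the free runs inside
 * that byte; ubh_scanc() tests them against 1 << (count - 1 +
 * (s_fpb & 7)).  In the 8-fragments-per-block table, bit (n - 1) is set
 * when the byte contains a maximal run of exactly n free fragments,
 * e.g. ufs_fragtable_8fpb[0x0f] == 0x08 (a single run of four) and
 * ufs_fragtable_8fpb[0xc3] == 0x02 (runs of two only).
 */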
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};