[PATCH] ufs: prepare write + change blocks on the fly
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, bit, end_bit, bbase, blkmap, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");

        lock_super(sb);

        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
                goto failed;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
                goto failed;
        }

        end_bit = bit + count;
        bbase = ufs_blknum (bit);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
                else
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }

        DQUOT_FREE_BLOCK (inode, count);

        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

        /*
         * Trying to reassemble free fragments into block
         */
        blkno = ufs_fragstoblks (bbase);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                uspi->cs_total.cs_nffree -= uspi->s_fpb;
                fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno (bbase);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(bbase)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        unlock_super (sb);
        UFSD("EXIT\n");
        return;

failed:
        unlock_super (sb);
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned overflow, cgno, bit, end_bit, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
                ufs_error (sb, "ufs_free_blocks", "internal error, "
                           "fragment %llu, count %u\n",
                           (unsigned long long)fragment, count);
                goto failed;
        }

        lock_super(sb);

do_more:
        overflow = 0;
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
                goto failed_unlock;
        }
        end_bit = bit + count;
        if (end_bit > uspi->s_fpg) {
                overflow = bit + count - uspi->s_fpg;
                count -= overflow;
                end_bit -= overflow;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed_unlock;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
                goto failed_unlock;
        }

        for (i = bit; i < end_bit; i += uspi->s_fpb) {
                blkno = ufs_fragstoblks(i);
                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno(i);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(i)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }

        if (overflow) {
                fragment += count;
                count = overflow;
                goto do_more;
        }

        sb->s_dirt = 1;
        unlock_super (sb);
        UFSD("EXIT\n");
        return;

failed_unlock:
        unlock_super (sb);
failed:
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Modify the inode page cache in the following way:
 * buffers with b_blocknr equal to oldb...oldb+count-1
 * become buffers with b_blocknr equal to newb...newb+count-1;
 * we also assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               unsigned int count, sector_t oldb,
                               sector_t newb, struct page *locked_page)
{
        const unsigned blks_per_page =
                1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
        unsigned pos, j, lblock;
        sector_t end, i;
        struct page *page;
        struct buffer_head *head, *bh;

        UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
             inode->i_ino, count,
             (unsigned long long)oldb, (unsigned long long)newb);

        BUG_ON(!locked_page);
        BUG_ON(!PageLocked(locked_page));

        cur_index = locked_page->index;
        end = count + beg;
        last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
        for (i = beg; i < end; i = (i | mask) + 1) {
                index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
                        if (!page)/* it was truncated */
                                continue;
                        if (IS_ERR(page)) {/* or EIO */
                                ufs_error(inode->i_sb, __FUNCTION__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
                        }
                } else
                        page = locked_page;

                head = page_buffers(page);
                bh = head;
                pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;

                if (unlikely(index == last_index))
                        lblock = end & mask;
                else
                        lblock = blks_per_page;

                do {
                        if (j >= lblock)
                                break;
                        pos = (i - beg) + j;

                        if (!buffer_mapped(bh))
                                map_bh(bh, inode->i_sb, oldb + pos);
                        if (!buffer_uptodate(bh)) {
                                ll_rw_block(READ, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        ufs_error(inode->i_sb, __FUNCTION__,
                                                  "read of block failed\n");
                                        break;
                                }
                        }

                        UFSD(" change from %llu to %llu, pos %u\n",
                             (unsigned long long)pos + oldb,
                             (unsigned long long)pos + newb, pos);

                        bh->b_blocknr = newb + pos;
                        unmap_underlying_metadata(bh->b_bdev,
                                                  bh->b_blocknr);
                        mark_buffer_dirty(bh);
                        ++j;
                        bh = bh->b_this_page;
                } while (bh != head);

                if (likely(cur_index != index))
                        ufs_put_locked_page(page);
        }
        UFSD("EXIT\n");
}

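/*
 * Zero out and commit the 'n' fragments starting at fragment 'beg':
 * each buffer is zero-filled, marked uptodate and dirty, and written
 * out synchronously when the inode (or the caller) requires it.
 */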
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
        struct buffer_head *bh;
        sector_t end = beg + n;

        for (; beg < end; ++beg) {
                bh = sb_getblk(inode->i_sb, beg);
                lock_buffer(bh);
                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                if (IS_SYNC(inode) || sync)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
}

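/*
 * Allocate 'count' fragments for the block pointer 'p' of 'inode',
 * preferably near 'goal'.  Depending on what already sits at
 * 'fragment', this either allocates a brand new run of fragments,
 * extends the existing tail fragment in place, or allocates a new
 * block and moves the old data there (updating the page cache via
 * ufs_change_blocknr).  Returns the first allocated fragment number,
 * 0 on failure, or INVBLOCK on internal error.
 */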
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                           u64 goal, unsigned count, int *err,
                           struct page *locked_page)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        unsigned cgno, oldcount, newcount;
        u64 tmp, request, result;

        UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;

        lock_super (sb);
        tmp = ufs_data_ptr_to_cpu(sb, p);

        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
                ufs_warning(sb, "ufs_new_fragments", "internal warning"
                            " fragment %llu, count %u",
                            (unsigned long long)fragment, count);
                count = uspi->s_fpb - ufs_fragnum(fragment);
        }
        oldcount = ufs_fragnum (fragment);
        newcount = oldcount + count;

        /*
         * Somebody else has just allocated our fragments
         */
        if (oldcount) {
                if (!tmp) {
                        ufs_error(sb, "ufs_new_fragments", "internal error, "
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
                        unlock_super(sb);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        unlock_super (sb);
                        return 0;
                }
        }
        else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        unlock_super(sb);
                        return 0;
                }
        }

        /*
         * There is not enough space for the user on the device
         */
        if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
                unlock_super (sb);
                UFSD("EXIT (FAILED)\n");
                return 0;
        }

        if (goal >= uspi->s_size)
                goal = 0;
        if (goal == 0)
                cgno = ufs_inotocg (inode->i_ino);
        else
                cgno = ufs_dtog(uspi, goal);

        /*
         * allocate a new fragment
         */
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
                        ufs_cpu_to_data_ptr(sb, p, result);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max_t(u32, UFS_I(inode)->i_lastfrag,
                                      fragment + count);
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                }
                unlock_super(sb);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * resize the block
         */
        result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
        if (result) {
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                unlock_super(sb);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * allocate a new block and move the data
         */
        switch (fs32_to_cpu(sb, usb1->fs_optim)) {
            case UFS_OPTSPACE:
                request = newcount;
                if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
                    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
            default:
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

            case UFS_OPTTIME:
                request = uspi->s_fpb;
                if (uspi->cs_total.cs_nffree < uspi->s_dsize *
                    (uspi->s_minfree - 2) / 100)
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
                                   result, locked_page);
                ufs_cpu_to_data_ptr(sb, p, result);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
                unlock_super(sb);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        unlock_super(sb);
        UFSD("EXIT (FAILED)\n");
        return 0;
}

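/*
 * Try to extend the fragment run at 'fragment' in place from
 * 'oldcount' to 'newcount' fragments by claiming the immediately
 * following free fragments in the same cylinder group.  Returns
 * 'fragment' on success and 0 if the run cannot be extended.
 */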
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, fragno, fragoff, count, fragsize, i;

        UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
             (unsigned long long)fragment, oldcount, newcount);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first (uspi);
        count = newcount - oldcount;

        cgno = ufs_dtog(uspi, fragment);
        if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
                return 0;
        if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
                return 0;
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error, bad magic number on cg %u", cgno);
                return 0;
        }

        fragno = ufs_dtogd(uspi, fragment);
        fragoff = ufs_fragnum (fragno);
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;
        /*
         * Block can be extended
         */
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());
        for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        break;
        fragsize = i - oldcount;
        if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error or corrupted bitmap on cg %u", cgno);
        fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
        if (fragsize != count)
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
        if(DQUOT_ALLOC_BLOCK(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

        return fragment;
}

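/*
 * Jump to cg_found if cylinder group 'cgno' has at least one free
 * block, or a free fragment run large enough for 'count' fragments.
 */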
#define UFS_TEST_FREE_SPACE_CG \
        ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
        if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
                goto cg_found; \
        for (k = count; k < uspi->s_fpb; k++) \
                if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
                        goto cg_found;

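/*
 * Allocate 'count' contiguous fragments near 'goal', searching the
 * preferred cylinder group 'cgno' first, then rehashing quadratically,
 * and finally scanning all cylinder groups.  Returns the absolute
 * fragment number of the allocation, or 0 on failure.
 */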
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned oldcg, i, j, k, allocsize;
        u64 result;

        UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
             inode->i_ino, cgno, (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        oldcg = cgno;

        /*
         * 1. search in the preferred cylinder group
         */
        UFS_TEST_FREE_SPACE_CG

        /*
         * 2. quadratic rehash
         */
        for (j = 1; j < uspi->s_ncg; j *= 2) {
                cgno += j;
                if (cgno >= uspi->s_ncg)
                        cgno -= uspi->s_ncg;
                UFS_TEST_FREE_SPACE_CG
        }

        /*
         * 3. brute force search
         * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
         */
        cgno = (oldcg + 1) % uspi->s_ncg;
        for (j = 2; j < uspi->s_ncg; j++) {
                cgno++;
                if (cgno >= uspi->s_ncg)
                        cgno = 0;
                UFS_TEST_FREE_SPACE_CG
        }

        UFSD("EXIT (FAILED)\n");
        return 0;

cg_found:
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg))
                ufs_panic (sb, "ufs_alloc_fragments",
                        "internal error, bad magic number on cg %u", cgno);
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());

        if (count == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goto succed;
        }

        for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
                if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
                        break;

        if (allocsize == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goal = ufs_dtogd(uspi, result);
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;
                DQUOT_FREE_BLOCK(inode, i);

                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
                fs32_add(sb, &ucg->cg_frsum[i], 1);
                goto succed;
        }

        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
        if(DQUOT_ALLOC_BLOCK(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

        if (count != allocsize)
                fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        result += cgno * uspi->s_fpg;
        UFSD("EXIT3, result %llu\n", (unsigned long long)result);
        return result;
}

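/*
 * Allocate a whole block in the cylinder group described by 'ucpi',
 * preferably the block containing 'goal'.  Returns the fragment
 * number of the allocated block within the cylinder group, or
 * INVBLOCK if nothing could be allocated.
 */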
static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cylinder_group * ucg;
        u64 result, blkno;

        UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal == 0) {
                goal = ucpi->c_rotor;
                goto norot;
        }
        goal = ufs_blknum (goal);
        goal = ufs_dtogd(uspi, goal);

        /*
         * If the requested block is available, use it.
         */
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
                result = goal;
                goto gotit;
        }

norot:
        result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
        if (result == INVBLOCK)
                return INVBLOCK;
        ucpi->c_rotor = result;
gotit:
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);
        if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
                *err = -EDQUOT;
                return INVBLOCK;
        }

        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree--;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

        if (uspi->fs_magic != UFS2_MAGIC) {
                unsigned cylno = ufs_cbtocylno((unsigned)result);

                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                          ufs_cbtorpos((unsigned)result)), 1);
                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }

        UFSD("EXIT, result %llu\n", (unsigned long long)result);

        return result;
}

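/*
 * Scan the bitmap starting at byte offset 'begin' for up to 'size'
 * bytes, stopping at the first byte whose 'table' entry has a bit in
 * common with 'mask', much like the scanc() helper in BSD FFS.
 * Returns the number of bytes left unscanned, so 0 means no suitable
 * byte was found.
 */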
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
        unsigned rest, offset;
        unsigned char *cp;

        offset = begin & ~uspi->s_fmask;
        begin >>= uspi->s_fshift;
        for (;;) {
                if ((offset + size) < uspi->s_fsize)
                        rest = size;
                else
                        rest = uspi->s_fsize - offset;
                size -= rest;
                cp = ubh->bh[begin]->b_data + offset;
                while ((table[*cp++] & mask) == 0 && --rest)
                        ;
                if (rest || !size)
                        break;
                begin++;
                offset = 0;
        }
        return (size + rest);
}

/*
 * Find a run of free fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: block near which we want to find the new one
 * @count: requested size in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct ufs_super_block_first *usb1;
        struct ufs_cylinder_group *ucg;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        usb1 = ubh_get_usb_first (uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;

        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }
        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * found the byte in the map
         */

        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}

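/*
 * Update the cluster summary information after a block has been freed
 * (cnt > 0) or allocated (cnt < 0): adjust the cluster bitmap for
 * 'blkno', measure the free clusters adjoining it, and move the
 * per-size cluster counts accordingly.  Only used when the filesystem
 * keeps contiguity summaries (s_contigsumsize > 0).
 */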
static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;

        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if (end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;
        forw = i - start;

        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0)
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if (i < end)
                i = end;
        back = start - i;

        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

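/*
 * Fragment run tables: for each possible bitmap byte value the entry
 * records which sizes of free-fragment runs occur in that byte, so a
 * single table lookup in ubh_scanc() can tell whether a byte is worth
 * examining.  ufs_fragtable_8fpb is used when a block holds exactly
 * 8 fragments (one bitmap byte per block); ufs_fragtable_other covers
 * the remaining fragments-per-block sizes.
 */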
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};