/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_mount.h"
#include "xfs_itable.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr_leaf.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#ifdef DEBUG
STATIC void
xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
#endif

kmem_zone_t		*xfs_bmap_free_item_zone;
/*
 * Prototypes for internal bmap routines.
 */
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags);	/* inode logging flags */
/*
 * Called from xfs_bmap_add_attrfork to handle local format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags);	/* inode logging flags */
/*
 * Called by xfs_bmap_add_extent to handle cases converting a delayed
 * allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_filblks_t		*dnew,	/* new delayed-alloc indirect blocks */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	int			*logflagsp); /* inode logging flags */
/*
 * Called by xfs_bmap_add_extent to handle cases converting a hole
 * to a delayed allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp); /* inode logging flags */
/*
 * Called by xfs_bmap_add_extent to handle cases converting a hole
 * to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork); /* data or attr fork */
/*
 * Called by xfs_bmap_add_extent to handle cases converting an unwritten
 * allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp); /* inode logging flags */
/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int				/* error */
xfs_bmap_alloc(
	xfs_bmalloca_t		*ap);	/* bmap alloc argument struct */
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork); /* data or attr fork */
/*
 * Remove the entry "free" from the free item list.  Prev points to the
 * previous entry, unless "free" is the head of the list.
 */
STATIC void
xfs_bmap_del_free(
	xfs_bmap_free_t		*flist,	/* free item list header */
	xfs_bmap_free_item_t	*prev,	/* previous item on list, if any */
	xfs_bmap_free_item_t	*free);	/* list item to be freed */
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork);	/* data or attr fork */
/*
 * Convert a local file to an extents file.
 * This code is sort of bogus, since the file data needs to get
 * logged so it won't be lost.  The bmap-level manipulations are ok, though.
 */
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork);	/* data or attr fork */
/*
 * Search the extents list for the inode, for the extent containing bno.
 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 * *eofp will be set, and *prevp will contain the last entry (null if none).
 * Else, *lastxp will be set to the index of the found
 * entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
xfs_bmap_search_extents(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fileoff_t	bno,		/* block number searched for */
	int		whichfork,	/* data or attr fork */
	int		*eofp,		/* out: end of file found */
	xfs_extnum_t	*lastxp,	/* out: last extent index */
	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
	xfs_bmbt_irec_t	*prevp);	/* out: previous extent entry found */
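
/*
 * Sketch of a typical xfs_bmap_search_extents() call (illustrative, not
 * a quote of any in-tree caller):
 *
 *	int		eof;
 *	xfs_extnum_t	lastx;
 *	xfs_bmbt_irec_t	got, prev;
 *
 *	ep = xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK,
 *				     &eof, &lastx, &got, &prev);
 *
 * On return, eof set means bno lies beyond the last extent (prev then
 * holds that last entry); otherwise got either contains bno or is the
 * first extent after the hole that bno sits in.
 */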
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 */
STATIC int				/* error */
xfs_bmap_isaeof(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fileoff_t	off,		/* file offset in fsblocks */
	int		whichfork,	/* data or attribute fork */
	char		*aeof);		/* return value */
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len);		/* delayed extent length */
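
/*
 * Worked example for xfs_bmap_worst_indlen() (illustrative numbers): if
 * a bmap btree block holds m == 8 records, a delayed extent that could
 * later split into 100 separate records needs ceil(100/8) == 13 leaf
 * blocks, those need ceil(13/m') blocks at the next level, and so on up
 * to the root; the reservation is the sum of those per-level counts.
 */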
#ifdef DEBUG
/*
 * Perform various validation checks on the values being returned
 * from xfs_bmapi().
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			onmap,
	int			nmap);
#else
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */

STATIC int
xfs_bmap_count_tree(
	xfs_mount_t		*mp,
	xfs_trans_t		*tp,
	xfs_ifork_t		*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	int			*count);

STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count);

STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count);
/*
 * Bmap internal routines.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
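
/*
 * xfs_bmbt_lookup_eq() sets *stat to 1 only on an exact-match record,
 * while xfs_bmbt_lookup_ge() positions the cursor at the first record
 * at or after [off, bno, len].  For example, xfs_bmap_add_attrfork_btree()
 * below uses xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat) simply to land on
 * the first record in the tree.
 */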
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.flist = flist;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return XFS_ERROR(ENOSPC);
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* mount structure pointer */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;
	if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
		mp = ip->i_mount;
		memset(&dargs, 0, sizeof(dargs));
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.flist = flist;
		dargs.total = mp->m_dirblkfsbs;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		error = xfs_dir2_sf_to_block(&dargs);
	} else
		error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
			XFS_DATA_FORK);
	return error;
}
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after allocating space (or doing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_add_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	int			*logflagsp, /* inode logging flags */
	int			whichfork) /* data or attr fork */
{
	xfs_btree_cur_t		*cur;	/* btree cursor or null */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork ptr */
	int			logflags; /* returned value */
	xfs_extnum_t		nextents; /* number of extents in file now */

	XFS_STATS_INC(xs_add_exlist);

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	da_old = da_new = 0;
	error = 0;

	ASSERT(*idx >= 0);
	ASSERT(*idx <= nextents);

	/*
	 * This is the first extent added to a new/empty file.
	 * Special case this one, so other routines get to assume there are
	 * already extents in the list.
	 */
	if (nextents == 0) {
		xfs_iext_insert(ip, *idx, 1, new,
				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);

		ASSERT(cur == NULL);

		if (!isnullstartblock(new->br_startblock)) {
			XFS_IFORK_NEXT_SET(ip, whichfork, 1);
			logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else
			logflags = 0;
	}
	/*
	 * Any kind of new delayed allocation goes here.
	 */
	else if (isnullstartblock(new->br_startblock)) {
		if (cur)
			ASSERT((cur->bc_private.b.flags &
				XFS_BTCUR_BPRV_WASDEL) == 0);
		error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
						       &logflags);
	}
	/*
	 * Real allocation off the end of the file.
	 */
	else if (*idx == nextents) {
		if (cur)
			ASSERT((cur->bc_private.b.flags &
				XFS_BTCUR_BPRV_WASDEL) == 0);
		error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
				&logflags, whichfork);
	} else {
		xfs_bmbt_irec_t	prev;	/* old extent at offset idx */

		/*
		 * Get the record referred to by idx.
		 */
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &prev);
		/*
		 * If it's a real allocation record, and the new allocation ends
		 * after the start of the referred to record, then we're filling
		 * in a delayed or unwritten allocation with a real one, or
		 * converting real back to unwritten.
		 */
		if (!isnullstartblock(new->br_startblock) &&
		    new->br_startoff + new->br_blockcount > prev.br_startoff) {
			if (prev.br_state != XFS_EXT_UNWRITTEN &&
			    isnullstartblock(prev.br_startblock)) {
				da_old = startblockval(prev.br_startblock);
				if (cur)
					ASSERT(cur->bc_private.b.flags &
						XFS_BTCUR_BPRV_WASDEL);
				error = xfs_bmap_add_extent_delay_real(ip,
						idx, &cur, new, &da_new,
						first, flist, &logflags);
			} else {
				ASSERT(new->br_state == XFS_EXT_NORM ||
				       new->br_state == XFS_EXT_UNWRITTEN);

				error = xfs_bmap_add_extent_unwritten_real(ip,
						idx, &cur, new, &logflags);
				if (error)
					goto done;
			}
		}
		/*
		 * Otherwise we're filling in a hole with an allocation.
		 */
		else {
			if (cur)
				ASSERT((cur->bc_private.b.flags &
					XFS_BTCUR_BPRV_WASDEL) == 0);
			error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
					new, &logflags, whichfork);
		}
	}

	if (error)
		goto done;
	ASSERT(*curp == cur || *curp == NULL);

	/*
	 * Convert to a btree if necessary.
	 */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
			flist, &cur, da_old > 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto done;
	}
	/*
	 * Adjust for changes in reserved delayed indirect blocks.
	 * Nothing to do for disk quotas here.
	 */
	if (da_old || da_new) {
		xfs_filblks_t	nblks;

		nblks = da_new;
		if (cur)
			nblks += cur->bc_private.b.allocated;
		ASSERT(nblks <= da_old);
		if (nblks < da_old)
			xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
				(int64_t)(da_old - nblks), 0);
	}
	/*
	 * Clear out the allocated field, done with it now in any case.
	 */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}
done:
#ifdef DEBUG
	if (!error)
		xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
#endif
	*logflagsp = logflags;
	return error;
}
/*
 * Called by xfs_bmap_add_extent to handle cases converting a delayed
 * allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_filblks_t		*dnew,	/* new delayed-alloc indirect blocks */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* value for dnew calculations */
	xfs_filblks_t		temp2=0;/* value for dnew calculations */
	int			tmp_rval;	/* partial logging flags */

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
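
	/*
	 * Illustrative reading of the state bits (numbers made up): if
	 * new covers all of PREV (both FILLING bits set) and a real left
	 * neighbor abuts it, state is BMAP_LEFT_FILLING |
	 * BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG, and the matching case
	 * below merges PREV into the left extent and removes one record.
	 */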
	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 2, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		*dnew = 0;
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state)))
				goto done;
		}
		*dnew = 0;
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state)))
				goto done;
		}

		*dnew = 0;
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}

		*dnew = 0;
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);

		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					new->br_blockcount,
					LEFT.br_state)))
				goto done;
		}
		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
			startblockval(PREV.br_startblock));
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		--*idx;
		*dnew = temp;
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, new_endoff);
		temp = PREV.br_blockcount - new->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(ip, *idx, 1, new, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
			error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
					first, flist, &cur, 1, &tmp_rval,
					XFS_DATA_FORK);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
			startblockval(PREV.br_startblock) -
			(cur ? cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, *idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);

		*dnew = temp;
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx + 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx + 1),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount,
			RIGHT.br_state);
		trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
					RIGHT.br_blockcount,
					RIGHT.br_state)))
				goto done;
		}

		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
			startblockval(PREV.br_startblock));
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		*dnew = temp;
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(ip, *idx + 1, 1, new, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
			error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
				first, flist, &cur, 1, &tmp_rval,
				XFS_DATA_FORK);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
			startblockval(PREV.br_startblock) -
			(cur ? cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, *idx);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		*dnew = temp;
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		temp = new->br_startoff - PREV.br_startoff;
		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
		trace_xfs_bmap_pre_update(ip, *idx, 0, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
		LEFT = *new;
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startblock = nullstartblock(
				(int)xfs_bmap_worst_indlen(ip, temp2));
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount = temp2;
		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
		xfs_iext_insert(ip, *idx + 1, 2, &LEFT, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
			error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
					first, flist, &cur, 1, &tmp_rval,
					XFS_DATA_FORK);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		temp = xfs_bmap_worst_indlen(ip, temp);
		temp2 = xfs_bmap_worst_indlen(ip, temp2);
		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
			(cur ? cur->bc_private.b.allocated : 0));
		if (diff > 0 &&
		    xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
					     -((int64_t)diff), 0)) {
			/*
			 * Ick gross gag me with a spoon.
			 */
			ASSERT(0);	/* want to see if this ever happens! */
			while (diff > 0) {
				if (temp) {
					temp--;
					diff--;
					if (!diff ||
					    !xfs_icsb_modify_counters(ip->i_mount,
						    XFS_SBS_FDBLOCKS,
						    -((int64_t)diff), 0))
						break;
				}
				if (temp2) {
					temp2--;
					diff--;
					if (!diff ||
					    !xfs_icsb_modify_counters(ip->i_mount,
						    XFS_SBS_FDBLOCKS,
						    -((int64_t)diff), 0))
						break;
				}
			}
		}
		ep = xfs_iext_get_ext(ifp, *idx);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		trace_xfs_bmap_pre_update(ip, *idx + 2, state, _THIS_IP_);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx + 2),
			nullstartblock((int)temp2));
		trace_xfs_bmap_post_update(ip, *idx + 2, state, _THIS_IP_);

		++*idx;
		*dnew = temp + temp2;
		break;
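
	/*
	 * Sketch of the reservation arithmetic in the case above (made-up
	 * numbers): if PREV carried startblockval() == 4 reserved indirect
	 * blocks and the two remaining delayed pieces now want temp == 3
	 * and temp2 == 3, then diff == 2 more blocks must come out of the
	 * free-block counter (or be whittled away in the loop when the
	 * counter is exhausted).
	 */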
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}
	*curp = cur;
done:
	*logflagsp = rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
/*
 * Called by xfs_bmap_add_extent to handle cases converting an unwritten
 * allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_exntst_t		newext;	/* new extent state */
	xfs_exntst_t		oldext;	/* old extent state */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &PREV);
	newext = new->br_state;
	oldext = (newext == XFS_EXT_UNWRITTEN) ?
		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
	ASSERT(PREV.br_state == oldext);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == newext &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    newext == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 2, state);
		ip->i_d.di_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount +
				RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount,
				LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock, new->br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		--*idx;

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + new->br_blockcount,
				LEFT.br_state))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
		xfs_bmbt_set_startoff(ep, new_endoff);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_insert(ip, *idx, 1, new, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock,
					PREV.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto done;
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, new, state);

		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			new->br_startoff - PREV.br_startoff);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = oldext;

		++*idx;
		xfs_iext_insert(ip, *idx, 2, &r[0], state);

		ip->i_d.di_nextents += 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			/* new right extent - oldext */
			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
				r[1].br_startblock, r[1].br_blockcount,
				r[1].br_state)))
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			cur->bc_rec.b.br_blockcount =
				new->br_startoff - PREV.br_startoff;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			/* new middle extent - newext */
			cur->bc_rec.b.br_state = new->br_state;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}
	*curp = cur;
done:
	*logflagsp = rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
/*
 * Called by xfs_bmap_add_extent to handle cases converting a hole
 * to a delayed allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;  /* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	state = 0;
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
			(int64_t)(oldlen - newlen), 0);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
	*logflagsp = 0;
	return 0;
}
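
/*
 * Example of the indlen accounting in xfs_bmap_add_extent_hole_delay()
 * (illustrative numbers): merging a left delalloc holding 2 reserved
 * indirect blocks, a new piece holding 1, and a right neighbor holding
 * 2 gives oldlen == 5, while the worst-case need of the single merged
 * extent might only be newlen == 3, so 2 blocks are returned to
 * XFS_SBS_FDBLOCKS.
 */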
/*
 * Called by xfs_bmap_add_extent to handle cases converting a hole
 * to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork) /* data or attr fork */
{
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	int			state;	/* state bits, accessed thru macros */

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(*idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
	state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			left.br_blockcount + new->br_blockcount +
			right.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount +
						right.br_blockcount,
					left.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			left.br_blockcount + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur,
					left.br_startoff,
					left.br_startblock,
					left.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount,
					left.br_state)))
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + right.br_blockcount,
			right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
						right.br_blockcount,
					right.br_state)))
				goto done;
		}
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, *idx, 1, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur,
					new->br_startoff,
					new->br_startblock,
					new->br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = new->br_state;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;
	}
done:
	*logflagsp = rval;
	return error;
}
1982 * Adjust the size of the new extent based on di_extsize and rt extsize.
1985 xfs_bmap_extsize_align(
1987 xfs_bmbt_irec_t *gotp, /* next extent pointer */
1988 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
1989 xfs_extlen_t extsz, /* align to this extent size */
1990 int rt, /* is this a realtime inode? */
1991 int eof, /* is extent at end-of-file? */
1992 int delay, /* creating delalloc extent? */
1993 int convert, /* overwriting unwritten extent? */
1994 xfs_fileoff_t *offp, /* in/out: aligned offset */
1995 xfs_extlen_t *lenp) /* in/out: aligned length */
1997 xfs_fileoff_t orig_off; /* original offset */
1998 xfs_extlen_t orig_alen; /* original length */
1999 xfs_fileoff_t orig_end; /* original off+len */
2000 xfs_fileoff_t nexto; /* next file offset */
2001 xfs_fileoff_t prevo; /* previous file offset */
2002 xfs_fileoff_t align_off; /* temp for offset */
2003 xfs_extlen_t align_alen; /* temp for length */
2004 xfs_extlen_t temp; /* temp for calculations */
2009 orig_off = align_off = *offp;
2010 orig_alen = align_alen = *lenp;
2011 orig_end = orig_off + orig_alen;
	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}
	/*
	 * Same adjustment for the end of the requested area.
	 */
	if ((temp = (align_alen % extsz))) {
		align_alen += extsz - temp;
	}
	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}
	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return XFS_ERROR(EINVAL);
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return XFS_ERROR(EINVAL);
	} else {
		ASSERT(orig_off >= align_off);
		ASSERT(orig_end <= align_off + align_alen);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*offp = align_off;
	*lenp = align_alen;
	return 0;
}
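
/*
 * Illustrative numbers (not from the original source): with extsz = 4,
 * a request of *offp = 5, *lenp = 3 first grows to align_off = 4,
 * align_alen = 4 (start rounded down, length rounded up), and only
 * then is trimmed against prevo/nexto; on a realtime inode the final
 * length must additionally be a whole multiple of sb_rextsize or the
 * function returns EINVAL.
 */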
#define XFS_ALLOC_GAP_UNITS	4

STATIC void
xfs_bmap_adjacent(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_mount_t	*mp;		/* mount point structure */
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		rt;		/* true if inode is realtime */

#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
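
	/*
	 * Note on ISVALID (above): on the realtime device only the total
	 * device size bounds a candidate block, while on the data device
	 * the candidate must sit in the same AG as the block it was
	 * derived from, since a single extent can never span allocation
	 * groups.
	 */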
	mp = ap->ip->i_mount;
	nullfb = ap->firstblock == NULLFSBLOCK;
	rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prevp->br_startblock) &&
	    ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
		    ap->prevp->br_startblock)) {
		ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->off -
			(ap->prevp->br_startoff + ap->prevp->br_blockcount);
		if (adjust &&
		    ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
			ap->rval += adjust;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	else if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prevp->br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prevp->br_startblock) &&
		    (prevbno = ap->prevp->br_startblock +
			       ap->prevp->br_blockcount) &&
		    ISVALID(prevbno, ap->prevp->br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->off -
				(ap->prevp->br_startoff +
				 ap->prevp->br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
			    ISVALID(prevbno + prevdiff,
				    ap->prevp->br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
				prevbno = NULLFSBLOCK;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->gotp->br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->gotp->br_startoff - ap->off;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->gotp->br_startblock;
			/*
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
			else if (ISVALID(gotbno - ap->alen, gotbno)) {
				gotbno -= ap->alen;
				gotdiff += adjust - ap->alen;
			} else
				gotdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
				gotbno = NULLFSBLOCK;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->rval is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
			ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
		else if (prevbno != NULLFSBLOCK)
			ap->rval = prevbno;
		else if (gotbno != NULLFSBLOCK)
			ap->rval = gotbno;
	}
#undef ISVALID
}
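
/*
 * Example of the XFS_ALLOC_GAP_UNITS heuristic (hypothetical numbers):
 * allocating alen = 10 blocks into a hole 8 file-blocks past a previous
 * extent that ends at disk block 100 gives prevdiff = 8 <= 4 * 10, so
 * the requested start becomes block 108 and the file-offset-to-disk
 * geometry is preserved; a 50-block gap would fail the test and the
 * request would simply start at block 100.
 */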
STATIC int
xfs_bmap_rtalloc(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
					align, 1, ap->eof, 0,
					ap->conv, &ap->off, &ap->alen);
	if (error)
		return error;
	ASSERT(ap->alen);
	ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->off, align) || ap->alen % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->alen / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't heed the length value, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
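
	/*
	 * Example (hypothetical sb_rextsize, real MAXEXTLEN): with
	 * sb_rextsize = 16 and MAXEXTLEN = 0x1fffff (2097151) blocks, a
	 * rounded-up request of 131072 rtextents (2097152 blocks) would
	 * overflow the on-disk extent length field, so ralen is cut
	 * back to 131071 rtextents here.
	 */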
	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin_ref(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->off == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->rval = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->rval = 0;
	}

	xfs_bmap_adjacent(ap);
	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->rval, mp->m_sb.sb_rextsize);
	rtb = ap->rval;
	ap->alen = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
					   ap->alen, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->rval = rtb;
	if (ap->rval != NULLFSBLOCK) {
		ap->rval *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->alen = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->alen = 0;
	}
	return 0;
}
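
/*
 * Note on the retry above: the first xfs_rtallocate_extent call passes
 * prod so the allocator honours the extent size hint; if that fails and
 * prod > 1, the second call retries with prod = 1, trading hint
 * alignment for a successful allocation.
 */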
STATIC int
xfs_bmap_btalloc_nullfb(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag, startag;
	int			notinit = 0;
	int			error;

	if (ap->userdata && xfs_inode_is_filestream(ap->ip))
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
	else
		args->type = XFS_ALLOCTYPE_START_BNO;
	args->total = ap->total;
	/*
	 * Search for an allocation group with a single extent large enough
	 * for the request.  If one isn't found, then adjust the minimum
	 * allocation size to the largest space found.
	 */
	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (startag == NULLAGNUMBER)
		startag = ag = 0;

	pag = xfs_perag_get(mp, ag);
	while (*blen < args->maxlen) {
		if (!pag->pagf_init) {
			error = xfs_alloc_pagf_init(mp, args->tp, ag,
						    XFS_ALLOC_FLAG_TRYLOCK);
			if (error) {
				xfs_perag_put(pag);
				return error;
			}
		}

		/*
		 * See xfs_alloc_fix_freelist...
		 */
		if (pag->pagf_init) {
			xfs_extlen_t	longest;
			longest = xfs_alloc_longest_free_extent(mp, pag);
			if (*blen < longest)
				*blen = longest;
		} else
			notinit = 1;
		if (xfs_inode_is_filestream(ap->ip)) {
			if (*blen >= args->maxlen)
				break;

			if (ap->userdata) {
				/*
				 * If startag is an invalid AG, we've
				 * come here once before and
				 * xfs_filestream_new_ag picked the
				 * best currently available.
				 *
				 * Don't continue looping, since we
				 * could loop forever.
				 */
				if (startag == NULLAGNUMBER)
					break;

				error = xfs_filestream_new_ag(ap, &ag);
				xfs_perag_put(pag);
				if (error)
					return error;

				/* loop again to set 'blen' */
				startag = NULLAGNUMBER;
				pag = xfs_perag_get(mp, ag);
				continue;
			}
		}
		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;
		xfs_perag_put(pag);
		pag = xfs_perag_get(mp, ag);
	}
	xfs_perag_put(pag);
	/*
	 * Since the above loop did a BUF_TRYLOCK, it is
	 * possible that there is space for this request.
	 */
	if (notinit || *blen < ap->minlen)
		args->minlen = ap->minlen;
	/*
	 * If the best seen length is less than the request
	 * length, use the best as the minimum.
	 */
	else if (*blen < args->maxlen)
		args->minlen = *blen;
	/*
	 * Otherwise we've seen an extent as big as maxlen,
	 * use that as the minimum.
	 */
	else
		args->minlen = args->maxlen;

	/*
	 * Set the failure fallback case to look in the selected
	 * AG as the stream may have moved.
	 */
	if (xfs_inode_is_filestream(ap->ip))
		ap->rval = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);

	return 0;
}
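
/*
 * The net effect of the scan above: args->minlen is raised from
 * ap->minlen toward the best free-extent length seen (capped at
 * args->maxlen), so the caller's xfs_alloc_vextent pass does not demand
 * more contiguous space than the per-AG scan suggests exists.
 */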
STATIC int
xfs_bmap_btalloc(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;

	mp = ap->ip->i_mount;
	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
	if (unlikely(align)) {
		error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
						align, 0, ap->eof, 0, ap->conv,
						&ap->off, &ap->alen);
		ASSERT(!error);
		ASSERT(ap->alen);
	}
	nullfb = ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
	if (nullfb) {
		if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->rval = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->rval = ap->firstblock;

	xfs_bmap_adjacent(ap);

	/*
	 * If allowed, use ap->rval; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
		;
	else
		ap->rval = ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->rval;

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = MIN(ap->alen, XFS_ALLOC_AG_MAX_USABLE(mp));
	args.firstblock = ap->firstblock;
	blen = 0;
	if (nullfb) {
		error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->low) {
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (unlikely(align)) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
		if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
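
	/*
	 * Example of the prod/mod setup (hypothetical numbers): 4k
	 * blocks with PAGE_CACHE_SIZE = 16k give args.prod = 4; a
	 * request at file block 6 has do_mod(6, 4) = 2, so args.mod
	 * becomes 4 - 2 = 2, asking the allocator for a length that
	 * makes the extent end on a page-cache-aligned boundary.
	 */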
	/*
	 * If we are not low on available data blocks, and the
	 * underlying logical volume manager is a stripe, and
	 * the file offset is zero then try to allocate data
	 * blocks on stripe unit boundary.
	 * NOTE: ap->aeof is only set if the allocation length
	 * is >= the stripe unit and the allocation offset is
	 * at the end of file.
	 */
	if (!ap->low && ap->aeof) {
		if (!ap->off) {
			args.alignment = mp->m_dalign;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > mp->m_dalign && blen <= args.maxlen)
				nextminlen = blen - mp->m_dalign;
			else
				nextminlen = args.minlen;
			if (nextminlen + mp->m_dalign > args.minlen + 1)
				args.minalignslop =
					nextminlen + mp->m_dalign -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
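
	/*
	 * Slop example (hypothetical numbers): with args.minlen = 8,
	 * stripe unit m_dalign = 16 and nextminlen = 8, minalignslop is
	 * 8 + 16 - 8 - 1 = 15, so the exact-bno attempt reserves at
	 * least as much free space as the aligned retry below may need;
	 * without the slop the retry could fail even though the first
	 * reservation succeeded.
	 */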
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.isfl = 0;
	args.userdata = ap->userdata;
	if ((error = xfs_alloc_vextent(&args)))
		return error;
	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->rval;
		args.alignment = mp->m_dalign;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->rval;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->rval;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		args.minleft = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->low = 1;
	}
	if (args.fsbno != NULLFSBLOCK) {
		ap->firstblock = ap->rval = args.fsbno;
		ASSERT(nullfb || fb_agno == args.agno ||
		       (ap->low && fb_agno < args.agno));
		ap->alen = args.len;
		ap->ip->i_d.di_nblocks += args.len;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= args.len;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
					XFS_TRANS_DQ_BCOUNT,
			(long) args.len);
	} else {
		ap->rval = NULLFSBLOCK;
		ap->alen = 0;
	}
	return 0;
}
/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int
xfs_bmap_alloc(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
		return xfs_bmap_rtalloc(ap);
	return xfs_bmap_btalloc(ap);
}
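
/*
 * In short: only user data in a realtime inode is sent to the realtime
 * allocator; metadata, and all data on non-realtime inodes, always comes
 * from the data device via the btree allocator.
 */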
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
			XFS_BMAP_BTREE_REF)))
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
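
/*
 * After this conversion the fork is in XFS_DINODE_FMT_EXTENTS format:
 * the former leaf block sits on the free list (it is not truly freed
 * until the transaction commits), di_nblocks and the quota block count
 * have each dropped by one, and both the inode core and the in-inode
 * extent list are marked for logging via *logflagsp.
 */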
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork) /* data or attr fork */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/* for indirect length calculations */
	xfs_filblks_t		temp2;	/* for indirect length calculations */
	int			state = 0;

	XFS_STATS_INC(xs_del_exlist);

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);