2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "xfs_trans.h"
27 #include "xfs_alloc.h"
28 #include "xfs_quota.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_btree.h"
36 #include "xfs_btree_trace.h"
37 #include "xfs_ialloc.h"
39 #include "xfs_rtalloc.h"
40 #include "xfs_error.h"
41 #include "xfs_itable.h"
42 #include "xfs_fsops.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_utils.h"
46 #include "xfs_vnodeops.h"
47 #include "xfs_log_priv.h"
48 #include "xfs_trans_priv.h"
49 #include "xfs_filestream.h"
50 #include "xfs_da_btree.h"
51 #include "xfs_extfree_item.h"
52 #include "xfs_mru_cache.h"
53 #include "xfs_inode_item.h"
55 #include "xfs_trace.h"
57 #include <linux/namei.h>
58 #include <linux/init.h>
59 #include <linux/slab.h>
60 #include <linux/mount.h>
61 #include <linux/mempool.h>
62 #include <linux/writeback.h>
63 #include <linux/kthread.h>
64 #include <linux/freezer.h>
65 #include <linux/parser.h>
67 static const struct super_operations xfs_super_operations;
68 static kmem_zone_t *xfs_ioend_zone;
69 mempool_t *xfs_ioend_pool;
71 #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */
72 #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */
73 #define MNTOPT_LOGDEV "logdev" /* log device */
74 #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
75 #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
76 #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
77 #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
78 #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
79 #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
80 #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
81 #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */
82 #define MNTOPT_MTPT "mtpt" /* filesystem mount point */
83 #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */
84 #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */
85 #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */
86 #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */
87 #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */
88 #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
89 #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
90 * unwritten extent conversion */
91 #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
92 #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
93 #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
94 #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
95 #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */
96 #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes in stat() */
98 #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */
99 #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */
100 #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */
101 #define MNTOPT_QUOTA "quota" /* disk quotas (user) */
102 #define MNTOPT_NOQUOTA "noquota" /* no quotas */
103 #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
104 #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
105 #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */
106 #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
107 #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
108 #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */
109 #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
110 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
111 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
112 #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
113 #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
114 #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
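/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a mount combining several of the options defined above might be invoked as
 *
 *	mount -t xfs -o logbufs=8,logbsize=64k,inode64,noalign /dev/sdb1 /mnt
 *
 * Each comma-separated name[=value] pair in that string is matched against
 * these MNTOPT_* strings by xfs_parseargs() below.
 */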
117 * Table driven mount option parser.
119 * Currently only used for remount, but it will be used for mount
120 * in the future, too.
123 Opt_barrier, Opt_nobarrier, Opt_err
126 static const match_table_t tokens = {
127 {Opt_barrier, "barrier"},
128 {Opt_nobarrier, "nobarrier"},
134 suffix_strtoul(char *s, char **endp, unsigned int base)
136 int last, shift_left_factor = 0;
139 last = strlen(value) - 1;
140 if (value[last] == 'K' || value[last] == 'k') {
141 shift_left_factor = 10;
144 if (value[last] == 'M' || value[last] == 'm') {
145 shift_left_factor = 20;
148 if (value[last] == 'G' || value[last] == 'g') {
149 shift_left_factor = 30;
153 return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
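/*
 * Illustrative example (editor's note, not from the original source): for
 * the option value "64k", the trailing 'k' selects a shift_left_factor of
 * 10 and simple_strtoul() parses the leading "64", so the call returns
 * 64 << 10 = 65536.  "1g" would likewise return 1 << 30.
 */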
157 * This function fills in xfs_mount_t fields based on mount args.
158 * Note: the superblock has _not_ yet been read in.
160 * Note that this function leaks the various device name allocations on
161 * failure. The caller takes care of them.
165 struct xfs_mount *mp,
168 struct super_block *sb = mp->m_super;
169 char *this_char, *value, *eov;
173 __uint8_t iosizelog = 0;
176 * set up the mount name first so all the errors will refer to the
179 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
182 mp->m_fsname_len = strlen(mp->m_fsname) + 1;
185 * Copy binary VFS mount flags we are interested in.
187 if (sb->s_flags & MS_RDONLY)
188 mp->m_flags |= XFS_MOUNT_RDONLY;
189 if (sb->s_flags & MS_DIRSYNC)
190 mp->m_flags |= XFS_MOUNT_DIRSYNC;
191 if (sb->s_flags & MS_SYNCHRONOUS)
192 mp->m_flags |= XFS_MOUNT_WSYNC;
195 * Set some default flags that could be cleared by the mount option
198 mp->m_flags |= XFS_MOUNT_BARRIER;
199 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
200 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
201 mp->m_flags |= XFS_MOUNT_DELAYLOG;
204 * These can be overridden by the mount option parsing.
212 while ((this_char = strsep(&options, ",")) != NULL) {
215 if ((value = strchr(this_char, '=')) != NULL)
218 if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
219 if (!value || !*value) {
220 xfs_warn(mp, "%s option requires an argument",
224 mp->m_logbufs = simple_strtoul(value, &eov, 10);
225 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
226 if (!value || !*value) {
227 xfs_warn(mp, "%s option requires an argument",
231 mp->m_logbsize = suffix_strtoul(value, &eov, 10);
232 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
233 if (!value || !*value) {
234 xfs_warn(mp, "%s option requires an argument",
238 mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
241 } else if (!strcmp(this_char, MNTOPT_MTPT)) {
242 xfs_warn(mp, "%s option not allowed on this system",
245 } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
246 if (!value || !*value) {
247 xfs_warn(mp, "%s option requires an argument",
251 mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
254 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
255 if (!value || !*value) {
256 xfs_warn(mp, "%s option requires an argument",
260 iosize = simple_strtoul(value, &eov, 10);
261 iosizelog = ffs(iosize) - 1;
262 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
263 if (!value || !*value) {
264 xfs_warn(mp, "%s option requires an argument",
268 iosize = suffix_strtoul(value, &eov, 10);
269 iosizelog = ffs(iosize) - 1;
270 } else if (!strcmp(this_char, MNTOPT_GRPID) ||
271 !strcmp(this_char, MNTOPT_BSDGROUPS)) {
272 mp->m_flags |= XFS_MOUNT_GRPID;
273 } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
274 !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
275 mp->m_flags &= ~XFS_MOUNT_GRPID;
276 } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
277 mp->m_flags |= XFS_MOUNT_WSYNC;
278 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
279 mp->m_flags |= XFS_MOUNT_NORECOVERY;
280 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
281 mp->m_flags |= XFS_MOUNT_NOALIGN;
282 } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
283 mp->m_flags |= XFS_MOUNT_SWALLOC;
284 } else if (!strcmp(this_char, MNTOPT_SUNIT)) {
285 if (!value || !*value) {
286 xfs_warn(mp, "%s option requires an argument",
290 dsunit = simple_strtoul(value, &eov, 10);
291 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
292 if (!value || !*value) {
293 xfs_warn(mp, "%s option requires an argument",
297 dswidth = simple_strtoul(value, &eov, 10);
298 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
299 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
301 xfs_warn(mp, "%s option not allowed on this system",
305 } else if (!strcmp(this_char, MNTOPT_NOUUID)) {
306 mp->m_flags |= XFS_MOUNT_NOUUID;
307 } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
308 mp->m_flags |= XFS_MOUNT_BARRIER;
309 } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
310 mp->m_flags &= ~XFS_MOUNT_BARRIER;
311 } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
312 mp->m_flags |= XFS_MOUNT_IKEEP;
313 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
314 mp->m_flags &= ~XFS_MOUNT_IKEEP;
315 } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
316 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
317 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
318 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
319 } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
320 mp->m_flags |= XFS_MOUNT_ATTR2;
321 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
322 mp->m_flags &= ~XFS_MOUNT_ATTR2;
323 mp->m_flags |= XFS_MOUNT_NOATTR2;
324 } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
325 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
326 } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
327 mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
328 XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
329 XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
330 XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
331 } else if (!strcmp(this_char, MNTOPT_QUOTA) ||
332 !strcmp(this_char, MNTOPT_UQUOTA) ||
333 !strcmp(this_char, MNTOPT_USRQUOTA)) {
334 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
336 } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
337 !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
338 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
339 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
340 } else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
341 !strcmp(this_char, MNTOPT_PRJQUOTA)) {
342 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
344 } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
345 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
346 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
347 } else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
348 !strcmp(this_char, MNTOPT_GRPQUOTA)) {
349 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
351 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
352 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
353 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
355 mp->m_flags |= XFS_MOUNT_DELAYLOG;
356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
358 } else if (!strcmp(this_char, "ihashsize")) {
360 "ihashsize no longer used, option is deprecated.");
361 } else if (!strcmp(this_char, "osyncisdsync")) {
363 "osyncisdsync has no effect, option is deprecated.");
364 } else if (!strcmp(this_char, "osyncisosync")) {
366 "osyncisosync has no effect, option is deprecated.");
367 } else if (!strcmp(this_char, "irixsgid")) {
369 "irixsgid is now a sysctl(2) variable, option is deprecated.");
371 xfs_warn(mp, "unknown mount option [%s].", this_char);
377 * no recovery flag requires a read-only mount
379 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
380 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
381 xfs_warn(mp, "no-recovery mounts must be read-only.");
385 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
387 "sunit and swidth options incompatible with the noalign option");
391 #ifndef CONFIG_XFS_QUOTA
392 if (XFS_IS_QUOTA_RUNNING(mp)) {
393 xfs_warn(mp, "quota support not available in this kernel.");
398 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
399 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
400 xfs_warn(mp, "cannot mount with both project and group quota");
404 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
405 xfs_warn(mp, "sunit and swidth must be specified together");
409 if (dsunit && (dswidth % dsunit != 0)) {
411 "stripe width (%d) must be a multiple of the stripe unit (%d)",
417 if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
419 * At this point the superblock has not been read
420 * in, therefore we do not know the block size.
421 * Before the mount call ends we will convert
425 mp->m_dalign = dsunit;
426 mp->m_flags |= XFS_MOUNT_RETERR;
430 mp->m_swidth = dswidth;
433 if (mp->m_logbufs != -1 &&
434 mp->m_logbufs != 0 &&
435 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
436 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
437 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
438 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
439 return XFS_ERROR(EINVAL);
441 if (mp->m_logbsize != -1 &&
442 mp->m_logbsize != 0 &&
443 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
444 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
445 !is_power_of_2(mp->m_logbsize))) {
447 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
449 return XFS_ERROR(EINVAL);
453 if (iosizelog > XFS_MAX_IO_LOG ||
454 iosizelog < XFS_MIN_IO_LOG) {
455 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
456 iosizelog, XFS_MIN_IO_LOG,
458 return XFS_ERROR(EINVAL);
461 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
462 mp->m_readio_log = iosizelog;
463 mp->m_writeio_log = iosizelog;
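/*
 * Illustrative example (editor's note): mounting with "allocsize=64k"
 * gives iosize = suffix_strtoul("64k") = 65536 and iosizelog =
 * ffs(65536) - 1 = 16, so both m_readio_log and m_writeio_log end up as
 * 16, i.e. a 64k preferred I/O size, and XFS_MOUNT_DFLT_IOSIZE is set
 * above.
 */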
469 struct proc_xfs_info {
476 struct xfs_mount *mp,
479 static struct proc_xfs_info xfs_info_set[] = {
480 /* the few simple ones we can get from the mount struct */
481 { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
482 { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
483 { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
484 { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
485 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
486 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
487 { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
488 { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
489 { XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
490 { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
493 static struct proc_xfs_info xfs_info_unset[] = {
494 /* the few simple ones we can get from the mount struct */
495 { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO },
496 { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER },
497 { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE },
500 struct proc_xfs_info *xfs_infop;
502 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
503 if (mp->m_flags & xfs_infop->flag)
504 seq_puts(m, xfs_infop->str);
506 for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
507 if (!(mp->m_flags & xfs_infop->flag))
508 seq_puts(m, xfs_infop->str);
511 if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
512 seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
513 (int)(1 << mp->m_writeio_log) >> 10);
515 if (mp->m_logbufs > 0)
516 seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
517 if (mp->m_logbsize > 0)
518 seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
521 seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
523 seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
525 if (mp->m_dalign > 0)
526 seq_printf(m, "," MNTOPT_SUNIT "=%d",
527 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
528 if (mp->m_swidth > 0)
529 seq_printf(m, "," MNTOPT_SWIDTH "=%d",
530 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
532 if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
533 seq_puts(m, "," MNTOPT_USRQUOTA);
534 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
535 seq_puts(m, "," MNTOPT_UQUOTANOENF);
537 /* Either project or group quotas can be active, not both */
539 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
540 if (mp->m_qflags & XFS_OQUOTA_ENFD)
541 seq_puts(m, "," MNTOPT_PRJQUOTA);
543 seq_puts(m, "," MNTOPT_PQUOTANOENF);
544 } else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
545 if (mp->m_qflags & XFS_OQUOTA_ENFD)
546 seq_puts(m, "," MNTOPT_GRPQUOTA);
548 seq_puts(m, "," MNTOPT_GQUOTANOENF);
551 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
552 seq_puts(m, "," MNTOPT_NOQUOTA);
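/*
 * Illustrative example (editor's note): a filesystem mounted with
 * "allocsize=64k,usrquota" would show options such as
 * ",delaylog,allocsize=64k,usrquota" in /proc/mounts - "delaylog" comes
 * from the xfs_info_set table because XFS_MOUNT_DELAYLOG is set by
 * default, and the allocsize and quota strings are appended by the code
 * above.
 */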
558 unsigned int blockshift)
560 unsigned int pagefactor = 1;
561 unsigned int bitshift = BITS_PER_LONG - 1;
563 /* Figure out maximum filesize, on Linux this can depend on
564 * the filesystem blocksize (on 32 bit platforms).
565 * __block_write_begin does this in an [unsigned] long...
566 * page->index << (PAGE_CACHE_SHIFT - bbits)
567 * So, for page sized blocks (4K on 32 bit platforms),
568 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
569 * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
570 * but for smaller blocksizes it is less (bbits = log2 bsize).
571 * Note1: get_block_t takes a long (implicit cast from above)
572 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
573 * can optionally convert the [unsigned] long from above into
574 * an [unsigned] long long.
577 #if BITS_PER_LONG == 32
578 # if defined(CONFIG_LBDAF)
579 ASSERT(sizeof(sector_t) == 8);
580 pagefactor = PAGE_CACHE_SIZE;
581 bitshift = BITS_PER_LONG;
583 pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
587 return (((__uint64_t)pagefactor) << bitshift) - 1;
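/*
 * Worked example (editor's note): on a 32-bit kernel with CONFIG_LBDAF
 * and 4k pages, pagefactor = PAGE_CACHE_SIZE = 4096 and bitshift =
 * BITS_PER_LONG = 32, so the returned limit is (4096ULL << 32) - 1,
 * just under 16TiB.  Without CONFIG_LBDAF and with 1k blocks
 * (blockshift = 10), pagefactor drops to 4096 >> 2 = 1024 and bitshift
 * stays at 31, capping files at 2^41 - 1 bytes (~2TiB).  On 64-bit
 * kernels neither branch applies and the limit is 2^63 - 1.
 */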
594 struct block_device **bdevp)
598 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
600 if (IS_ERR(*bdevp)) {
601 error = PTR_ERR(*bdevp);
602 xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
610 struct block_device *bdev)
613 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
617 * Try to write out the superblock using barriers.
623 xfs_buf_t *sbp = xfs_getsb(mp, 0);
628 XFS_BUF_UNDELAYWRITE(sbp);
630 XFS_BUF_UNASYNC(sbp);
631 XFS_BUF_ORDERED(sbp);
634 error = xfs_buf_iowait(sbp);
637 * Clear all the flags we set and possible error state in the
638 * buffer. We only did the write to try out whether barriers
639 * worked and shouldn't leave any traces in the superblock
643 XFS_BUF_ERROR(sbp, 0);
644 XFS_BUF_UNORDERED(sbp);
651 xfs_mountfs_check_barriers(xfs_mount_t *mp)
655 if (mp->m_logdev_targp != mp->m_ddev_targp) {
657 "Disabling barriers, not supported with external log device");
658 mp->m_flags &= ~XFS_MOUNT_BARRIER;
662 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
664 "Disabling barriers, underlying device is readonly");
665 mp->m_flags &= ~XFS_MOUNT_BARRIER;
669 error = xfs_barrier_test(mp);
672 "Disabling barriers, trial barrier write failed");
673 mp->m_flags &= ~XFS_MOUNT_BARRIER;
679 xfs_blkdev_issue_flush(
680 xfs_buftarg_t *buftarg)
682 blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
687 struct xfs_mount *mp)
689 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
690 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
691 xfs_free_buftarg(mp, mp->m_logdev_targp);
692 xfs_blkdev_put(logdev);
694 if (mp->m_rtdev_targp) {
695 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
696 xfs_free_buftarg(mp, mp->m_rtdev_targp);
697 xfs_blkdev_put(rtdev);
699 xfs_free_buftarg(mp, mp->m_ddev_targp);
703 * The file system configurations are:
704 * (1) device (partition) with data and internal log
705 * (2) logical volume with data and log subvolumes.
706 * (3) logical volume with data, log, and realtime subvolumes.
708 * We only have to handle opening the log and realtime volumes here if
709 * they are present. The data subvolume has already been opened by
710 * get_sb_bdev() and is stored in sb->s_bdev.
714 struct xfs_mount *mp)
716 struct block_device *ddev = mp->m_super->s_bdev;
717 struct block_device *logdev = NULL, *rtdev = NULL;
721 * Open real time and log devices - order is important.
724 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
730 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
732 goto out_close_logdev;
734 if (rtdev == ddev || rtdev == logdev) {
736 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
738 goto out_close_rtdev;
743 * Setup xfs_mount buffer target pointers
746 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
747 if (!mp->m_ddev_targp)
748 goto out_close_rtdev;
751 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
753 if (!mp->m_rtdev_targp)
754 goto out_free_ddev_targ;
757 if (logdev && logdev != ddev) {
758 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
760 if (!mp->m_logdev_targp)
761 goto out_free_rtdev_targ;
763 mp->m_logdev_targp = mp->m_ddev_targp;
769 if (mp->m_rtdev_targp)
770 xfs_free_buftarg(mp, mp->m_rtdev_targp);
772 xfs_free_buftarg(mp, mp->m_ddev_targp);
775 xfs_blkdev_put(rtdev);
777 if (logdev && logdev != ddev)
778 xfs_blkdev_put(logdev);
784 * Setup xfs_mount buffer target pointers based on superblock
788 struct xfs_mount *mp)
792 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
793 mp->m_sb.sb_sectsize);
797 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
798 unsigned int log_sector_size = BBSIZE;
800 if (xfs_sb_version_hassector(&mp->m_sb))
801 log_sector_size = mp->m_sb.sb_logsectsize;
802 error = xfs_setsize_buftarg(mp->m_logdev_targp,
803 mp->m_sb.sb_blocksize,
808 if (mp->m_rtdev_targp) {
809 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
810 mp->m_sb.sb_blocksize,
811 mp->m_sb.sb_sectsize);
820 * XFS AIL push thread support
824 struct xfs_ail *ailp,
825 xfs_lsn_t threshold_lsn)
827 /* only ever move the target forwards */
828 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
829 ailp->xa_target = threshold_lsn;
830 wake_up_process(ailp->xa_task);
838 struct xfs_ail *ailp = data;
839 xfs_lsn_t last_pushed_lsn = 0;
840 long tout = 0; /* milliseconds */
842 while (!kthread_should_stop()) {
844 * for short sleeps indicating congestion, don't allow us to
845 * get woken early. Otherwise all we do is bang on the AIL lock
846 * without making progress.
848 if (tout && tout <= 20)
849 __set_current_state(TASK_KILLABLE);
851 __set_current_state(TASK_INTERRUPTIBLE);
852 schedule_timeout(tout ?
853 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
858 ASSERT(ailp->xa_mount->m_log);
859 if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
862 tout = xfsaild_push(ailp, &last_pushed_lsn);
870 struct xfs_ail *ailp)
873 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
874 ailp->xa_mount->m_fsname);
875 if (IS_ERR(ailp->xa_task))
876 return -PTR_ERR(ailp->xa_task);
882 struct xfs_ail *ailp)
884 kthread_stop(ailp->xa_task);
888 /* Catch misguided souls that try to use this interface on XFS */
889 STATIC struct inode *
891 struct super_block *sb)
898 * Now that the generic code is guaranteed not to be accessing
899 * the linux inode, we can reclaim the inode.
902 xfs_fs_destroy_inode(
905 struct xfs_inode *ip = XFS_I(inode);
907 trace_xfs_destroy_inode(ip);
909 XFS_STATS_INC(vn_reclaim);
911 /* bad inode, get out here ASAP */
912 if (is_bad_inode(inode))
917 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
920 * We should never get here with one of the reclaim flags already set.
922 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
923 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
926 * We always use background reclaim here because even if the
927 * inode is clean, it still may be under IO and hence we have
928 * to take the flush lock. The background reclaim path handles
929 * this more efficiently than we can here, so simply let background
930 * reclaim tear down all inodes.
933 xfs_inode_set_reclaim_tag(ip);
937 * Slab object creation initialisation for the XFS inode.
938 * This covers only the idempotent fields in the XFS inode;
939 * all other fields need to be initialised on allocation
940 * from the slab. This avoids the need to repeatedly initialise
941 * fields in the xfs inode that are left in the initialised state
942 * when freeing the inode.
945 xfs_fs_inode_init_once(
948 struct xfs_inode *ip = inode;
950 memset(ip, 0, sizeof(struct xfs_inode));
953 inode_init_once(VFS_I(ip));
956 atomic_set(&ip->i_iocount, 0);
957 atomic_set(&ip->i_pincount, 0);
958 spin_lock_init(&ip->i_flags_lock);
959 init_waitqueue_head(&ip->i_ipin_wait);
961 * Because we want to use a counting completion, complete
962 * the flush completion once to allow a single access to
963 * the flush completion without blocking.
965 init_completion(&ip->i_flush);
966 complete(&ip->i_flush);
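/*
 * Editor's note (illustrative): because the completion is completed once
 * here, the first waiter on i_flush (the flush "lock") gets through
 * immediately; every later waiter blocks until the current holder calls
 * complete() again, which is how a counting completion gives mutex-like
 * flush-lock semantics.
 */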
968 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
969 "xfsino", ip->i_ino);
973 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
974 * we catch unlogged VFS level updates to the inode.
976 * We need the barrier() to maintain correct ordering between unlogged
977 * updates and the transaction commit code that clears the i_update_core
978 * field. This requires all updates to be completed before marking the
986 XFS_I(inode)->i_update_core = 1;
991 struct xfs_inode *ip)
993 struct xfs_mount *mp = ip->i_mount;
994 struct xfs_trans *tp;
997 xfs_iunlock(ip, XFS_ILOCK_SHARED);
998 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
999 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
1002 xfs_trans_cancel(tp, 0);
1003 /* we need to return with the lock held shared */
1004 xfs_ilock(ip, XFS_ILOCK_SHARED);
1008 xfs_ilock(ip, XFS_ILOCK_EXCL);
1011 * Note - it's possible that we might have pushed ourselves out of the
1012 * way during trans_reserve which would flush the inode. But there's
1013 * no guarantee that the inode buffer has actually gone out yet (it's
1014 * delwri). Plus the buffer could be pinned anyway if it's part of
1015 * an inode in another recent transaction. So we play it safe and
1016 * fire off the transaction anyway.
1018 xfs_trans_ijoin(tp, ip);
1019 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1020 error = xfs_trans_commit(tp, 0);
1021 xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
1028 struct inode *inode,
1029 struct writeback_control *wbc)
1031 struct xfs_inode *ip = XFS_I(inode);
1032 struct xfs_mount *mp = ip->i_mount;
1035 trace_xfs_write_inode(ip);
1037 if (XFS_FORCED_SHUTDOWN(mp))
1038 return XFS_ERROR(EIO);
1040 if (wbc->sync_mode == WB_SYNC_ALL) {
1042 * Make sure the inode has made it into the log. Instead
1043 * of forcing it all the way to stable storage using a
1044 * synchronous transaction we let the log force inside the
1045 * ->sync_fs call do that for us, which reduces the number
1046 * of synchronous log forces dramatically.
1049 xfs_ilock(ip, XFS_ILOCK_SHARED);
1050 if (ip->i_update_core) {
1051 error = xfs_log_inode(ip);
1057 * We make this non-blocking if the inode is contended, returning
1058 * EAGAIN to indicate to the caller that they did not succeed.
1059 * This prevents the flush path from blocking on inodes inside
1060 * another operation right now, they get caught later by
1063 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
1066 if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
1070 * Now that we have the flush lock and the inode is not pinned, we
1071 * can check if the inode is really clean as we know that
1072 * there are no pending transaction completions, it is not
1073 * waiting on the delayed write queue and there is no IO in
1076 if (xfs_inode_clean(ip)) {
1081 error = xfs_iflush(ip, SYNC_TRYLOCK);
1085 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1088 * if we failed to write out the inode then mark
1089 * it dirty again so we'll try again later.
1092 xfs_mark_inode_dirty_sync(ip);
1098 struct inode *inode)
1100 xfs_inode_t *ip = XFS_I(inode);
1102 trace_xfs_evict_inode(ip);
1104 truncate_inode_pages(&inode->i_data, 0);
1105 end_writeback(inode);
1106 XFS_STATS_INC(vn_rele);
1107 XFS_STATS_INC(vn_remove);
1108 XFS_STATS_DEC(vn_active);
1111 * The iolock is used by the file system to coordinate reads,
1112 * writes, and block truncates. Up to this point the lock
1113 * protected concurrent accesses by users of the inode. But
1114 * from here forward we're doing some final processing of the
1115 * inode because we're done with it, and although we reuse the
1116 * iolock for protection it is really a distinct lock class
1117 * (in the lockdep sense) from before. To keep lockdep happy
1118 * (and basically indicate what we are doing), we explicitly
1119 * re-init the iolock here.
1121 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
1122 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
1123 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
1124 &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
1131 struct xfs_mount *mp)
1133 kfree(mp->m_fsname);
1134 kfree(mp->m_rtname);
1135 kfree(mp->m_logname);
1140 struct super_block *sb)
1142 struct xfs_mount *mp = XFS_M(sb);
1145 * Unregister the memory shrinker before we tear down the mount
1146 * structure so we don't have memory reclaim racing with us here.
1148 xfs_inode_shrinker_unregister(mp);
1152 * Blow away any referenced inode in the filestreams cache.
1153 * This can and will cause log traffic as inodes go inactive
1156 xfs_filestream_unmount(mp);
1158 XFS_bflush(mp->m_ddev_targp);
1162 xfs_icsb_destroy_counters(mp);
1163 xfs_close_devices(mp);
1164 xfs_free_fsname(mp);
1170 struct super_block *sb,
1173 struct xfs_mount *mp = XFS_M(sb);
1177 * Not much we can do for the first async pass. Writing out the
1178 * superblock would be counter-productive as we are going to redirty it
1179 * when writing out other data and metadata (and writing out a single
1180 * block is quite fast anyway).
1182 * Try to asynchronously kick off quota syncing at least.
1185 xfs_qm_sync(mp, SYNC_TRYLOCK);
1189 error = xfs_quiesce_data(mp);
1194 int prev_sync_seq = mp->m_sync_seq;
1197 * The disk must be active because we're syncing.
1198 * We schedule xfssyncd now (now that the disk is
1199 * active) instead of later (when it might not be).
1201 wake_up_process(mp->m_sync_task);
1203 * We have to wait for the sync iteration to complete.
1204 * If we don't, the disk activity caused by the sync
1205 * will come after the sync is completed, and that
1206 * triggers another sync from laptop mode.
1208 wait_event(mp->m_wait_single_sync_task,
1209 mp->m_sync_seq != prev_sync_seq);
1217 struct dentry *dentry,
1218 struct kstatfs *statp)
1220 struct xfs_mount *mp = XFS_M(dentry->d_sb);
1221 xfs_sb_t *sbp = &mp->m_sb;
1222 struct xfs_inode *ip = XFS_I(dentry->d_inode);
1223 __uint64_t fakeinos, id;
1227 statp->f_type = XFS_SB_MAGIC;
1228 statp->f_namelen = MAXNAMELEN - 1;
1230 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1231 statp->f_fsid.val[0] = (u32)id;
1232 statp->f_fsid.val[1] = (u32)(id >> 32);
1234 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
1236 spin_lock(&mp->m_sb_lock);
1237 statp->f_bsize = sbp->sb_blocksize;
1238 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1239 statp->f_blocks = sbp->sb_dblocks - lsize;
1240 statp->f_bfree = statp->f_bavail =
1241 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1242 fakeinos = statp->f_bfree << sbp->sb_inopblog;
1244 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1245 if (mp->m_maxicount)
1246 statp->f_files = min_t(typeof(statp->f_files),
1250 /* make sure statp->f_ffree does not underflow */
1251 ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1252 statp->f_ffree = max_t(__int64_t, ffree, 0);
1254 spin_unlock(&mp->m_sb_lock);
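/*
 * Illustrative example (editor's note): with 4k blocks and 256-byte
 * inodes, sb_inopblog is 4, so every free data block above counts as
 * 2^4 = 16 potential inodes in fakeinos; f_files is then capped at
 * XFS_MAXINUMBER and, if set, at mp->m_maxicount, and f_ffree is
 * derived from it and clamped so that it can never go negative.
 */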
1256 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
1257 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
1258 (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
1259 xfs_qm_statvfs(ip, statp);
1264 xfs_save_resvblks(struct xfs_mount *mp)
1266 __uint64_t resblks = 0;
1268 mp->m_resblks_save = mp->m_resblks;
1269 xfs_reserve_blocks(mp, &resblks, NULL);
1273 xfs_restore_resvblks(struct xfs_mount *mp)
1277 if (mp->m_resblks_save) {
1278 resblks = mp->m_resblks_save;
1279 mp->m_resblks_save = 0;
1281 resblks = xfs_default_resblks(mp);
1283 xfs_reserve_blocks(mp, &resblks, NULL);
1288 struct super_block *sb,
1292 struct xfs_mount *mp = XFS_M(sb);
1293 substring_t args[MAX_OPT_ARGS];
1297 while ((p = strsep(&options, ",")) != NULL) {
1303 token = match_token(p, tokens, args);
1306 mp->m_flags |= XFS_MOUNT_BARRIER;
1309 * Test if barriers are actually working if we can,
1310 * else delay this check until the filesystem is
1313 if (!(mp->m_flags & XFS_MOUNT_RDONLY))
1314 xfs_mountfs_check_barriers(mp);
1317 mp->m_flags &= ~XFS_MOUNT_BARRIER;
1321 * Logically we would return an error here to prevent
1322 * users from believing they might have changed
1323 * mount options using remount which can't be changed.
1325 * But unfortunately mount(8) adds all options from
1326 * mtab and fstab to the mount arguments in some cases
1327 * so we can't blindly reject options, but have to
1328 * check for each specified option if it actually
1329 * differs from the currently set option and only
1330 * reject it if that's the case.
1332 * Until that is implemented we return success for
1333 * every remount request, and silently ignore all
1334 * options that we can't actually change.
1338 "mount option \"%s\" not supported for remount\n", p);
1347 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1348 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1349 if (mp->m_flags & XFS_MOUNT_BARRIER)
1350 xfs_mountfs_check_barriers(mp);
1353 * If this is the first remount to writeable state we
1354 * might have some superblock changes to update.
1356 if (mp->m_update_flags) {
1357 error = xfs_mount_log_sb(mp, mp->m_update_flags);
1359 xfs_warn(mp, "failed to write sb changes");
1362 mp->m_update_flags = 0;
1366 * Fill out the reserve pool if it is empty. Use the stashed
1367 * value if it is non-zero, otherwise go with the default.
1369 xfs_restore_resvblks(mp);
1373 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
1375 * After we have synced the data but before we sync the
1376 * metadata, we need to free up the reserve block pool so that
1377 * the used block count in the superblock on disk is correct at
1378 * the end of the remount. Stash the current reserve pool size
1379 * so that if we get remounted rw, we can return it to the same
1383 xfs_quiesce_data(mp);
1384 xfs_save_resvblks(mp);
1385 xfs_quiesce_attr(mp);
1386 mp->m_flags |= XFS_MOUNT_RDONLY;
1393 * Second stage of a freeze. The data is already frozen so we only
1394 * need to take care of the metadata. Once that's done write a dummy
1395 * record to dirty the log in case of a crash while frozen.
1399 struct super_block *sb)
1401 struct xfs_mount *mp = XFS_M(sb);
1403 xfs_save_resvblks(mp);
1404 xfs_quiesce_attr(mp);
1405 return -xfs_fs_log_dummy(mp);
1410 struct super_block *sb)
1412 struct xfs_mount *mp = XFS_M(sb);
1414 xfs_restore_resvblks(mp);
1419 xfs_fs_show_options(
1421 struct vfsmount *mnt)
1423 return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
1427 * This function fills in xfs_mount_t fields based on mount args.
1428 * Note: the superblock _has_ now been read in.
1432 struct xfs_mount *mp)
1434 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1436 /* Fail a mount where the logbuf is smaller than the log stripe */
1437 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1438 if (mp->m_logbsize <= 0 &&
1439 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1440 mp->m_logbsize = mp->m_sb.sb_logsunit;
1441 } else if (mp->m_logbsize > 0 &&
1442 mp->m_logbsize < mp->m_sb.sb_logsunit) {
1444 "logbuf size must be greater than or equal to log stripe size");
1445 return XFS_ERROR(EINVAL);
1448 /* Fail a mount if the logbuf is larger than 32K */
1449 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1451 "logbuf size for version 1 logs must be 16K or 32K");
1452 return XFS_ERROR(EINVAL);
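/*
 * Editor's note (illustrative): on a v2 log with a 256k log stripe unit
 * and no logbsize= option, m_logbsize is raised to the stripe unit size
 * above; explicitly requesting a logbsize smaller than the stripe unit,
 * or anything above 32k on a version 1 log, fails the mount with EINVAL.
 */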
1457 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1458 * told by noattr2 to turn it off
1460 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1461 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1462 mp->m_flags |= XFS_MOUNT_ATTR2;
1465 * prohibit r/w mounts of read-only filesystems
1467 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1469 "cannot mount a read-only filesystem as read-write");
1470 return XFS_ERROR(EROFS);
1478 struct super_block *sb,
1483 struct xfs_mount *mp = NULL;
1484 int flags = 0, error = ENOMEM;
1486 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1490 spin_lock_init(&mp->m_sb_lock);
1491 mutex_init(&mp->m_growlock);
1492 atomic_set(&mp->m_active_trans, 0);
1493 INIT_LIST_HEAD(&mp->m_sync_list);
1494 spin_lock_init(&mp->m_sync_lock);
1495 init_waitqueue_head(&mp->m_wait_single_sync_task);
1500 error = xfs_parseargs(mp, (char *)data);
1502 goto out_free_fsname;
1504 sb_min_blocksize(sb, BBSIZE);
1505 sb->s_xattr = xfs_xattr_handlers;
1506 sb->s_export_op = &xfs_export_operations;
1507 #ifdef CONFIG_XFS_QUOTA
1508 sb->s_qcop = &xfs_quotactl_operations;
1510 sb->s_op = &xfs_super_operations;
1513 flags |= XFS_MFSI_QUIET;
1515 error = xfs_open_devices(mp);
1517 goto out_free_fsname;
1519 error = xfs_icsb_init_counters(mp);
1521 goto out_close_devices;
1523 error = xfs_readsb(mp, flags);
1525 goto out_destroy_counters;
1527 error = xfs_finish_flags(mp);
1531 error = xfs_setup_devices(mp);
1535 if (mp->m_flags & XFS_MOUNT_BARRIER)
1536 xfs_mountfs_check_barriers(mp);
1538 error = xfs_filestream_mount(mp);
1543 * we must configure the block size in the superblock before we run the
1544 * full mount process as the mount process can look up and cache inodes.
1545 * For the same reason we must also initialise the syncd and register
1546 * the inode cache shrinker so that inodes can be reclaimed during
1547 * operations like a quotacheck that iterate all inodes in the
1550 sb->s_magic = XFS_SB_MAGIC;
1551 sb->s_blocksize = mp->m_sb.sb_blocksize;
1552 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1553 sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1554 sb->s_time_gran = 1;
1555 set_posix_acl_flag(sb);
1557 error = xfs_syncd_init(mp);
1559 goto out_filestream_unmount;
1561 xfs_inode_shrinker_register(mp);
1563 error = xfs_mountfs(mp);
1565 goto out_syncd_stop;
1567 root = igrab(VFS_I(mp->m_rootip));
1572 if (is_bad_inode(root)) {
1576 sb->s_root = d_alloc_root(root);
1585 xfs_inode_shrinker_unregister(mp);
1587 out_filestream_unmount:
1588 xfs_filestream_unmount(mp);
1591 out_destroy_counters:
1592 xfs_icsb_destroy_counters(mp);
1594 xfs_close_devices(mp);
1596 xfs_free_fsname(mp);
1610 xfs_inode_shrinker_unregister(mp);
1614 * Blow away any referenced inode in the filestreams cache.
1615 * This can and will cause log traffic as inodes go inactive
1618 xfs_filestream_unmount(mp);
1620 XFS_bflush(mp->m_ddev_targp);
1626 STATIC struct dentry *
1628 struct file_system_type *fs_type,
1630 const char *dev_name,
1633 return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1636 static const struct super_operations xfs_super_operations = {
1637 .alloc_inode = xfs_fs_alloc_inode,
1638 .destroy_inode = xfs_fs_destroy_inode,
1639 .dirty_inode = xfs_fs_dirty_inode,
1640 .write_inode = xfs_fs_write_inode,
1641 .evict_inode = xfs_fs_evict_inode,
1642 .put_super = xfs_fs_put_super,
1643 .sync_fs = xfs_fs_sync_fs,
1644 .freeze_fs = xfs_fs_freeze,
1645 .unfreeze_fs = xfs_fs_unfreeze,
1646 .statfs = xfs_fs_statfs,
1647 .remount_fs = xfs_fs_remount,
1648 .show_options = xfs_fs_show_options,
1651 static struct file_system_type xfs_fs_type = {
1652 .owner = THIS_MODULE,
1654 .mount = xfs_fs_mount,
1655 .kill_sb = kill_block_super,
1656 .fs_flags = FS_REQUIRES_DEV,
1660 xfs_init_zones(void)
1663 xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
1664 if (!xfs_ioend_zone)
1667 xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
1669 if (!xfs_ioend_pool)
1670 goto out_destroy_ioend_zone;
1672 xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1674 if (!xfs_log_ticket_zone)
1675 goto out_destroy_ioend_pool;
1677 xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
1678 "xfs_bmap_free_item");
1679 if (!xfs_bmap_free_item_zone)
1680 goto out_destroy_log_ticket_zone;
1682 xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1684 if (!xfs_btree_cur_zone)
1685 goto out_destroy_bmap_free_item_zone;
1687 xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1689 if (!xfs_da_state_zone)
1690 goto out_destroy_btree_cur_zone;
1692 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
1693 if (!xfs_dabuf_zone)
1694 goto out_destroy_da_state_zone;
1696 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1697 if (!xfs_ifork_zone)
1698 goto out_destroy_dabuf_zone;
1700 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1701 if (!xfs_trans_zone)
1702 goto out_destroy_ifork_zone;
1704 xfs_log_item_desc_zone =
1705 kmem_zone_init(sizeof(struct xfs_log_item_desc),
1706 "xfs_log_item_desc");
1707 if (!xfs_log_item_desc_zone)
1708 goto out_destroy_trans_zone;
1711 * The size of the zone allocated buf log item is the maximum
1712 * size possible under XFS. This wastes a little bit of memory,
1713 * but it is much faster.
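/*
 * Editor's note, a worked example under the usual constants: with
 * XFS_MAX_BLOCKSIZE = 64k and XFS_BLF_CHUNK = 128 there are 512 dirty
 * chunks to track per buffer, and with NBWORD = 32 bits per word the
 * extra bitmap below adds (512 / 32) * sizeof(int) = 64 bytes on top of
 * sizeof(xfs_buf_log_item_t).
 */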
1715 xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
1716 (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
1717 NBWORD) * sizeof(int))), "xfs_buf_item");
1718 if (!xfs_buf_item_zone)
1719 goto out_destroy_log_item_desc_zone;
1721 xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1722 ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1723 sizeof(xfs_extent_t))), "xfs_efd_item");
1725 goto out_destroy_buf_item_zone;
1727 xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1728 ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1729 sizeof(xfs_extent_t))), "xfs_efi_item");
1731 goto out_destroy_efd_zone;
1734 kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1735 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
1736 xfs_fs_inode_init_once);
1737 if (!xfs_inode_zone)
1738 goto out_destroy_efi_zone;
1741 kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1742 KM_ZONE_SPREAD, NULL);
1744 goto out_destroy_inode_zone;
1748 out_destroy_inode_zone:
1749 kmem_zone_destroy(xfs_inode_zone);
1750 out_destroy_efi_zone:
1751 kmem_zone_destroy(xfs_efi_zone);
1752 out_destroy_efd_zone:
1753 kmem_zone_destroy(xfs_efd_zone);
1754 out_destroy_buf_item_zone:
1755 kmem_zone_destroy(xfs_buf_item_zone);
1756 out_destroy_log_item_desc_zone:
1757 kmem_zone_destroy(xfs_log_item_desc_zone);
1758 out_destroy_trans_zone:
1759 kmem_zone_destroy(xfs_trans_zone);
1760 out_destroy_ifork_zone:
1761 kmem_zone_destroy(xfs_ifork_zone);
1762 out_destroy_dabuf_zone:
1763 kmem_zone_destroy(xfs_dabuf_zone);
1764 out_destroy_da_state_zone:
1765 kmem_zone_destroy(xfs_da_state_zone);
1766 out_destroy_btree_cur_zone:
1767 kmem_zone_destroy(xfs_btree_cur_zone);
1768 out_destroy_bmap_free_item_zone:
1769 kmem_zone_destroy(xfs_bmap_free_item_zone);
1770 out_destroy_log_ticket_zone:
1771 kmem_zone_destroy(xfs_log_ticket_zone);
1772 out_destroy_ioend_pool:
1773 mempool_destroy(xfs_ioend_pool);
1774 out_destroy_ioend_zone:
1775 kmem_zone_destroy(xfs_ioend_zone);
1781 xfs_destroy_zones(void)
1783 kmem_zone_destroy(xfs_ili_zone);
1784 kmem_zone_destroy(xfs_inode_zone);
1785 kmem_zone_destroy(xfs_efi_zone);
1786 kmem_zone_destroy(xfs_efd_zone);
1787 kmem_zone_destroy(xfs_buf_item_zone);
1788 kmem_zone_destroy(xfs_log_item_desc_zone);
1789 kmem_zone_destroy(xfs_trans_zone);
1790 kmem_zone_destroy(xfs_ifork_zone);
1791 kmem_zone_destroy(xfs_dabuf_zone);
1792 kmem_zone_destroy(xfs_da_state_zone);
1793 kmem_zone_destroy(xfs_btree_cur_zone);
1794 kmem_zone_destroy(xfs_bmap_free_item_zone);
1795 kmem_zone_destroy(xfs_log_ticket_zone);
1796 mempool_destroy(xfs_ioend_pool);
1797 kmem_zone_destroy(xfs_ioend_zone);
1806 printk(KERN_INFO XFS_VERSION_STRING " with "
1807 XFS_BUILD_OPTIONS " enabled\n");
1812 error = xfs_init_zones();
1816 error = xfs_mru_cache_init();
1818 goto out_destroy_zones;
1820 error = xfs_filestream_init();
1822 goto out_mru_cache_uninit;
1824 error = xfs_buf_init();
1826 goto out_filestream_uninit;
1828 error = xfs_init_procfs();
1830 goto out_buf_terminate;
1832 error = xfs_sysctl_register();
1834 goto out_cleanup_procfs;
1838 error = register_filesystem(&xfs_fs_type);
1840 goto out_sysctl_unregister;
1843 out_sysctl_unregister:
1844 xfs_sysctl_unregister();
1846 xfs_cleanup_procfs();
1848 xfs_buf_terminate();
1849 out_filestream_uninit:
1850 xfs_filestream_uninit();
1851 out_mru_cache_uninit:
1852 xfs_mru_cache_uninit();
1854 xfs_destroy_zones();
1863 unregister_filesystem(&xfs_fs_type);
1864 xfs_sysctl_unregister();
1865 xfs_cleanup_procfs();
1866 xfs_buf_terminate();
1867 xfs_filestream_uninit();
1868 xfs_mru_cache_uninit();
1869 xfs_destroy_zones();
1872 module_init(init_xfs_fs);
1873 module_exit(exit_xfs_fs);
1875 MODULE_AUTHOR("Silicon Graphics, Inc.");
1876 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
1877 MODULE_LICENSE("GPL");