2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "xfs_trans.h"
27 #include "xfs_alloc.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_quota.h"
30 #include "xfs_mount.h"
31 #include "xfs_bmap_btree.h"
32 #include "xfs_alloc_btree.h"
33 #include "xfs_ialloc_btree.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_btree.h"
39 #include "xfs_btree_trace.h"
40 #include "xfs_ialloc.h"
42 #include "xfs_rtalloc.h"
43 #include "xfs_error.h"
44 #include "xfs_itable.h"
45 #include "xfs_fsops.h"
48 #include "xfs_buf_item.h"
49 #include "xfs_utils.h"
50 #include "xfs_vnodeops.h"
51 #include "xfs_version.h"
52 #include "xfs_log_priv.h"
53 #include "xfs_trans_priv.h"
54 #include "xfs_filestream.h"
55 #include "xfs_da_btree.h"
56 #include "xfs_extfree_item.h"
57 #include "xfs_mru_cache.h"
58 #include "xfs_inode_item.h"
60 #include "xfs_trace.h"
62 #include <linux/namei.h>
63 #include <linux/init.h>
64 #include <linux/slab.h>
65 #include <linux/mount.h>
66 #include <linux/mempool.h>
67 #include <linux/writeback.h>
68 #include <linux/kthread.h>
69 #include <linux/freezer.h>
70 #include <linux/parser.h>
72 static const struct super_operations xfs_super_operations;
73 static kmem_zone_t *xfs_ioend_zone;
74 mempool_t *xfs_ioend_pool;
76 #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */
77 #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */
78 #define MNTOPT_LOGDEV "logdev" /* log device */
79 #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
80 #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
81 #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
82 #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
83 #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
84 #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
85 #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
86 #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */
87 #define MNTOPT_MTPT "mtpt" /* filesystem mount point */
88 #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */
89 #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */
90 #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */
91 #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */
92 #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */
93 #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
94 #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
95 * unwritten extent conversion */
96 #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
97 #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
98 #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
99 #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
100 #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
101 #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */
102 #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes in stat() */
104 #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */
105 #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */
106 #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */
107 #define MNTOPT_QUOTA "quota" /* disk quotas (user) */
108 #define MNTOPT_NOQUOTA "noquota" /* no quotas */
109 #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
110 #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
111 #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */
112 #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
113 #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
114 #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */
115 #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
116 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
117 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
118 #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
119 #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
120 #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
121 #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */
122 #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
123 #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
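/*
 * Illustrative only (not part of the original file): a few option strings of
 * the kind accepted by xfs_parseargs() below, combining the MNTOPT_* names
 * defined above. Options are comma separated, take an optional "=value", and
 * size values may carry a k/m/g suffix handled by suffix_strtoul().
 */
static const char * const xfs_example_mount_opts[] __maybe_unused = {
        "logbufs=8,logbsize=256k",              /* log buffer tuning */
        "sunit=512,swidth=4096,swalloc",        /* stripe geometry hints */
        "uquota,gqnoenforce,allocsize=64k",     /* quotas plus preferred alloc size */
};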
126 * Table driven mount option parser.
128 * Currently only used for remount, but it will be used for mount
129 * in the future, too.
132 Opt_barrier, Opt_nobarrier, Opt_err
135 static const match_table_t tokens = {
136 {Opt_barrier, "barrier"},
137 {Opt_nobarrier, "nobarrier"},
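/*
 * Sketch (illustrative, not part of the original file): how a table like the
 * one above is consumed. xfs_fs_remount() below walks the comma separated
 * option string with strsep() and classifies each word with match_token(),
 * so "barrier" maps to Opt_barrier, "nobarrier" to Opt_nobarrier and anything
 * else to Opt_err.
 */
static int xfs_example_classify_opt(char *p)
{
        substring_t args[MAX_OPT_ARGS];

        return match_token(p, tokens, args);
}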
143 suffix_strtoul(char *s, char **endp, unsigned int base)
145 int last, shift_left_factor = 0;
146 char *value = s;
148 last = strlen(value) - 1;
149 if (value[last] == 'K' || value[last] == 'k') {
150 shift_left_factor = 10;
153 if (value[last] == 'M' || value[last] == 'm') {
154 shift_left_factor = 20;
157 if (value[last] == 'G' || value[last] == 'g') {
158 shift_left_factor = 30;
162 return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
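/*
 * Worked example (illustrative sketch, not part of the original file): a
 * minimal mirror of the suffix handling above. For logbsize=32k the value
 * "32k" becomes 32 << 10 = 32768, "1m" becomes 1 << 20 = 1048576, and a
 * plain "4096" is returned unchanged. Unlike the real helper, this sketch
 * does not strip the suffix from the string before parsing.
 */
static unsigned long example_suffix_strtoul(const char *s)
{
        char *end;
        unsigned long val = simple_strtoul(s, &end, 10);

        if (*end == 'k' || *end == 'K')
                return val << 10;       /* kilobytes */
        if (*end == 'm' || *end == 'M')
                return val << 20;       /* megabytes */
        if (*end == 'g' || *end == 'G')
                return val << 30;       /* gigabytes */
        return val;                     /* no suffix */
}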
166 * This function fills in xfs_mount_t fields based on mount args.
167 * Note: the superblock has _not_ yet been read in.
169 * Note that this function leaks the various device name allocations on
170 * failure. The caller takes care of them.
174 struct xfs_mount *mp,
178 struct super_block *sb = mp->m_super;
179 char *this_char, *value, *eov;
183 int dmapi_implies_ikeep = 1;
184 __uint8_t iosizelog = 0;
187 * Copy binary VFS mount flags we are interested in.
189 if (sb->s_flags & MS_RDONLY)
190 mp->m_flags |= XFS_MOUNT_RDONLY;
191 if (sb->s_flags & MS_DIRSYNC)
192 mp->m_flags |= XFS_MOUNT_DIRSYNC;
193 if (sb->s_flags & MS_SYNCHRONOUS)
194 mp->m_flags |= XFS_MOUNT_WSYNC;
197 * Set some default flags that could be cleared by the mount option
198 * parsing.
200 mp->m_flags |= XFS_MOUNT_BARRIER;
201 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
202 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
205 * These can be overridden by the mount option parsing.
213 while ((this_char = strsep(&options, ",")) != NULL) {
216 if ((value = strchr(this_char, '=')) != NULL)
219 if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
220 if (!value || !*value) {
222 "XFS: %s option requires an argument",
226 mp->m_logbufs = simple_strtoul(value, &eov, 10);
227 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
228 if (!value || !*value) {
230 "XFS: %s option requires an argument",
234 mp->m_logbsize = suffix_strtoul(value, &eov, 10);
235 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
236 if (!value || !*value) {
238 "XFS: %s option requires an argument",
242 mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
245 } else if (!strcmp(this_char, MNTOPT_MTPT)) {
246 if (!value || !*value) {
248 "XFS: %s option requires an argument",
252 *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
255 } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
256 if (!value || !*value) {
258 "XFS: %s option requires an argument",
262 mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
265 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
266 if (!value || !*value) {
268 "XFS: %s option requires an argument",
272 iosize = simple_strtoul(value, &eov, 10);
273 iosizelog = ffs(iosize) - 1;
274 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
275 if (!value || !*value) {
277 "XFS: %s option requires an argument",
281 iosize = suffix_strtoul(value, &eov, 10);
282 iosizelog = ffs(iosize) - 1;
283 } else if (!strcmp(this_char, MNTOPT_GRPID) ||
284 !strcmp(this_char, MNTOPT_BSDGROUPS)) {
285 mp->m_flags |= XFS_MOUNT_GRPID;
286 } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
287 !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
288 mp->m_flags &= ~XFS_MOUNT_GRPID;
289 } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
290 mp->m_flags |= XFS_MOUNT_WSYNC;
291 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
292 mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
293 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
294 mp->m_flags |= XFS_MOUNT_NORECOVERY;
295 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
296 mp->m_flags |= XFS_MOUNT_NOALIGN;
297 } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
298 mp->m_flags |= XFS_MOUNT_SWALLOC;
299 } else if (!strcmp(this_char, MNTOPT_SUNIT)) {
300 if (!value || !*value) {
302 "XFS: %s option requires an argument",
306 dsunit = simple_strtoul(value, &eov, 10);
307 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
308 if (!value || !*value) {
310 "XFS: %s option requires an argument",
314 dswidth = simple_strtoul(value, &eov, 10);
315 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
316 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
319 "XFS: %s option not allowed on this system",
323 } else if (!strcmp(this_char, MNTOPT_NOUUID)) {
324 mp->m_flags |= XFS_MOUNT_NOUUID;
325 } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
326 mp->m_flags |= XFS_MOUNT_BARRIER;
327 } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
328 mp->m_flags &= ~XFS_MOUNT_BARRIER;
329 } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
330 mp->m_flags |= XFS_MOUNT_IKEEP;
331 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
332 dmapi_implies_ikeep = 0;
333 mp->m_flags &= ~XFS_MOUNT_IKEEP;
334 } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
335 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
336 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
337 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
338 } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
339 mp->m_flags |= XFS_MOUNT_ATTR2;
340 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
341 mp->m_flags &= ~XFS_MOUNT_ATTR2;
342 mp->m_flags |= XFS_MOUNT_NOATTR2;
343 } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
344 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
345 } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
346 mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
347 XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
348 XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
349 XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
350 } else if (!strcmp(this_char, MNTOPT_QUOTA) ||
351 !strcmp(this_char, MNTOPT_UQUOTA) ||
352 !strcmp(this_char, MNTOPT_USRQUOTA)) {
353 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
355 } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
356 !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
357 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
358 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
359 } else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
360 !strcmp(this_char, MNTOPT_PRJQUOTA)) {
361 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
363 } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
364 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
365 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
366 } else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
367 !strcmp(this_char, MNTOPT_GRPQUOTA)) {
368 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
370 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
371 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
372 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
373 } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
374 mp->m_flags |= XFS_MOUNT_DMAPI;
375 } else if (!strcmp(this_char, MNTOPT_XDSM)) {
376 mp->m_flags |= XFS_MOUNT_DMAPI;
377 } else if (!strcmp(this_char, MNTOPT_DMI)) {
378 mp->m_flags |= XFS_MOUNT_DMAPI;
379 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
380 mp->m_flags |= XFS_MOUNT_DELAYLOG;
382 "Enabling EXPERIMENTAL delayed logging feature "
383 "- use at your own risk.\n");
384 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
385 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
386 } else if (!strcmp(this_char, "ihashsize")) {
388 "XFS: ihashsize no longer used, option is deprecated.");
389 } else if (!strcmp(this_char, "osyncisdsync")) {
390 /* no-op, this is now the default */
392 "XFS: osyncisdsync is now the default, option is deprecated.");
393 } else if (!strcmp(this_char, "irixsgid")) {
395 "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
398 "XFS: unknown mount option [%s].", this_char);
404 * no recovery flag requires a read-only mount
406 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
407 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
408 cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only.");
412 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
414 "XFS: sunit and swidth options incompatible with the noalign option");
418 #ifndef CONFIG_XFS_QUOTA
419 if (XFS_IS_QUOTA_RUNNING(mp)) {
421 "XFS: quota support not available in this kernel.");
426 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
427 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
429 "XFS: cannot mount with both project and group quota");
433 if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
434 printk("XFS: %s option needs the mount point option as well\n",
439 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
441 "XFS: sunit and swidth must be specified together");
445 if (dsunit && (dswidth % dsunit != 0)) {
447 "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
453 * Applications using DMI filesystems often expect the
454 * inode generation number to be monotonically increasing.
455 * If we delete inode chunks we break this assumption, so
456 * keep unused inode chunks on disk for DMI filesystems
457 * until we come up with a better solution.
458 * Note that if "ikeep" or "noikeep" mount options are
459 * supplied, then they are honored.
461 if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
462 mp->m_flags |= XFS_MOUNT_IKEEP;
465 if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
467 * At this point the superblock has not been read
468 * in, therefore we do not know the block size.
469 * Before the mount call ends we will convert
470 * these to FSBs.
473 mp->m_dalign = dsunit;
474 mp->m_flags |= XFS_MOUNT_RETERR;
478 mp->m_swidth = dswidth;
481 if (mp->m_logbufs != -1 &&
482 mp->m_logbufs != 0 &&
483 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
484 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
486 "XFS: invalid logbufs value: %d [not %d-%d]",
487 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
488 return XFS_ERROR(EINVAL);
490 if (mp->m_logbsize != -1 &&
491 mp->m_logbsize != 0 &&
492 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
493 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
494 !is_power_of_2(mp->m_logbsize))) {
496 "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
498 return XFS_ERROR(EINVAL);
501 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
504 mp->m_fsname_len = strlen(mp->m_fsname) + 1;
507 if (iosizelog > XFS_MAX_IO_LOG ||
508 iosizelog < XFS_MIN_IO_LOG) {
510 "XFS: invalid log iosize: %d [not %d-%d]",
511 iosizelog, XFS_MIN_IO_LOG,
513 return XFS_ERROR(EINVAL);
516 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
517 mp->m_readio_log = iosizelog;
518 mp->m_writeio_log = iosizelog;
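/*
 * Minimal sketch (illustrative, not part of the original file) of the parsing
 * structure used above: walk the comma separated string with strsep(), split
 * each token at '=' and dispatch on the option name. The string is modified
 * in place, exactly as in xfs_parseargs(); error handling and the real option
 * names are omitted here.
 */
static void xfs_example_parse(char *options)
{
        char *this_char, *value;

        while ((this_char = strsep(&options, ",")) != NULL) {
                if (!*this_char)
                        continue;               /* skip empty tokens, e.g. ",," */
                value = strchr(this_char, '=');
                if (value)
                        *value++ = 0;           /* terminate the name, point at the value */
                /* ... compare this_char against the MNTOPT_* names above ... */
        }
}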
524 struct proc_xfs_info {
531 struct xfs_mount *mp,
534 static struct proc_xfs_info xfs_info_set[] = {
535 /* the few simple ones we can get from the mount struct */
536 { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
537 { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
538 { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
539 { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
540 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
541 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
542 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
543 { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
544 { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
545 { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI },
546 { XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
547 { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
550 static struct proc_xfs_info xfs_info_unset[] = {
551 /* the few simple ones we can get from the mount struct */
552 { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO },
553 { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER },
554 { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE },
557 struct proc_xfs_info *xfs_infop;
559 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
560 if (mp->m_flags & xfs_infop->flag)
561 seq_puts(m, xfs_infop->str);
563 for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
564 if (!(mp->m_flags & xfs_infop->flag))
565 seq_puts(m, xfs_infop->str);
568 if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
569 seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
570 (int)(1 << mp->m_writeio_log) >> 10);
572 if (mp->m_logbufs > 0)
573 seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
574 if (mp->m_logbsize > 0)
575 seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
578 seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
580 seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
582 if (mp->m_dalign > 0)
583 seq_printf(m, "," MNTOPT_SUNIT "=%d",
584 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
585 if (mp->m_swidth > 0)
586 seq_printf(m, "," MNTOPT_SWIDTH "=%d",
587 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
589 if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
590 seq_puts(m, "," MNTOPT_USRQUOTA);
591 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
592 seq_puts(m, "," MNTOPT_UQUOTANOENF);
594 /* Either project or group quotas can be active, not both */
596 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
597 if (mp->m_qflags & XFS_OQUOTA_ENFD)
598 seq_puts(m, "," MNTOPT_PRJQUOTA);
600 seq_puts(m, "," MNTOPT_PQUOTANOENF);
601 } else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
602 if (mp->m_qflags & XFS_OQUOTA_ENFD)
603 seq_puts(m, "," MNTOPT_GRPQUOTA);
605 seq_puts(m, "," MNTOPT_GQUOTANOENF);
608 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
609 seq_puts(m, "," MNTOPT_NOQUOTA);
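/*
 * Illustrative sketch (not part of the original file) of the technique used
 * by xfs_showargs() above: one table maps a flag bit to the option string
 * emitted when the bit is set, a second table to the string emitted when the
 * bit is clear, so adding an option means adding a table entry rather than
 * another if/else. Stripe geometry is reported in 512-byte basic blocks via
 * XFS_FSB_TO_BB(), while quota options need the explicit checks above.
 */
static void example_show_flags(struct seq_file *m, int flags)
{
        static const struct { int flag; const char *str; } set_opts[] = {
                { 0x1, ",opt_a" },
                { 0x2, ",opt_b" },
                { 0, NULL }
        };
        int i;

        for (i = 0; set_opts[i].flag; i++)
                if (flags & set_opts[i].flag)
                        seq_puts(m, set_opts[i].str);
}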
615 unsigned int blockshift)
617 unsigned int pagefactor = 1;
618 unsigned int bitshift = BITS_PER_LONG - 1;
620 /* Figure out the maximum filesize; on Linux this can depend on
621 * the filesystem blocksize (on 32 bit platforms).
622 * __block_prepare_write does this in an [unsigned] long...
623 * page->index << (PAGE_CACHE_SHIFT - bbits)
624 * So, for page sized blocks (4K on 32 bit platforms),
625 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
626 * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
627 * but for smaller blocksizes it is less (bbits = log2 bsize).
628 * Note1: get_block_t takes a long (implicit cast from above)
629 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
630 * can optionally convert the [unsigned] long from above into
631 * an [unsigned] long long.
634 #if BITS_PER_LONG == 32
635 # if defined(CONFIG_LBDAF)
636 ASSERT(sizeof(sector_t) == 8);
637 pagefactor = PAGE_CACHE_SIZE;
638 bitshift = BITS_PER_LONG;
640 pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
644 return (((__uint64_t)pagefactor) << bitshift) - 1;
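/*
 * Worked numbers (illustrative, not part of the original file), assuming 4k
 * pages (PAGE_CACHE_SHIFT == 12), for the expression returned above:
 *
 *   64 bit:                     pagefactor = 1,    bitshift = 63 -> 2^63 - 1
 *   32 bit, CONFIG_LBDAF:       pagefactor = 4096, bitshift = 32 -> 2^44 - 1 (~16 TiB)
 *   32 bit, no LBDAF, 4k fsb:   pagefactor = 4096, bitshift = 31 -> 2^43 - 1 (~8 TiB)
 *   32 bit, no LBDAF, 1k fsb:   pagefactor = 1024, bitshift = 31 -> 2^41 - 1 (~2 TiB)
 */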
651 struct block_device **bdevp)
655 *bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp);
656 if (IS_ERR(*bdevp)) {
657 error = PTR_ERR(*bdevp);
658 printk("XFS: Invalid device [%s], error=%d\n", name, error);
666 struct block_device *bdev)
669 close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
673 * Try to write out the superblock using barriers.
679 xfs_buf_t *sbp = xfs_getsb(mp, 0);
684 XFS_BUF_UNDELAYWRITE(sbp);
686 XFS_BUF_UNASYNC(sbp);
687 XFS_BUF_ORDERED(sbp);
690 error = xfs_iowait(sbp);
693 * Clear all the flags we set and possible error state in the
694 * buffer. We only did the write to try out whether barriers
695 * worked and shouldn't leave any traces in the superblock
699 XFS_BUF_ERROR(sbp, 0);
700 XFS_BUF_UNORDERED(sbp);
707 xfs_mountfs_check_barriers(xfs_mount_t *mp)
711 if (mp->m_logdev_targp != mp->m_ddev_targp) {
712 xfs_fs_cmn_err(CE_NOTE, mp,
713 "Disabling barriers, not supported with external log device");
714 mp->m_flags &= ~XFS_MOUNT_BARRIER;
718 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
719 xfs_fs_cmn_err(CE_NOTE, mp,
720 "Disabling barriers, underlying device is readonly");
721 mp->m_flags &= ~XFS_MOUNT_BARRIER;
725 error = xfs_barrier_test(mp);
727 xfs_fs_cmn_err(CE_NOTE, mp,
728 "Disabling barriers, trial barrier write failed");
729 mp->m_flags &= ~XFS_MOUNT_BARRIER;
735 xfs_blkdev_issue_flush(
736 xfs_buftarg_t *buftarg)
738 blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
744 struct xfs_mount *mp)
746 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
747 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
748 xfs_free_buftarg(mp, mp->m_logdev_targp);
749 xfs_blkdev_put(logdev);
751 if (mp->m_rtdev_targp) {
752 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
753 xfs_free_buftarg(mp, mp->m_rtdev_targp);
754 xfs_blkdev_put(rtdev);
756 xfs_free_buftarg(mp, mp->m_ddev_targp);
760 * The file system configurations are:
761 * (1) device (partition) with data and internal log
762 * (2) logical volume with data and log subvolumes.
763 * (3) logical volume with data, log, and realtime subvolumes.
765 * We only have to handle opening the log and realtime volumes here if
766 * they are present. The data subvolume has already been opened by
767 * get_sb_bdev() and is stored in sb->s_bdev.
771 struct xfs_mount *mp)
773 struct block_device *ddev = mp->m_super->s_bdev;
774 struct block_device *logdev = NULL, *rtdev = NULL;
778 * Open real time and log devices - order is important.
781 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
787 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
789 goto out_close_logdev;
791 if (rtdev == ddev || rtdev == logdev) {
793 "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
795 goto out_close_rtdev;
800 * Setup xfs_mount buffer target pointers
803 mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
804 if (!mp->m_ddev_targp)
805 goto out_close_rtdev;
808 mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
809 if (!mp->m_rtdev_targp)
810 goto out_free_ddev_targ;
813 if (logdev && logdev != ddev) {
814 mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
815 if (!mp->m_logdev_targp)
816 goto out_free_rtdev_targ;
818 mp->m_logdev_targp = mp->m_ddev_targp;
824 if (mp->m_rtdev_targp)
825 xfs_free_buftarg(mp, mp->m_rtdev_targp);
827 xfs_free_buftarg(mp, mp->m_ddev_targp);
830 xfs_blkdev_put(rtdev);
832 if (logdev && logdev != ddev)
833 xfs_blkdev_put(logdev);
839 * Setup xfs_mount buffer target pointers based on superblock
843 struct xfs_mount *mp)
847 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
848 mp->m_sb.sb_sectsize);
852 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
853 unsigned int log_sector_size = BBSIZE;
855 if (xfs_sb_version_hassector(&mp->m_sb))
856 log_sector_size = mp->m_sb.sb_logsectsize;
857 error = xfs_setsize_buftarg(mp->m_logdev_targp,
858 mp->m_sb.sb_blocksize,
863 if (mp->m_rtdev_targp) {
864 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
865 mp->m_sb.sb_blocksize,
866 mp->m_sb.sb_sectsize);
875 * XFS AIL push thread support
879 struct xfs_ail *ailp,
880 xfs_lsn_t threshold_lsn)
882 ailp->xa_target = threshold_lsn;
883 wake_up_process(ailp->xa_task);
890 struct xfs_ail *ailp = data;
891 xfs_lsn_t last_pushed_lsn = 0;
892 long tout = 0; /* milliseconds */
894 while (!kthread_should_stop()) {
895 schedule_timeout_interruptible(tout ?
896 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
901 ASSERT(ailp->xa_mount->m_log);
902 if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
905 tout = xfsaild_push(ailp, &last_pushed_lsn);
913 struct xfs_ail *ailp)
916 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
917 ailp->xa_mount->m_fsname);
918 if (IS_ERR(ailp->xa_task))
919 return -PTR_ERR(ailp->xa_task);
925 struct xfs_ail *ailp)
927 kthread_stop(ailp->xa_task);
931 /* Catch misguided souls that try to use this interface on XFS */
932 STATIC struct inode *
934 struct super_block *sb)
941 * Now that the generic code is guaranteed not to be accessing
942 * the linux inode, we can reclaim the inode.
945 xfs_fs_destroy_inode(
948 struct xfs_inode *ip = XFS_I(inode);
950 xfs_itrace_entry(ip);
952 XFS_STATS_INC(vn_reclaim);
954 /* bad inode, get out here ASAP */
955 if (is_bad_inode(inode))
960 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
963 * We should never get here with one of the reclaim flags already set.
965 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
966 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
969 * We always use background reclaim here because even if the
970 * inode is clean, it still may be under IO and hence we have
971 * to take the flush lock. The background reclaim path handles
972 * this more efficiently than we can here, so simply let background
973 * reclaim tear down all inodes.
976 xfs_inode_set_reclaim_tag(ip);
980 * Slab object creation initialisation for the XFS inode.
981 * This covers only the idempotent fields in the XFS inode;
982 * all other fields need to be initialised on allocation
983 * from the slab. This avoids the need to repeatedly initialise
984 * fields in the xfs inode that are left in the initialised state
985 * when freeing the inode.
988 xfs_fs_inode_init_once(
991 struct xfs_inode *ip = inode;
993 memset(ip, 0, sizeof(struct xfs_inode));
996 inode_init_once(VFS_I(ip));
999 atomic_set(&ip->i_iocount, 0);
1000 atomic_set(&ip->i_pincount, 0);
1001 spin_lock_init(&ip->i_flags_lock);
1002 init_waitqueue_head(&ip->i_ipin_wait);
1004 * Because we want to use a counting completion, complete
1005 * the flush completion once to allow a single access to
1006 * the flush completion without blocking.
1008 init_completion(&ip->i_flush);
1009 complete(&ip->i_flush);
1011 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1012 "xfsino", ip->i_ino);
1016 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
1017 * we catch unlogged VFS level updates to the inode. Care must be taken
1018 * here - the transaction code calls mark_inode_dirty_sync() to mark the
1019 * VFS inode dirty in a transaction and clears the i_update_core field;
1020 * it must clear the field after calling mark_inode_dirty_sync() to
1021 * correctly indicate that the dirty state has been propagated into the
1022 * inode log item.
1024 * We need the barrier() to maintain correct ordering between unlogged
1025 * updates and the transaction commit code that clears the i_update_core
1026 * field. This requires all updates to be completed before marking the
1027 * inode dirty.
1031 struct inode *inode)
1034 XFS_I(inode)->i_update_core = 1;
1039 struct xfs_inode *ip)
1041 struct xfs_mount *mp = ip->i_mount;
1042 struct xfs_trans *tp;
1045 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1046 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
1047 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
1050 xfs_trans_cancel(tp, 0);
1051 /* we need to return with the lock held shared */
1052 xfs_ilock(ip, XFS_ILOCK_SHARED);
1056 xfs_ilock(ip, XFS_ILOCK_EXCL);
1059 * Note - it's possible that we might have pushed ourselves out of the
1060 * way during trans_reserve which would flush the inode. But there's
1061 * no guarantee that the inode buffer has actually gone out yet (it's
1062 * delwri). Plus the buffer could be pinned anyway if it's part of
1063 * an inode in another recent transaction. So we play it safe and
1064 * fire off the transaction anyway.
1066 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1067 xfs_trans_ihold(tp, ip);
1068 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1069 xfs_trans_set_sync(tp);
1070 error = xfs_trans_commit(tp, 0);
1071 xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
1078 struct inode *inode,
1079 struct writeback_control *wbc)
1081 struct xfs_inode *ip = XFS_I(inode);
1082 struct xfs_mount *mp = ip->i_mount;
1085 xfs_itrace_entry(ip);
1087 if (XFS_FORCED_SHUTDOWN(mp))
1088 return XFS_ERROR(EIO);
1090 if (wbc->sync_mode == WB_SYNC_ALL) {
1092 * Make sure the inode has hit stable storage. By using the
1093 * log and the fsync transactions we reduce the IOs we have
1094 * to do here from two (log and inode) to just the log.
1096 * Note: We still need to do a delwri write of the inode after
1097 * this to flush it to the backing buffer so that bulkstat
1098 * works properly if this is the first time the inode has been
1099 * written. Because we hold the ilock atomically over the
1100 * transaction commit and the inode flush we are guaranteed
1101 * that the inode is not pinned when it returns. If the flush
1102 * lock is already held, then the inode has already been
1103 * flushed once and we don't need to flush it again. Hence
1104 * the code will only flush the inode if it isn't already
1105 * flushed.
1108 xfs_ilock(ip, XFS_ILOCK_SHARED);
1109 if (ip->i_update_core) {
1110 error = xfs_log_inode(ip);
1116 * We make this non-blocking if the inode is contended, return
1117 * EAGAIN to indicate to the caller that they did not succeed.
1118 * This prevents the flush path from blocking on inodes inside
1119 * another operation right now; they get caught later by xfs_sync.
1121 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
1125 if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
1129 * Now we have the flush lock and the inode is not pinned, we can check
1130 * if the inode is really clean as we know that there are no pending
1131 * transaction completions, it is not waiting on the delayed write
1132 * queue and there is no IO in progress.
1134 if (xfs_inode_clean(ip)) {
1139 error = xfs_iflush(ip, 0);
1142 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1145 * if we failed to write out the inode then mark
1146 * it dirty again so we'll try again later.
1149 xfs_mark_inode_dirty_sync(ip);
1155 struct inode *inode)
1157 xfs_inode_t *ip = XFS_I(inode);
1159 xfs_itrace_entry(ip);
1160 XFS_STATS_INC(vn_rele);
1161 XFS_STATS_INC(vn_remove);
1162 XFS_STATS_DEC(vn_active);
1165 * The iolock is used by the file system to coordinate reads,
1166 * writes, and block truncates. Up to this point the lock
1167 * protected concurrent accesses by users of the inode. But
1168 * from here forward we're doing some final processing of the
1169 * inode because we're done with it, and although we reuse the
1170 * iolock for protection it is really a distinct lock class
1171 * (in the lockdep sense) from before. To keep lockdep happy
1172 * (and basically indicate what we are doing), we explicitly
1173 * re-init the iolock here.
1175 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
1176 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
1183 struct xfs_mount *mp)
1185 kfree(mp->m_fsname);
1186 kfree(mp->m_rtname);
1187 kfree(mp->m_logname);
1192 struct super_block *sb)
1194 struct xfs_mount *mp = XFS_M(sb);
1198 if (!(sb->s_flags & MS_RDONLY)) {
1200 * XXX(hch): this should be SYNC_WAIT.
1202 * Or more likely not needed at all because the VFS is already
1203 * calling ->sync_fs after shutting down all filesystem
1204 * operations and just before calling ->put_super.
1206 xfs_sync_data(mp, 0);
1207 xfs_sync_attr(mp, 0);
1210 XFS_SEND_PREUNMOUNT(mp);
1213 * Blow away any referenced inode in the filestreams cache.
1214 * This can and will cause log traffic as inodes go inactive
1217 xfs_filestream_unmount(mp);
1219 XFS_bflush(mp->m_ddev_targp);
1221 XFS_SEND_UNMOUNT(mp);
1225 xfs_inode_shrinker_unregister(mp);
1226 xfs_icsb_destroy_counters(mp);
1227 xfs_close_devices(mp);
1229 xfs_free_fsname(mp);
1235 struct super_block *sb,
1238 struct xfs_mount *mp = XFS_M(sb);
1242 * Not much we can do for the first async pass. Writing out the
1243 * superblock would be counter-productive as we are going to redirty
1244 * when writing out other data and metadata (and writing out a single
1245 * block is quite fast anyway).
1247 * Try to asynchronously kick off quota syncing at least.
1250 xfs_qm_sync(mp, SYNC_TRYLOCK);
1254 error = xfs_quiesce_data(mp);
1259 int prev_sync_seq = mp->m_sync_seq;
1262 * The disk must be active because we're syncing.
1263 * We schedule xfssyncd now (now that the disk is
1264 * active) instead of later (when it might not be).
1266 wake_up_process(mp->m_sync_task);
1268 * We have to wait for the sync iteration to complete.
1269 * If we don't, the disk activity caused by the sync
1270 * will come after the sync is completed, and that
1271 * triggers another sync from laptop mode.
1273 wait_event(mp->m_wait_single_sync_task,
1274 mp->m_sync_seq != prev_sync_seq);
1282 struct dentry *dentry,
1283 struct kstatfs *statp)
1285 struct xfs_mount *mp = XFS_M(dentry->d_sb);
1286 xfs_sb_t *sbp = &mp->m_sb;
1287 struct xfs_inode *ip = XFS_I(dentry->d_inode);
1288 __uint64_t fakeinos, id;
1291 statp->f_type = XFS_SB_MAGIC;
1292 statp->f_namelen = MAXNAMELEN - 1;
1294 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1295 statp->f_fsid.val[0] = (u32)id;
1296 statp->f_fsid.val[1] = (u32)(id >> 32);
1298 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
1300 spin_lock(&mp->m_sb_lock);
1301 statp->f_bsize = sbp->sb_blocksize;
1302 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1303 statp->f_blocks = sbp->sb_dblocks - lsize;
1304 statp->f_bfree = statp->f_bavail =
1305 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1306 fakeinos = statp->f_bfree << sbp->sb_inopblog;
1308 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1309 if (mp->m_maxicount)
1310 statp->f_files = min_t(typeof(statp->f_files),
1313 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1314 spin_unlock(&mp->m_sb_lock);
1316 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
1317 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
1318 (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
1319 xfs_qm_statvfs(ip, statp);
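/*
 * Worked example (illustrative, not part of the original file) of the inode
 * accounting above: with 4k blocks and 256 byte inodes sb_inopblog is 4, so
 * 1000 free blocks contribute 8000 "fake" inodes that could still be created.
 * f_files is that potential plus the inodes already allocated, clamped to
 * XFS_MAXINUMBER (and to m_maxicount in the real code); f_ffree subtracts the
 * inodes currently in use (sb_icount - sb_ifree).
 */
static unsigned long long example_statfs_files(unsigned long long bfree,
                                               unsigned long long icount,
                                               int inopblog,
                                               unsigned long long maxinumber)
{
        unsigned long long fakeinos = bfree << inopblog;        /* inodes that could still be created */
        unsigned long long files = icount + fakeinos;

        return files > maxinumber ? maxinumber : files;
}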
1324 xfs_save_resvblks(struct xfs_mount *mp)
1326 __uint64_t resblks = 0;
1328 mp->m_resblks_save = mp->m_resblks;
1329 xfs_reserve_blocks(mp, &resblks, NULL);
1333 xfs_restore_resvblks(struct xfs_mount *mp)
1337 if (mp->m_resblks_save) {
1338 resblks = mp->m_resblks_save;
1339 mp->m_resblks_save = 0;
1341 resblks = xfs_default_resblks(mp);
1343 xfs_reserve_blocks(mp, &resblks, NULL);
1348 struct super_block *sb,
1352 struct xfs_mount *mp = XFS_M(sb);
1353 substring_t args[MAX_OPT_ARGS];
1357 while ((p = strsep(&options, ",")) != NULL) {
1363 token = match_token(p, tokens, args);
1366 mp->m_flags |= XFS_MOUNT_BARRIER;
1369 * Test if barriers are actually working if we can,
1370 * else delay this check until the filesystem is
1371 * marked writeable.
1373 if (!(mp->m_flags & XFS_MOUNT_RDONLY))
1374 xfs_mountfs_check_barriers(mp);
1377 mp->m_flags &= ~XFS_MOUNT_BARRIER;
1381 * Logically we would return an error here to prevent
1382 * users from believing they might have changed
1383 * mount options using remount which can't be changed.
1385 * But unfortunately mount(8) adds all options from
1386 * mtab and fstab to the mount arguments in some cases
1387 * so we can't blindly reject options, but have to
1388 * check for each specified option if it actually
1389 * differs from the currently set option and only
1390 * reject it if that's the case.
1392 * Until that is implemented we return success for
1393 * every remount request, and silently ignore all
1394 * options that we can't actually change.
1398 "XFS: mount option \"%s\" not supported for remount\n", p);
1407 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1408 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1409 if (mp->m_flags & XFS_MOUNT_BARRIER)
1410 xfs_mountfs_check_barriers(mp);
1413 * If this is the first remount to writeable state we
1414 * might have some superblock changes to update.
1416 if (mp->m_update_flags) {
1417 error = xfs_mount_log_sb(mp, mp->m_update_flags);
1420 "XFS: failed to write sb changes");
1423 mp->m_update_flags = 0;
1427 * Fill out the reserve pool if it is empty. Use the stashed
1428 * value if it is non-zero, otherwise go with the default.
1430 xfs_restore_resvblks(mp);
1434 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
1436 * After we have synced the data but before we sync the
1437 * metadata, we need to free up the reserve block pool so that
1438 * the used block count in the superblock on disk is correct at
1439 * the end of the remount. Stash the current reserve pool size
1440 * so that if we get remounted rw, we can return it to the same
1441 * size.
1444 xfs_quiesce_data(mp);
1445 xfs_save_resvblks(mp);
1446 xfs_quiesce_attr(mp);
1447 mp->m_flags |= XFS_MOUNT_RDONLY;
1454 * Second stage of a freeze. The data is already frozen so we only
1455 * need to take care of the metadata. Once that's done write a dummy
1456 * record to dirty the log in case of a crash while frozen.
1460 struct super_block *sb)
1462 struct xfs_mount *mp = XFS_M(sb);
1464 xfs_save_resvblks(mp);
1465 xfs_quiesce_attr(mp);
1466 return -xfs_fs_log_dummy(mp);
1471 struct super_block *sb)
1473 struct xfs_mount *mp = XFS_M(sb);
1475 xfs_restore_resvblks(mp);
1480 xfs_fs_show_options(
1482 struct vfsmount *mnt)
1484 return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
1488 * This function fills in xfs_mount_t fields based on mount args.
1489 * Note: the superblock _has_ now been read in.
1493 struct xfs_mount *mp)
1495 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1497 /* Fail a mount where the logbuf is smaller than the log stripe */
1498 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1499 if (mp->m_logbsize <= 0 &&
1500 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1501 mp->m_logbsize = mp->m_sb.sb_logsunit;
1502 } else if (mp->m_logbsize > 0 &&
1503 mp->m_logbsize < mp->m_sb.sb_logsunit) {
1505 "XFS: logbuf size must be greater than or equal to log stripe size");
1506 return XFS_ERROR(EINVAL);
1509 /* Fail a mount if the logbuf is larger than 32K */
1510 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1512 "XFS: logbuf size for version 1 logs must be 16K or 32K");
1513 return XFS_ERROR(EINVAL);
1518 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1519 * told by noattr2 to turn it off
1521 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1522 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1523 mp->m_flags |= XFS_MOUNT_ATTR2;
1526 * prohibit r/w mounts of read-only filesystems
1528 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1530 "XFS: cannot mount a read-only filesystem as read-write");
1531 return XFS_ERROR(EROFS);
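/*
 * Illustrative only (not part of the original file), assuming
 * XLOG_BIG_RECORD_BSIZE is 32k: the v2 log rule enforced above. With a log
 * stripe unit of 64k and no logbsize= option, the log buffer size is bumped
 * up to 64k; an explicit logbsize=32k is rejected because it is smaller than
 * the stripe unit, while logbsize=128k is accepted.
 */
static int example_pick_logbsize(int logbsize, int logsunit)
{
        if (logbsize <= 0 && logsunit > 32 * 1024)
                return logsunit;        /* default the log buffer size to the stripe unit */
        if (logbsize > 0 && logbsize < logsunit)
                return -1;              /* too small for the stripe unit: reject */
        return logbsize;
}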
1539 struct super_block *sb,
1544 struct xfs_mount *mp = NULL;
1545 int flags = 0, error = ENOMEM;
1548 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1552 spin_lock_init(&mp->m_sb_lock);
1553 mutex_init(&mp->m_growlock);
1554 atomic_set(&mp->m_active_trans, 0);
1555 INIT_LIST_HEAD(&mp->m_sync_list);
1556 spin_lock_init(&mp->m_sync_lock);
1557 init_waitqueue_head(&mp->m_wait_single_sync_task);
1562 error = xfs_parseargs(mp, (char *)data, &mtpt);
1564 goto out_free_fsname;
1566 sb_min_blocksize(sb, BBSIZE);
1567 sb->s_xattr = xfs_xattr_handlers;
1568 sb->s_export_op = &xfs_export_operations;
1569 #ifdef CONFIG_XFS_QUOTA
1570 sb->s_qcop = &xfs_quotactl_operations;
1572 sb->s_op = &xfs_super_operations;
1574 error = xfs_dmops_get(mp);
1576 goto out_free_fsname;
1579 flags |= XFS_MFSI_QUIET;
1581 error = xfs_open_devices(mp);
1585 if (xfs_icsb_init_counters(mp))
1586 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
1588 error = xfs_readsb(mp, flags);
1590 goto out_destroy_counters;
1592 error = xfs_finish_flags(mp);
1596 error = xfs_setup_devices(mp);
1600 if (mp->m_flags & XFS_MOUNT_BARRIER)
1601 xfs_mountfs_check_barriers(mp);
1603 error = xfs_filestream_mount(mp);
1607 error = xfs_mountfs(mp);
1609 goto out_filestream_unmount;
1611 XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
1613 sb->s_magic = XFS_SB_MAGIC;
1614 sb->s_blocksize = mp->m_sb.sb_blocksize;
1615 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1616 sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1617 sb->s_time_gran = 1;
1618 set_posix_acl_flag(sb);
1620 root = igrab(VFS_I(mp->m_rootip));
1625 if (is_bad_inode(root)) {
1629 sb->s_root = d_alloc_root(root);
1635 error = xfs_syncd_init(mp);
1639 xfs_inode_shrinker_register(mp);
1644 out_filestream_unmount:
1645 xfs_filestream_unmount(mp);
1648 out_destroy_counters:
1649 xfs_icsb_destroy_counters(mp);
1650 xfs_close_devices(mp);
1654 xfs_free_fsname(mp);
1670 * Blow away any referenced inode in the filestreams cache.
1671 * This can and will cause log traffic as inodes go inactive
1674 xfs_filestream_unmount(mp);
1676 XFS_bflush(mp->m_ddev_targp);
1684 struct file_system_type *fs_type,
1686 const char *dev_name,
1688 struct vfsmount *mnt)
1690 return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
1694 static const struct super_operations xfs_super_operations = {
1695 .alloc_inode = xfs_fs_alloc_inode,
1696 .destroy_inode = xfs_fs_destroy_inode,
1697 .dirty_inode = xfs_fs_dirty_inode,
1698 .write_inode = xfs_fs_write_inode,
1699 .clear_inode = xfs_fs_clear_inode,
1700 .put_super = xfs_fs_put_super,
1701 .sync_fs = xfs_fs_sync_fs,
1702 .freeze_fs = xfs_fs_freeze,
1703 .unfreeze_fs = xfs_fs_unfreeze,
1704 .statfs = xfs_fs_statfs,
1705 .remount_fs = xfs_fs_remount,
1706 .show_options = xfs_fs_show_options,
1709 static struct file_system_type xfs_fs_type = {
1710 .owner = THIS_MODULE,
1712 .get_sb = xfs_fs_get_sb,
1713 .kill_sb = kill_block_super,
1714 .fs_flags = FS_REQUIRES_DEV,
1718 xfs_init_zones(void)
1721 xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
1722 if (!xfs_ioend_zone)
1725 xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
1727 if (!xfs_ioend_pool)
1728 goto out_destroy_ioend_zone;
1730 xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1732 if (!xfs_log_ticket_zone)
1733 goto out_destroy_ioend_pool;
1735 xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
1736 "xfs_bmap_free_item");
1737 if (!xfs_bmap_free_item_zone)
1738 goto out_destroy_log_ticket_zone;
1740 xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1742 if (!xfs_btree_cur_zone)
1743 goto out_destroy_bmap_free_item_zone;
1745 xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1747 if (!xfs_da_state_zone)
1748 goto out_destroy_btree_cur_zone;
1750 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
1751 if (!xfs_dabuf_zone)
1752 goto out_destroy_da_state_zone;
1754 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1755 if (!xfs_ifork_zone)
1756 goto out_destroy_dabuf_zone;
1758 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1759 if (!xfs_trans_zone)
1760 goto out_destroy_ifork_zone;
1763 * The size of the zone allocated buf log item is the maximum
1764 * size possible under XFS. This wastes a little bit of memory,
1765 * but it is much faster.
1767 xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
1768 (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
1769 NBWORD) * sizeof(int))), "xfs_buf_item");
1770 if (!xfs_buf_item_zone)
1771 goto out_destroy_trans_zone;
1773 xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1774 ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1775 sizeof(xfs_extent_t))), "xfs_efd_item");
1777 goto out_destroy_buf_item_zone;
1779 xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1780 ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1781 sizeof(xfs_extent_t))), "xfs_efi_item");
1783 goto out_destroy_efd_zone;
1786 kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1787 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
1788 xfs_fs_inode_init_once);
1789 if (!xfs_inode_zone)
1790 goto out_destroy_efi_zone;
1793 kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1794 KM_ZONE_SPREAD, NULL);
1796 goto out_destroy_inode_zone;
1800 out_destroy_inode_zone:
1801 kmem_zone_destroy(xfs_inode_zone);
1802 out_destroy_efi_zone:
1803 kmem_zone_destroy(xfs_efi_zone);
1804 out_destroy_efd_zone:
1805 kmem_zone_destroy(xfs_efd_zone);
1806 out_destroy_buf_item_zone:
1807 kmem_zone_destroy(xfs_buf_item_zone);
1808 out_destroy_trans_zone:
1809 kmem_zone_destroy(xfs_trans_zone);
1810 out_destroy_ifork_zone:
1811 kmem_zone_destroy(xfs_ifork_zone);
1812 out_destroy_dabuf_zone:
1813 kmem_zone_destroy(xfs_dabuf_zone);
1814 out_destroy_da_state_zone:
1815 kmem_zone_destroy(xfs_da_state_zone);
1816 out_destroy_btree_cur_zone:
1817 kmem_zone_destroy(xfs_btree_cur_zone);
1818 out_destroy_bmap_free_item_zone:
1819 kmem_zone_destroy(xfs_bmap_free_item_zone);
1820 out_destroy_log_ticket_zone:
1821 kmem_zone_destroy(xfs_log_ticket_zone);
1822 out_destroy_ioend_pool:
1823 mempool_destroy(xfs_ioend_pool);
1824 out_destroy_ioend_zone:
1825 kmem_zone_destroy(xfs_ioend_zone);
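/*
 * Illustrative sketch (not part of the original file) of the unwinding style
 * used by xfs_init_zones() above: each allocation gets an out_* label that
 * frees everything set up before it, so a failure at any step releases
 * resources in reverse order of creation.
 */
static int example_init_two(void **a, void **b)
{
        *a = kmalloc(32, GFP_KERNEL);
        if (!*a)
                goto out;
        *b = kmalloc(32, GFP_KERNEL);
        if (!*b)
                goto out_free_a;
        return 0;

 out_free_a:
        kfree(*a);
 out:
        return -ENOMEM;
}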
1831 xfs_destroy_zones(void)
1833 kmem_zone_destroy(xfs_ili_zone);
1834 kmem_zone_destroy(xfs_inode_zone);
1835 kmem_zone_destroy(xfs_efi_zone);
1836 kmem_zone_destroy(xfs_efd_zone);
1837 kmem_zone_destroy(xfs_buf_item_zone);
1838 kmem_zone_destroy(xfs_trans_zone);
1839 kmem_zone_destroy(xfs_ifork_zone);
1840 kmem_zone_destroy(xfs_dabuf_zone);
1841 kmem_zone_destroy(xfs_da_state_zone);
1842 kmem_zone_destroy(xfs_btree_cur_zone);
1843 kmem_zone_destroy(xfs_bmap_free_item_zone);
1844 kmem_zone_destroy(xfs_log_ticket_zone);
1845 mempool_destroy(xfs_ioend_pool);
1846 kmem_zone_destroy(xfs_ioend_zone);
1855 printk(KERN_INFO XFS_VERSION_STRING " with "
1856 XFS_BUILD_OPTIONS " enabled\n");
1861 error = xfs_init_zones();
1865 error = xfs_mru_cache_init();
1867 goto out_destroy_zones;
1869 error = xfs_filestream_init();
1871 goto out_mru_cache_uninit;
1873 error = xfs_buf_init();
1875 goto out_filestream_uninit;
1877 error = xfs_init_procfs();
1879 goto out_buf_terminate;
1881 error = xfs_sysctl_register();
1883 goto out_cleanup_procfs;
1886 xfs_inode_shrinker_init();
1888 error = register_filesystem(&xfs_fs_type);
1890 goto out_sysctl_unregister;
1893 out_sysctl_unregister:
1894 xfs_sysctl_unregister();
1896 xfs_cleanup_procfs();
1898 xfs_buf_terminate();
1899 out_filestream_uninit:
1900 xfs_filestream_uninit();
1901 out_mru_cache_uninit:
1902 xfs_mru_cache_uninit();
1904 xfs_destroy_zones();
1913 unregister_filesystem(&xfs_fs_type);
1914 xfs_inode_shrinker_destroy();
1915 xfs_sysctl_unregister();
1916 xfs_cleanup_procfs();
1917 xfs_buf_terminate();
1918 xfs_filestream_uninit();
1919 xfs_mru_cache_uninit();
1920 xfs_destroy_zones();
1923 module_init(init_xfs_fs);
1924 module_exit(exit_xfs_fs);
1926 MODULE_AUTHOR("Silicon Graphics, Inc.");
1927 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
1928 MODULE_LICENSE("GPL");