 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>
static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
#define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
#define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use write barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
					 * in stat(). */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota accounting, no limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota accounting, no limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota accounting, no limit enforcement */
#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DELAYLOG    "delaylog"	/* Delayed logging enabled */
#define MNTOPT_NODELAYLOG  "nodelaylog"	/* Delayed logging disabled */
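/*
 * Illustrative usage (assumed, not part of this file): these option names
 * are what users pass to mount(8), e.g.
 *
 *	mount -t xfs -o logbufs=8,logbsize=256k,inode64 /dev/sdb1 /mnt
 *
 * The XFS-specific options above are parsed by xfs_parseargs() below.
 */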
/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
	Opt_barrier, Opt_nobarrier, Opt_err
};
static const match_table_t tokens = {
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_err, NULL}
};
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int	last, shift_left_factor = 0;
	char	*value = s;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}
	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
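/*
 * Worked example (illustrative): for the option value "64k",
 * suffix_strtoul() strips the trailing 'k', parses "64" and returns
 * 64 << 10 == 65536; "32m" likewise yields 32 << 20 == 33554432.
 */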
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	int			dsunit = 0, dswidth = 0, iosize = 0;
	__uint8_t		iosizelog = 0;
	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;
	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;
	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
	mp->m_flags |= XFS_MOUNT_DELAYLOG;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;
		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_logname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_rtname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
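			/*
			 * Illustrative example: "allocsize=64k" yields
			 * iosize == 65536 and iosizelog == ffs(65536) - 1
			 * == 16, i.e. a 2^16 byte preferred I/O size.
			 */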
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
#endif
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			mp->m_flags |= XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, "ihashsize")) {
			xfs_warn(mp,
	"ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
			xfs_warn(mp,
	"osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
			xfs_warn(mp,
	"osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			xfs_warn(mp,
	"irixsgid is now a sysctl(2) variable, option is deprecated.");
		} else {
			xfs_warn(mp, "unknown mount option [%s].", this_char);
			return EINVAL;
		}
	}
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return EINVAL;
	}
#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return EINVAL;
	}
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		xfs_warn(mp, "cannot mount with both project and group quota");
		return EINVAL;
	}
	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return EINVAL;
	}
	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		if (dsunit) {
			mp->m_dalign = dsunit;
			mp->m_flags |= XFS_MOUNT_RETERR;
		}

		if (dswidth)
			mp->m_swidth = dswidth;
	}
	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return XFS_ERROR(EINVAL);
	}
	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return XFS_ERROR(EINVAL);
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
struct proc_xfs_info {
	int	flag;
	char	*str;
};
STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
	if (mp->m_rtname)
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
		else
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	return 0;
}
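/*
 * Illustrative output (assumed, not from a real system): for a mount with
 * user quotas enforced and a 256k log buffer size, the option string this
 * function emits into /proc/mounts might read:
 *
 *	rw,wsync,attr2,delaylog,logbsize=256k,sunit=512,swidth=4096,usrquota
 */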
STATIC __uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;
	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */
#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
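/*
 * Worked example (illustrative): on a 32-bit kernel with CONFIG_LBDAF and
 * 4 KiB pages, pagefactor == 4096 and bitshift == 32, so the limit is
 * (4096 << 32) - 1 == 2^44 - 1, just under 16 TiB.  Without LBDAF and with
 * 4 KiB blocks it drops to (4096 << 31) - 1, just under 8 TiB, matching
 * the comment above.  On 64-bit kernels the result is 2^63 - 1.
 */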
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}
STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_buf_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
STATIC void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_notice(mp,
			"Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
		xfs_notice(mp,
			"Disabling barriers, underlying device is readonly");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_notice(mp,
			"Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
	}
}
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
}
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
}
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
							mp->m_rtname);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
							mp->m_logname);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev)
		xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
 out:
	return error;
}
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))
		goto out_reclaim;

	xfs_ioend_wait(ip);

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock.  The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
 out_reclaim:
	xfs_inode_set_reclaim_tag(ip);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab.  This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_iocount, 0);
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_waitqueue_head(&ip->i_ipin_wait);
	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&ip->i_flush);
	complete(&ip->i_flush);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}
/*
 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 * we catch unlogged VFS level updates to the inode.
 *
 * We need the barrier() to maintain correct ordering between unlogged
 * updates and the transaction commit code that clears the i_update_core
 * field.  This requires all updates to be completed before marking the
 * inode dirty.
 */
STATIC void
xfs_fs_dirty_inode(
	struct inode	*inode)
{
	barrier();
	XFS_I(inode)->i_update_core = 1;
}
STATIC int
xfs_log_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		/* we need to return with the lock held shared */
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Note - it's possible that we might have pushed ourselves out of the
	 * way during trans_reserve which would flush the inode.  But there's
	 * no guarantee that the inode buffer has actually gone out yet (it's
	 * delwri).  Plus the buffer could be pinned anyway if it's part of
	 * an inode in another recent transaction.  So we play it safe and
	 * fire off the transaction anyway.
	 */
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);

	return error;
}
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	trace_xfs_write_inode(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		/*
		 * Make sure the inode has made it into the log.  Instead
		 * of forcing it all the way to stable storage using a
		 * synchronous transaction we let the log force inside the
		 * ->sync_fs call do that for us, which reduces the number
		 * of synchronous log forces dramatically.
		 */
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		if (ip->i_update_core) {
			error = xfs_log_inode(ip);
			if (error)
				goto out_unlock;
		}
	} else {
		/*
		 * We make this non-blocking if the inode is contended, return
		 * EAGAIN to indicate to the caller that they did not succeed.
		 * This prevents the flush path from blocking on inodes inside
		 * another operation right now, they get caught later by
		 * xfs_syncd.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
			goto out;

		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
			goto out_unlock;

		/*
		 * Now we have the flush lock and the inode is not pinned, we
		 * can check if the inode is really clean as we know that
		 * there are no pending transaction completions, it is not
		 * waiting on the delayed write queue and there is no IO in
		 * progress.
		 */
		if (xfs_inode_clean(ip)) {
			xfs_ifunlock(ip);
			error = 0;
			goto out_unlock;
		}
		error = xfs_iflush(ip, SYNC_TRYLOCK);
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 out:
	/*
	 * if we failed to write out the inode then mark
	 * it dirty again so we'll try again later.
	 */
	if (error)
		xfs_mark_inode_dirty_sync(ip);
	return -error;
}
STATIC void
xfs_fs_evict_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	trace_xfs_evict_inode(ip);

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");

	xfs_inactive(ip);
}
STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Unregister the memory shrinker before we tear down the mount
	 * structure so we don't have memory reclaim racing with us here.
	 */
	xfs_inode_shrinker_unregister(mp);
	xfs_syncd_stop(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	xfs_freesb(mp);
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/*
	 * Not much we can do for the first async pass.  Writing out the
	 * superblock would be counter-productive as we are going to redirty
	 * when writing out other data and metadata (and writing out a single
	 * block is quite fast anyway).
	 *
	 * Try to asynchronously kick off quota syncing at least.
	 */
	if (!wait) {
		xfs_qm_sync(mp, SYNC_TRYLOCK);
		return 0;
	}

	error = xfs_quiesce_data(mp);
	if (error)
		return -error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work_sync(&mp->m_sync_work);
	}

	return 0;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	__uint64_t		fakeinos, id;
	xfs_extlen_t		lsize;
	__int64_t		ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	statp->f_bfree = statp->f_bavail =
				sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files =
	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);

	spin_unlock(&mp->m_sb_lock);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);
	return 0;
}
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;

			/*
			 * Test if barriers are actually working if we can,
			 * else delay this check until the filesystem is
			 * marked writeable.
			 */
			if (!(mp->m_flags & XFS_MOUNT_RDONLY))
				xfs_mountfs_check_barriers(mp);
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount\n", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}
	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
		if (mp->m_flags & XFS_MOUNT_BARRIER)
			xfs_mountfs_check_barriers(mp);

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_flags = 0;
		}

		/*
		 * Fill out the reserve pool if it is empty.  Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * After we have synced the data but before we sync the
		 * metadata, we need to free up the reserve block pool so that
		 * the used block count in the superblock on disk is correct at
		 * the end of the remount.  Stash the current reserve pool size
		 * so that if we get remounted rw, we can return it to the same
		 * size.
		 */
		xfs_quiesce_data(mp);
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}
/*
 * Second stage of a freeze.  The data is already frozen so we only
 * need to take care of the metadata.  Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return -xfs_fs_log_dummy(mp);
}
STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	return 0;
}
STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return XFS_ERROR(EINVAL);
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return XFS_ERROR(EINVAL);
		}
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return XFS_ERROR(EROFS);
	}

	return 0;
}
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_icsb_init_counters(mp);
	if (error)
		goto out_close_devices;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_mountfs_check_barriers(mp);

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 * For the same reason we must also initialise the syncd and register
	 * the inode cache shrinker so that inodes can be reclaimed during
	 * operations like a quotacheck that iterate all inodes in the
	 * filesystem.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = xfs_syncd_init(mp);
	if (error)
		goto out_filestream_unmount;

	xfs_inode_shrinker_register(mp);

	error = xfs_mountfs(mp);
	if (error)
		goto out_syncd_stop;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = ENOENT;
		goto fail_unmount;
	}
	if (is_bad_inode(root)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}

	return 0;
 out_syncd_stop:
	xfs_inode_shrinker_unregister(mp);
	xfs_syncd_stop(mp);
 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return -error;
 fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		iput(root);
	}

 fail_unmount:
	xfs_inode_shrinker_unregister(mp);
	xfs_syncd_stop(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	goto out_free_sb;
}
STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.write_inode		= xfs_fs_write_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
STATIC int
xfs_init_zones(void)
{
	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;
	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;
	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
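	/*
	 * Illustrative arithmetic (header values assumed): with
	 * XFS_MAX_BLOCKSIZE == 65536, XFS_BLF_CHUNK == 128 and NBWORD == 32,
	 * the per-item dirty bitmap needs (65536 / 128) / 32 == 16 ints,
	 * i.e. 64 extra bytes on top of sizeof(xfs_buf_log_item_t).
	 */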
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	return 0;
 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_zones(void)
{
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * max_active is set to 8 to give enough concurrency to allow
	 * multiple work operations on each CPU to run.  This allows multiple
	 * filesystems to be running sync work concurrently, and scales with
	 * the number of CPUs in the system.
	 */
	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
	if (!xfs_syncd_wq)
		goto out;

	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
	if (!xfs_ail_wq)
		goto out_destroy_syncd;

	return 0;

 out_destroy_syncd:
	destroy_workqueue(xfs_syncd_wq);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_ail_wq);
	destroy_workqueue(xfs_syncd_wq);
}
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_filestream_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
	if (error)
		goto out_filestream_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_sysctl_unregister;
	return 0;

 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}
STATIC void __exit
exit_xfs_fs(void)
{
	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");