/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 */
12 #include <linux/capability.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
16 #include <linux/list.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/pagemap.h>
19 #include <linux/slab.h>
20 #include <linux/vmalloc.h>
21 #include <linux/vfs.h>
22 #include <linux/crc32.h>
25 static int jffs2_flash_setup(struct jffs2_sb_info *c);
/*
 * jffs2_do_setattr - apply attribute changes by writing a fresh raw inode
 * node to flash.
 *
 * JFFS2 is log-structured: changing mode/uid/gid/times/size means writing a
 * new, higher-version inode node and obsoleting the old metadata node.
 *
 * NOTE(review): this chunk is a damaged extraction.  The leading number on
 * each line is the original source line number; gaps in that numbering mean
 * lines (error checks, braces, returns) were dropped.  Comments describe
 * only what the surviving lines show.
 */
27 int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
29 struct jffs2_full_dnode *old_metadata, *new_metadata;
30 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
31 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
32 struct jffs2_raw_inode *ri;
33 union jffs2_device_node dev;
34 unsigned char *mdata = NULL;
/* Default allocation class; truncate-to-zero switches to ALLOC_DELETION below */
39 int alloc_type = ALLOC_NORMAL;
41 D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
43 /* Special cases - we don't want more than one data node
44 for these types on the medium at any time. So setattr
45 must read the original data associated with the node
46 (i.e. the device numbers or the target name) and write
47 it out again with the appropriate data attached */
48 if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
49 /* For these, we don't actually need to read the old node */
50 mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
52 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
53 } else if (S_ISLNK(inode->i_mode)) {
/* Symlink: copy the existing target string out of the old metadata node
 * so it can be re-attached to the new node.  f->sem is taken/dropped
 * around the flash read (the lock acquisitions were dropped by the
 * extraction -- only the unlocks survive). */
55 mdatalen = f->metadata->size;
56 mdata = kmalloc(f->metadata->size, GFP_USER);
58 mutex_unlock(&f->sem);
61 ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
63 mutex_unlock(&f->sem);
67 mutex_unlock(&f->sem);
68 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
71 ri = jffs2_alloc_raw_inode();
73 if (S_ISLNK(inode->i_mode))
/* Reserve flash space for the new node header plus the carried-over data */
78 ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
79 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
81 jffs2_free_raw_inode(ri);
82 if (S_ISLNK(inode->i_mode & S_IFMT))
87 ivalid = iattr->ia_valid;
/* Fill in the new raw inode: each field takes the new value from iattr
 * when the corresponding ATTR_* bit is set, else the current inode value. */
89 ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
90 ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
91 ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
/* Header CRC covers the common node header minus the CRC field itself */
92 ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
94 ri->ino = cpu_to_je32(inode->i_ino);
95 ri->version = cpu_to_je32(++f->highest_version);
97 ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
98 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
100 if (ivalid & ATTR_MODE)
101 ri->mode = cpu_to_jemode(iattr->ia_mode);
103 ri->mode = cpu_to_jemode(inode->i_mode);
106 ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
107 ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
108 ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
109 ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
111 ri->offset = cpu_to_je32(0);
112 ri->csize = ri->dsize = cpu_to_je32(mdatalen);
113 ri->compr = JFFS2_COMPR_NONE;
114 if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
115 /* It's an extension. Make it a hole node */
116 ri->compr = JFFS2_COMPR_ZERO;
117 ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
118 ri->offset = cpu_to_je32(inode->i_size);
119 } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
120 /* For truncate-to-zero, treat it as deletion because
121 it'll always be obsoleting all previous nodes */
122 alloc_type = ALLOC_DELETION;
/* Node CRC covers the raw inode minus its trailing two CRC words */
124 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
126 ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
128 ri->data_crc = cpu_to_je32(0);
/* Write the new node; on failure undo the reservation and bail out */
130 new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
131 if (S_ISLNK(inode->i_mode))
134 if (IS_ERR(new_metadata)) {
135 jffs2_complete_reservation(c);
136 jffs2_free_raw_inode(ri);
137 mutex_unlock(&f->sem);
138 return PTR_ERR(new_metadata);
140 /* It worked. Update the inode */
141 inode->i_atime = ITIME(je32_to_cpu(ri->atime));
142 inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
143 inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
144 inode->i_mode = jemode_to_cpu(ri->mode);
145 inode->i_uid = je16_to_cpu(ri->uid);
146 inode->i_gid = je16_to_cpu(ri->gid);
149 old_metadata = f->metadata;
/* Shrinking: throw away now-obsolete data fragments past the new size */
151 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
152 jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
/* Growing: the hole node written above becomes part of the fragtree */
154 if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
155 jffs2_add_full_dnode_to_inode(c, f, new_metadata);
156 inode->i_size = iattr->ia_size;
157 inode->i_blocks = (inode->i_size + 511) >> 9;
160 f->metadata = new_metadata;
/* The previous metadata node is superseded; mark it obsolete on flash */
163 jffs2_mark_node_obsolete(c, old_metadata->raw);
164 jffs2_free_full_dnode(old_metadata);
166 jffs2_free_raw_inode(ri);
168 mutex_unlock(&f->sem);
169 jffs2_complete_reservation(c);
171 /* We have to do the vmtruncate() without f->sem held, since
172 some pages may be locked and waiting for it in readpage().
173 We are protected from a simultaneous write() extending i_size
174 back past iattr->ia_size, because do_truncate() holds the
175 generic inode semaphore. */
176 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
177 vmtruncate(inode, iattr->ia_size);
178 inode->i_blocks = (inode->i_size + 511) >> 9;
/*
 * jffs2_setattr - VFS ->setattr entry point.
 * Validates the attribute change, delegates the flash write to
 * jffs2_do_setattr(), then updates ACLs if the mode changed.
 * NOTE(review): extraction gaps -- some original lines are missing here.
 */
184 int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
/* Standard VFS permission/validity check before touching the medium */
188 rc = inode_change_ok(dentry->d_inode, iattr);
192 rc = jffs2_do_setattr(dentry->d_inode, iattr);
/* A mode change may require the POSIX ACL to be rewritten as well */
193 if (!rc && (iattr->ia_valid & ATTR_MODE))
194 rc = jffs2_acl_chmod(dentry->d_inode);
/*
 * jffs2_statfs - VFS ->statfs: report filesystem geometry and free space.
 * Free space is dirty+free, minus the blocks reserved for the write path;
 * all byte counts are reported in PAGE_SHIFT-sized blocks.
 * NOTE(review): extraction gaps -- some original lines are missing here.
 */
199 int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
201 struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
204 buf->f_type = JFFS2_SUPER_MAGIC;
205 buf->f_bsize = 1 << PAGE_SHIFT;
206 buf->f_blocks = c->flash_size >> PAGE_SHIFT;
209 buf->f_namelen = JFFS2_MAX_NAME_LEN;
210 buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
211 buf->f_fsid.val[1] = c->mtd->index;
/* Size counters are protected by the erase_completion_lock */
213 spin_lock(&c->erase_completion_lock);
214 avail = c->dirty_size + c->free_size;
/* Keep back the erase blocks reserved for writes; GC needs headroom */
215 if (avail > c->sector_size * c->resv_blocks_write)
216 avail -= c->sector_size * c->resv_blocks_write;
219 spin_unlock(&c->erase_completion_lock);
221 buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
/*
 * jffs2_clear_inode - VFS hook run when an inode is evicted from the
 * inode cache: tear down the in-core JFFS2 node lists for it.
 * NOTE(review): extraction gaps -- some original lines are missing here.
 */
227 void jffs2_clear_inode (struct inode *inode)
229 /* We can forget about this inode for now - drop all
230 * the nodelists associated with it, etc.
232 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
233 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
235 D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
236 jffs2_do_clear_inode(c, f);
/*
 * jffs2_iget - look up (or instantiate) the in-core inode for @ino.
 * Uses iget_locked(); for a new inode it reads the latest raw inode node
 * from flash, populates the generic inode fields, and wires up the
 * per-file-type ops (symlink / dir / regular / device / fifo-socket).
 *
 * NOTE(review): damaged extraction -- gaps in the embedded line numbers
 * mean error paths, braces and some statements were dropped.  Comments
 * describe only the surviving lines.
 */
239 struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
241 struct jffs2_inode_info *f;
242 struct jffs2_sb_info *c;
243 struct jffs2_raw_inode latest_node;
244 union jffs2_device_node jdev;
249 D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));
251 inode = iget_locked(sb, ino);
253 return ERR_PTR(-ENOMEM);
/* Already in cache and fully set up: nothing to read from flash */
254 if (!(inode->i_state & I_NEW))
257 f = JFFS2_INODE_INFO(inode);
258 c = JFFS2_SB_INFO(inode->i_sb);
260 jffs2_init_inode_info(f);
/* Scan this inode's nodes on flash and return the newest raw inode */
263 ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
266 mutex_unlock(&f->sem);
/* Copy attributes from the on-media (wire-endian) node into the VFS inode */
270 inode->i_mode = jemode_to_cpu(latest_node.mode);
271 inode->i_uid = je16_to_cpu(latest_node.uid);
272 inode->i_gid = je16_to_cpu(latest_node.gid);
273 inode->i_size = je32_to_cpu(latest_node.isize);
274 inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
275 inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
276 inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
278 inode->i_nlink = f->inocache->pino_nlink;
280 inode->i_blocks = (inode->i_size + 511) >> 9;
/* Dispatch on file type to install the right inode/file operations */
282 switch (inode->i_mode & S_IFMT) {
285 inode->i_op = &jffs2_symlink_inode_operations;
290 struct jffs2_full_dirent *fd;
/* Directory link count: parent and '.', plus one per child subdir */
291 inode->i_nlink = 2; /* parent and '.' */
293 for (fd=f->dents; fd; fd = fd->next) {
294 if (fd->type == DT_DIR && fd->ino)
297 /* Root dir gets i_nlink 3 for some reason */
298 if (inode->i_ino == 1)
301 inode->i_op = &jffs2_dir_inode_operations;
302 inode->i_fop = &jffs2_dir_operations;
306 inode->i_op = &jffs2_file_inode_operations;
307 inode->i_fop = &jffs2_file_operations;
308 inode->i_mapping->a_ops = &jffs2_file_address_operations;
309 inode->i_mapping->nrpages = 0;
314 /* Read the device numbers from the media */
/* The metadata node must be either the old 16-bit or new 32-bit encoding */
315 if (f->metadata->size != sizeof(jdev.old) &&
316 f->metadata->size != sizeof(jdev.new)) {
317 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
320 D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
321 ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
324 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
327 if (f->metadata->size == sizeof(jdev.old))
328 rdev = old_decode_dev(je16_to_cpu(jdev.old));
330 rdev = new_decode_dev(je32_to_cpu(jdev.new));
334 inode->i_op = &jffs2_file_inode_operations;
335 init_special_inode(inode, inode->i_mode, rdev);
339 printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
342 mutex_unlock(&f->sem);
344 D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
345 unlock_new_inode(inode);
/* Error path (label dropped by extraction): clean up the partial inode */
351 mutex_unlock(&f->sem);
352 jffs2_do_clear_inode(c, f);
/*
 * jffs2_dirty_inode - VFS hook for attribute-only dirtiness.
 * For I_DIRTY_DATASYNC, build an iattr mirroring the current in-core
 * attributes and push them to flash via jffs2_do_setattr(); other
 * dirty states need no media write here.
 * NOTE(review): extraction gaps -- some original lines are missing here.
 */
357 void jffs2_dirty_inode(struct inode *inode)
361 if (!(inode->i_state & I_DIRTY_DATASYNC)) {
362 D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
366 D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
/* ATTR_SIZE deliberately omitted: size changes go through setattr proper */
368 iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
369 iattr.ia_mode = inode->i_mode;
370 iattr.ia_uid = inode->i_uid;
371 iattr.ia_gid = inode->i_gid;
372 iattr.ia_atime = inode->i_atime;
373 iattr.ia_mtime = inode->i_mtime;
374 iattr.ia_ctime = inode->i_ctime;
/* Return value intentionally ignored: this hook is void/best-effort */
376 jffs2_do_setattr(inode, &iattr);
/*
 * jffs2_remount_fs - VFS ->remount_fs: handle RO<->RW transitions.
 * Refuses to go read-write on media flagged read-only; stops the GC
 * thread and flushes the write buffer when leaving RW mode, restarts
 * the GC thread when (re)entering RW mode.
 * NOTE(review): extraction gaps -- some original lines are missing here.
 */
379 int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
381 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
/* Medium was mounted RO for a reason (e.g. ECC failure); refuse RW */
383 if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
386 /* We stop if it was running, then restart if it needs to.
387 This also catches the case where it was stopped and this
388 is just a remount to restart it.
389 Flush the writebuffer, if neccecary, else we loose it */
390 if (!(sb->s_flags & MS_RDONLY)) {
391 jffs2_stop_garbage_collect_thread(c);
/* alloc_sem serialises against writers while padding out the wbuf */
392 mutex_lock(&c->alloc_sem);
393 jffs2_flush_wbuf_pad(c);
394 mutex_unlock(&c->alloc_sem);
397 if (!(*flags & MS_RDONLY))
398 jffs2_start_garbage_collect_thread(c);
/* JFFS2 never maintains atime on flash; force noatime */
400 *flags |= MS_NOATIME;
405 /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
406 fill in the raw_inode while you're at it. */
/*
 * Caller supplies @ri (a raw inode template) which is zeroed and filled
 * with OS defaults (uid/gid, times); jffs2_do_new_inode() assigns the
 * inode number.  ACL preprocessing may adjust @mode before use.
 * NOTE(review): extraction gaps -- error-path lines are missing here.
 */
407 struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
410 struct super_block *sb = dir_i->i_sb;
411 struct jffs2_sb_info *c;
412 struct jffs2_inode_info *f;
415 D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
417 c = JFFS2_SB_INFO(sb);
419 inode = new_inode(sb);
422 return ERR_PTR(-ENOMEM);
424 f = JFFS2_INODE_INFO(inode);
425 jffs2_init_inode_info(f);
428 memset(ri, 0, sizeof(*ri));
429 /* Set OS-specific defaults for new inodes */
430 ri->uid = cpu_to_je16(current_fsuid());
/* setgid directory: new files inherit the directory's group */
432 if (dir_i->i_mode & S_ISGID) {
433 ri->gid = cpu_to_je16(dir_i->i_gid);
437 ri->gid = cpu_to_je16(current_fsgid());
440 /* POSIX ACLs have to be processed now, at least partly.
441 The umask is only applied if there's no default ACL */
442 ret = jffs2_init_acl_pre(dir_i, inode, &mode);
444 make_bad_inode(inode);
/* Allocate the inocache entry and the on-media inode number */
448 ret = jffs2_do_new_inode (c, f, mode, ri);
450 make_bad_inode(inode);
/* Mirror the raw-inode fields back into the VFS inode */
455 inode->i_ino = je32_to_cpu(ri->ino);
456 inode->i_mode = jemode_to_cpu(ri->mode);
457 inode->i_gid = je16_to_cpu(ri->gid);
458 inode->i_uid = je16_to_cpu(ri->uid);
459 inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
460 ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
465 insert_inode_hash(inode);
/*
 * jffs2_do_fill_super - mount-time setup shared by all JFFS2 mounts.
 * Validates the MTD device, sizes the filesystem, sets up the flash
 * writebuffer layer, allocates caches, scans the medium, and installs
 * the root dentry.
 *
 * NOTE(review): damaged extraction -- error gotos, labels, braces and
 * some statements were dropped (see the gaps in the embedded numbers).
 */
471 int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
473 struct jffs2_sb_info *c;
474 struct inode *root_i;
478 c = JFFS2_SB_INFO(sb);
/* Without writebuffer support compiled in, NAND/DataFlash cannot work */
480 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
481 if (c->mtd->type == MTD_NANDFLASH) {
482 printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
485 if (c->mtd->type == MTD_DATAFLASH) {
486 printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n");
491 c->flash_size = c->mtd->size;
492 c->sector_size = c->mtd->erasesize;
493 blocks = c->flash_size / c->sector_size;
496 * Size alignment check
/* Trim any partial trailing erase block so size is a whole multiple */
498 if ((c->sector_size * blocks) != c->flash_size) {
499 c->flash_size = c->sector_size * blocks;
500 printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
501 c->flash_size / 1024);
/* JFFS2 needs at least 5 erase blocks to function (GC headroom) */
504 if (c->flash_size < 5*c->sector_size) {
505 printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
509 c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
511 /* NAND (or other bizarre) flash... do setup accordingly */
512 ret = jffs2_flash_setup(c);
/* Hash table of inode caches, indexed by inode number */
516 c->inocache_list = kcalloc(INOCACHE_HASHSIZE, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
517 if (!c->inocache_list) {
522 jffs2_init_xattr_subsystem(c);
/* Scan/mount the medium: builds the block lists and inode caches */
524 if ((ret = jffs2_do_mount_fs(c)))
527 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
/* Inode 1 is always the root directory in JFFS2 */
528 root_i = jffs2_iget(sb, 1);
529 if (IS_ERR(root_i)) {
530 D1(printk(KERN_WARNING "get root inode failed\n"));
531 ret = PTR_ERR(root_i);
537 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
538 sb->s_root = d_alloc_root(root_i);
542 sb->s_maxbytes = 0xFFFFFFFF;
543 sb->s_blocksize = PAGE_CACHE_SIZE;
544 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
545 sb->s_magic = JFFS2_SUPER_MAGIC;
546 if (!(sb->s_flags & MS_RDONLY))
547 jffs2_start_garbage_collect_thread(c);
/* Error unwinding (labels dropped by extraction): free in reverse order */
553 jffs2_free_ino_caches(c);
554 jffs2_free_raw_node_refs(c);
555 if (jffs2_blocks_use_vmalloc(c))
560 jffs2_clear_xattr_subsystem(c);
561 kfree(c->inocache_list);
563 jffs2_flash_cleanup(c);
/*
 * jffs2_gc_release_inode - drop the inode reference taken by
 * jffs2_gc_fetch_inode() when the garbage collector is done with it.
 * (OFNI_EDONI_2SFFJ maps a jffs2_inode_info back to its struct inode.)
 */
568 void jffs2_gc_release_inode(struct jffs2_sb_info *c,
569 struct jffs2_inode_info *f)
571 iput(OFNI_EDONI_2SFFJ(f));
/*
 * jffs2_gc_fetch_inode - get the in-core inode for GC of ino #@inum.
 * For an unlinked inode, use ilookup() only (never instantiate) and wait
 * for the final iput() if the inocache is still present; otherwise iget()
 * normally.  Pairs with jffs2_gc_release_inode().
 * NOTE(review): extraction gaps -- some original lines are missing here.
 */
574 struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
575 int inum, int unlinked)
578 struct jffs2_inode_cache *ic;
581 /* The inode has zero nlink but its nodes weren't yet marked
582 obsolete. This has to be because we're still waiting for
583 the final (close() and) iput() to happen.
585 There's a possibility that the final iput() could have
586 happened while we were contemplating. In order to ensure
587 that we don't cause a new read_inode() (which would fail)
588 for the inode in question, we use ilookup() in this case
591 The nlink can't _become_ zero at this point because we're
592 holding the alloc_sem, and jffs2_do_unlink() would also
593 need that while decrementing nlink on any inode.
595 inode = ilookup(OFNI_BS_2SFFJ(c), inum);
597 D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
/* Not in icache: check whether the inocache entry is gone too */
600 spin_lock(&c->inocache_lock);
601 ic = jffs2_get_ino_cache(c, inum);
603 D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
604 spin_unlock(&c->inocache_lock);
/* Inocache still present but not CHECKEDABSENT: the final iput() is
 * still in flight -- sleep until it makes progress, then retry */
607 if (ic->state != INO_STATE_CHECKEDABSENT) {
608 /* Wait for progress. Don't just loop */
609 D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
610 ic->ino, ic->state));
611 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
613 spin_unlock(&c->inocache_lock);
619 /* Inode has links to it still; they're not going away because
620 jffs2_do_unlink() would need the alloc_sem and we have it.
621 Just iget() it, and if read_inode() is necessary that's OK.
623 inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
625 return ERR_CAST(inode);
627 if (is_bad_inode(inode)) {
628 printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n",
630 /* NB. This will happen again. We need to do something appropriate here. */
632 return ERR_PTR(-EIO);
635 return JFFS2_INODE_INFO(inode);
/*
 * jffs2_gc_fetch_page - map in the page cache page covering @offset so
 * the GC can copy its data; the page pointer is stashed in *priv for the
 * matching jffs2_gc_release_page().
 * NOTE(review): extraction gaps -- error-check lines are missing here.
 */
638 unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
639 struct jffs2_inode_info *f,
640 unsigned long offset,
643 struct inode *inode = OFNI_EDONI_2SFFJ(f);
/* Read (or find) the page, using JFFS2's own readpage to fill it */
646 pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
647 (void *)jffs2_do_readpage_unlock, inode);
651 *priv = (unsigned long)pg;
/*
 * jffs2_gc_release_page - release the page pinned by
 * jffs2_gc_fetch_page() (page pointer recovered from *priv).
 */
655 void jffs2_gc_release_page(struct jffs2_sb_info *c,
659 struct page *pg = (void *)*priv;
662 page_cache_release(pg);
/*
 * jffs2_flash_setup - initialise the flash-type-specific writebuffer
 * layer: NAND (OOB cleanmarkers), DataFlash, NOR with write buffering
 * (Intel Sibley), or UBI volumes.  Mirrored by jffs2_flash_cleanup().
 * NOTE(review): extraction gaps -- error returns/braces are missing here.
 */
665 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
668 if (jffs2_cleanmarker_oob(c)) {
669 /* NAND flash... do setup accordingly */
670 ret = jffs2_nand_flash_setup(c);
676 if (jffs2_dataflash(c)) {
677 ret = jffs2_dataflash_setup(c);
682 /* and Intel "Sibley" flash */
683 if (jffs2_nor_wbuf_flash(c)) {
684 ret = jffs2_nor_wbuf_flash_setup(c);
689 /* and an UBI volume */
690 if (jffs2_ubivol(c)) {
691 ret = jffs2_ubivol_setup(c);
/*
 * jffs2_flash_cleanup - undo jffs2_flash_setup(), calling the matching
 * cleanup hook for whichever flash type was configured.
 * NOTE(review): extraction gaps -- closing braces fall outside this chunk.
 */
699 void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
701 if (jffs2_cleanmarker_oob(c)) {
702 jffs2_nand_flash_cleanup(c);
706 if (jffs2_dataflash(c)) {
707 jffs2_dataflash_cleanup(c);
710 /* and Intel "Sibley" flash */
711 if (jffs2_nor_wbuf_flash(c)) {
712 jffs2_nor_wbuf_flash_cleanup(c);
715 /* and an UBI volume */
716 if (jffs2_ubivol(c)) {
717 jffs2_ubivol_cleanup(c);