/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"
static int jffs2_flash_setup(struct jffs2_sb_info *c);
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);

	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));

	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}
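/* VFS ->setattr entry point: validate the requested change, write it out via
 * jffs2_do_setattr(), and propagate mode changes to the POSIX ACL. */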
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int rc;

	rc = inode_change_ok(dentry->d_inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(dentry->d_inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = jffs2_acl_chmod(dentry->d_inode);

	return rc;
}
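/* Report filesystem statistics. The free-space figure is an estimate: dirty
 * plus free space, minus the blocks held in reserve (resv_blocks_write) so
 * that garbage collection can still make progress. */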
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}
void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}
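/* Build the in-core inode for @ino: read the most recent raw inode from the
 * medium, then wire up the inode/file/address-space operations that match
 * its mode. */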
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
	if (ret) {
		mutex_unlock(&f->sem);
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
		/* fall through to set up the special inode */

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}
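/* Called when the VFS dirties an inode. Only attribute changes flagged
 * I_DIRTY_DATASYNC are pushed to flash immediately via jffs2_do_setattr();
 * ordinary data dirtying is left to the regular write paths. */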
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}
int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}
static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to the usual big powers of 2. And keep
	 * to sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}
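/* Illustration: a 40MiB medium gives size_mb = 40, so hashsize = (80 & ~0x3f)
 * = 64; a 128MiB medium gives (256 & ~0x3f) = 256. Either result is then
 * clamped to the INOCACHE_HASHSIZE_MIN/MAX bounds above. */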
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* Do not support the MLC nand */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}
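/* The helpers below are used by the garbage collector to pin and release the
 * VFS objects (inodes and page-cache pages) whose nodes it is moving. */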
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
				   (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}
void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}
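/* Per-flash-type setup: NAND, DataFlash, write-buffered NOR (Intel "Sibley")
 * and UBI volumes each need their write buffer configured before use. */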
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}
	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}
	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}
	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}
void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}
	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}
	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}
	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}