/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <[email protected]>
 *
 * Created by David Woodhouse <[email protected]>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mm.h> /* kvfree() */
#include "nodelist.h"

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
		struct jffs2_inode_cache *, struct jffs2_full_dirent **);

static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
	for (; *i < c->inocache_hashsize; (*i)++) {
		if (c->inocache_list[*i])
			return c->inocache_list[*i];
	}
	return NULL;
}

static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
	/* More in this chain? */
	if (ic->next)
		return ic->next;
	(*i)++;
	return first_inode_chain(i, c);
}

#define for_each_inode(i, c, ic)				\
	for (i = 0, ic = first_inode_chain(&i, (c));		\
	     ic;						\
	     ic = next_inode(&i, ic, (c)))


static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
				    struct jffs2_inode_cache *ic,
				    int *dir_hardlinks)
{
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("building directory inode #%u\n", ic->ino);

	/* For each child, increase nlink */
	for (fd = ic->scan_dents; fd; fd = fd->next) {
		struct jffs2_inode_cache *child_ic;
		if (!fd->ino)
			continue;

		/* we can get high latency here with huge directories */

		child_ic = jffs2_get_ino_cache(c, fd->ino);
		if (!child_ic) {
			dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
				    fd->name, fd->ino, ic->ino);
			jffs2_mark_node_obsolete(c, fd->raw);
			/* Clear the ic/raw union so it doesn't cause problems later. */
			fd->ic = NULL;
			continue;
		}

		/* From this point, fd->raw is no longer used so we can set fd->ic */
		fd->ic = child_ic;
		child_ic->pino_nlink++;
		/* If we appear (at this stage) to have hard-linked directories,
		 * set a flag to trigger a scan later */
		if (fd->type == DT_DIR) {
			child_ic->flags |= INO_FLAGS_IS_DIR;
			if (child_ic->pino_nlink > 1)
				*dir_hardlinks = 1;
		}

		dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
		/* Can't free scan_dents so far. We might need them in pass 2 */
	}
}

/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
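
/*
 * Illustrative walk-through (hypothetical inode numbers, not taken from the
 * code below): suppose directory inode #10 was unlinked, but a stale dirent
 * "a" -> ino #11 is still on its scan_dents list.  Pass 1 still bumps the
 * nlink of #11.  Pass 2 then finds #10 with nlink == 0, obsoletes its nodes
 * and, while dropping #11's nlink back down, queues "a" on dead_fds if that
 * count reaches zero.  Pass 2a pops dead_fds entries and repeats the removal,
 * so orphaned subtrees are torn down iteratively rather than recursively.
 */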
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
	int ret, i, dir_hardlinks = 0;
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	struct jffs2_full_dirent *dead_fds = NULL;

	dbg_fsbuild("build FS data structures\n");

	/* First, scan the medium and build all the inode caches with
	   lists of physical nodes */

	c->flags |= JFFS2_SB_FLAG_SCANNING;
	ret = jffs2_scan_medium(c);
	c->flags &= ~JFFS2_SB_FLAG_SCANNING;
	if (ret)
		goto exit;

	dbg_fsbuild("scanned flash completely\n");
	jffs2_dbg_dump_block_lists_nolock(c);

	dbg_fsbuild("pass 1 starting\n");
	c->flags |= JFFS2_SB_FLAG_BUILDING;
	/* Now scan the directory tree, increasing nlink according to every dirent found. */
	for_each_inode(i, c, ic) {
		if (ic->scan_dents) {
			jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
			cond_resched();
		}
	}

	dbg_fsbuild("pass 1 complete\n");

	/* Next, scan for inodes with nlink == 0 and remove them. If
	   they were directories, then decrement the nlink of their
	   children too, and repeat the scan. As that's going to be
	   a fairly uncommon occurrence, it's not so evil to do it this
	   way. Recursion bad. */
	dbg_fsbuild("pass 2 starting\n");

	for_each_inode(i, c, ic) {
		if (ic->pino_nlink)
			continue;

		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		cond_resched();
	}

	dbg_fsbuild("pass 2a starting\n");

	while (dead_fds) {
		fd = dead_fds;
		dead_fds = fd->next;

		ic = jffs2_get_ino_cache(c, fd->ino);

		if (ic)
			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		jffs2_free_full_dirent(fd);
	}

	dbg_fsbuild("pass 2a complete\n");

	if (dir_hardlinks) {
		/* If we detected directory hardlinks earlier, *hopefully*
		 * they are gone now because some of the links were from
		 * dead directories which still had some old dirents lying
		 * around and not yet garbage-collected, but which have
		 * been discarded above. So clear the pino_nlink field
		 * in each directory, so that the final scan below can
		 * print appropriate warnings. */
		for_each_inode(i, c, ic) {
			if (ic->flags & INO_FLAGS_IS_DIR)
				ic->pino_nlink = 0;
		}
	}
	dbg_fsbuild("freeing temporary data structures\n");

	/* Finally, we can scan again and free the dirent structs */
	for_each_inode(i, c, ic) {
		while (ic->scan_dents) {
			fd = ic->scan_dents;
			ic->scan_dents = fd->next;
			/* We do use the pino_nlink field to count the nlink of
			 * directories during fs build, so set it to the
			 * parent ino# now, now that there's hopefully only
			 * one. */
			if (fd->type == DT_DIR) {
				if (!fd->ic) {
					/* We'll have complained about it and marked the corresponding
					   raw node obsolete already. Just skip it. */
					continue;
				}

				/* We *have* to have set this in jffs2_build_inode_pass1() */
				BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));

				/* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
				 * is set. Otherwise, we know this should never trigger anyway, so
				 * we don't do the check. And ic->pino_nlink still contains the nlink
				 * value (which is 1). */
				if (dir_hardlinks && fd->ic->pino_nlink) {
					JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
						    fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
					/* Should we unlink it from its previous parent? */
				}

				/* For directories, ic->pino_nlink holds that parent inode # */
				fd->ic->pino_nlink = ic->ino;
			}
			jffs2_free_full_dirent(fd);
		}
		ic->scan_dents = NULL;
		cond_resched();
	}
	ret = jffs2_build_xattr_subsystem(c);
	if (ret)
		goto exit;

	c->flags &= ~JFFS2_SB_FLAG_BUILDING;

	dbg_fsbuild("FS build complete\n");

	/* Rotate the lists by some number to ensure wear levelling */
	jffs2_rotate_lists(c);

	ret = 0;

exit:
	if (ret) {
		for_each_inode(i, c, ic) {
			while (ic->scan_dents) {
				fd = ic->scan_dents;
				ic->scan_dents = fd->next;
				jffs2_free_full_dirent(fd);
			}
		}
		jffs2_clear_xattr_subsystem(c);
	}

	return ret;
}

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
					      struct jffs2_inode_cache *ic,
					      struct jffs2_full_dirent **dead_fds)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);

	raw = ic->nodes;
	while (raw != (void *)ic) {
		struct jffs2_raw_node_ref *next = raw->next_in_ino;
		dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
		jffs2_mark_node_obsolete(c, raw);
		raw = next;
	}

	if (ic->scan_dents) {
		int whinged = 0;
		dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);

		while (ic->scan_dents) {
			struct jffs2_inode_cache *child_ic;

			fd = ic->scan_dents;
			ic->scan_dents = fd->next;

			if (!fd->ino) {
				/* It's a deletion dirent. Ignore it */
				dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
				jffs2_free_full_dirent(fd);
				continue;
			}
			if (!whinged)
				whinged = 1;

			dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);

			child_ic = jffs2_get_ino_cache(c, fd->ino);
			if (!child_ic) {
				dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
					    fd->name, fd->ino);
				jffs2_free_full_dirent(fd);
				continue;
			}

			/* Reduce nlink of the child. If it's now zero, stick it on the
			   dead_fds list to be cleaned up later. Else just free the fd */
			child_ic->pino_nlink--;

			if (!child_ic->pino_nlink) {
				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
					    fd->ino, fd->name);
				fd->next = *dead_fds;
				*dead_fds = fd;
			} else {
				dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
					    fd->ino, fd->name, child_ic->pino_nlink);
				jffs2_free_full_dirent(fd);
			}
		}
	}

	/*
	   We don't delete the inocache from the hash list and free it yet.
	   The erase code will do that, when all the nodes are completely gone.
	*/
}

static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
	uint32_t size;

	/* Deletion should almost _always_ be allowed. We're fairly
	   buggered once we stop allowing people to delete stuff
	   because there's not enough free space... */
	c->resv_blocks_deletion = 2;

	/* Be conservative about how much space we need before we allow writes.
	   On top of that which is required for deletia, require an extra 2%
	   of the medium to be available, for overhead caused by nodes being
	   split across blocks, etc. */

	size = c->flash_size / 50; /* 2% of flash size */
	size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
	size += c->sector_size - 1; /* ... and round up */

	c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
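
	/*
	 * Illustrative arithmetic (hypothetical geometry, not taken from any
	 * particular board): a 32 MiB medium with 64 KiB eraseblocks has 512
	 * blocks, so size = 671088 + 51200 + 65535 = 787823 bytes, which
	 * integer-divides to 12 eraseblocks; with resv_blocks_deletion = 2
	 * that gives resv_blocks_write = 14.
	 */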

	/* When do we let the GC thread run in the background */

	c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

	/* When do we allow garbage collection to merge nodes to make
	   long-term progress at the expense of short-term space exhaustion? */
	c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

	/* When do we allow garbage collection to eat from bad blocks rather
	   than actually making progress? */
	c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;

	/* What number of 'very dirty' eraseblocks do we allow before we
	   trigger the GC thread even if we don't _need_ the space. When we
	   can't mark nodes obsolete on the medium, the old dirty nodes cause
	   performance problems because we have to inspect and discard them. */
	c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
	if (jffs2_can_mark_obsolete(c))
		c->vdirty_blocks_gctrigger *= 10;

	/* If there's less than this amount of dirty space, don't bother
	   trying to GC to make more space. It'll be a fruitless task */
	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);

	dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
		    c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
	dbg_fsbuild("Blocks required to allow deletion:    %d (%d KiB)\n",
		    c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
	dbg_fsbuild("Blocks required to allow writes:      %d (%d KiB)\n",
		    c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
	dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
		    c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
	dbg_fsbuild("Blocks required to allow GC merges:   %d (%d KiB)\n",
		    c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
	dbg_fsbuild("Blocks required to GC bad blocks:     %d (%d KiB)\n",
		    c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
	dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
		    c->nospc_dirty_size);
	dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
		    c->vdirty_blocks_gctrigger);
}

int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	int size;

	c->free_size = c->flash_size;
	c->nr_blocks = c->flash_size / c->sector_size;
	size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
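	/*
	 * The eraseblock array can get large on big media (many thousands of
	 * struct jffs2_eraseblock entries), and a large physically contiguous
	 * kzalloc() may well fail, so jffs2_blocks_use_vmalloc() is presumably
	 * there to steer such mounts to vzalloc() instead.
	 */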
#ifndef __ECOS
	if (jffs2_blocks_use_vmalloc(c))
		c->blocks = vzalloc(size);
	else
#endif
		c->blocks = kzalloc(size, GFP_KERNEL);
	if (!c->blocks)
		return -ENOMEM;

	for (i = 0; i < c->nr_blocks; i++) {
		INIT_LIST_HEAD(&c->blocks[i].list);
		c->blocks[i].offset = i * c->sector_size;
		c->blocks[i].free_size = c->sector_size;
	}

	INIT_LIST_HEAD(&c->clean_list);
	INIT_LIST_HEAD(&c->very_dirty_list);
	INIT_LIST_HEAD(&c->dirty_list);
	INIT_LIST_HEAD(&c->erasable_list);
	INIT_LIST_HEAD(&c->erasing_list);
	INIT_LIST_HEAD(&c->erase_checking_list);
	INIT_LIST_HEAD(&c->erase_pending_list);
	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
	INIT_LIST_HEAD(&c->erase_complete_list);
	INIT_LIST_HEAD(&c->free_list);
	INIT_LIST_HEAD(&c->bad_list);
	INIT_LIST_HEAD(&c->bad_used_list);
	c->highest_ino = 1;
	c->summary = NULL;

	ret = jffs2_sum_init(c);
	if (ret)
		goto out_free;

	if (jffs2_build_filesystem(c)) {
		dbg_fsbuild("build_fs failed\n");
		jffs2_free_ino_caches(c);
		jffs2_free_raw_node_refs(c);
		ret = -EIO;
		goto out_sum_exit;
	}

	jffs2_calc_trigger_levels(c);

	return 0;

 out_sum_exit:
	jffs2_sum_exit(c);
 out_free:
	kvfree(c->blocks);

	return ret;
}