/*
 * the_nilfs.c - the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <[email protected]>
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "seglist.h"
#include "segbuf.h"

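/**
 * nilfs_set_last_segment - record the position of the latest written log
 * @nilfs: the_nilfs structure
 * @start_blocknr: start block number of the latest partial segment
 * @seq: sequence number of the latest log
 * @cno: checkpoint number of the latest checkpoint
 *
 * nilfs_set_last_segment() stores the position of the log most recently
 * written to disk under ns_last_segment_lock so that readers always see
 * a consistent (pseg, seq, cno) triplet.
 */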
void nilfs_set_last_segment(struct the_nilfs *nilfs,
                            sector_t start_blocknr, u64 seq, __u64 cno)
{
        spin_lock(&nilfs->ns_last_segment_lock);
        nilfs->ns_last_pseg = start_blocknr;
        nilfs->ns_last_seq = seq;
        nilfs->ns_last_cno = cno;
        spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate the_nilfs structure
 * @bdev: block device to which the_nilfs is related
 *
 * alloc_nilfs() allocates memory for the_nilfs and
 * initializes its reference count and locks.
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
        struct the_nilfs *nilfs;

        nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
        if (!nilfs)
                return NULL;

        nilfs->ns_bdev = bdev;
        atomic_set(&nilfs->ns_count, 1);
        atomic_set(&nilfs->ns_writer_refcount, -1);
        atomic_set(&nilfs->ns_ndirtyblks, 0);
        init_rwsem(&nilfs->ns_sem);
        mutex_init(&nilfs->ns_writer_mutex);
        INIT_LIST_HEAD(&nilfs->ns_supers);
        spin_lock_init(&nilfs->ns_last_segment_lock);
        nilfs->ns_gc_inodes_h = NULL;
        init_rwsem(&nilfs->ns_segctor_sem);

        return nilfs;
}

/**
 * put_nilfs - release a reference to the_nilfs
 * @nilfs: the_nilfs structure to be released
 *
 * put_nilfs() decrements a reference counter of the_nilfs.
 * If the reference count reaches zero, the_nilfs is freed.
 */
void put_nilfs(struct the_nilfs *nilfs)
{
        if (!atomic_dec_and_test(&nilfs->ns_count))
                return;
        /*
         * Increment of ns_count never occurs below because the caller
         * of get_nilfs() holds at least one reference to the_nilfs.
         * Thus its exclusion control is not required here.
         */
        might_sleep();
        if (nilfs_loaded(nilfs)) {
                nilfs_mdt_clear(nilfs->ns_sufile);
                nilfs_mdt_destroy(nilfs->ns_sufile);
                nilfs_mdt_clear(nilfs->ns_cpfile);
                nilfs_mdt_destroy(nilfs->ns_cpfile);
                nilfs_mdt_clear(nilfs->ns_dat);
                nilfs_mdt_destroy(nilfs->ns_dat);
                /* XXX: how and when to clear nilfs->ns_gc_dat? */
                nilfs_mdt_destroy(nilfs->ns_gc_dat);
        }
        if (nilfs_init(nilfs)) {
                nilfs_destroy_gccache(nilfs);
                brelse(nilfs->ns_sbh[0]);
                brelse(nilfs->ns_sbh[1]);
        }
        kfree(nilfs);
}

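/**
 * nilfs_load_super_root - load the super root and set up metadata files
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @sr_block: disk block number of the super root block
 *
 * nilfs_load_super_root() reads the super root block, creates the DAT,
 * cpfile and sufile metadata inodes (plus a shadow DAT used by GC), and
 * initializes them from the inode images stored in the super root.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */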
static int nilfs_load_super_root(struct the_nilfs *nilfs,
                                 struct nilfs_sb_info *sbi, sector_t sr_block)
{
        static struct lock_class_key dat_lock_key;
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        struct nilfs_super_block **sbp = nilfs->ns_sbp;
        unsigned dat_entry_size, segment_usage_size, checkpoint_size;
        unsigned inode_size;
        int err;

        err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1);
        if (unlikely(err))
                return err;

        down_read(&nilfs->ns_sem);
        dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
        checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
        segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
        up_read(&nilfs->ns_sem);

        inode_size = nilfs->ns_inode_size;

        err = -ENOMEM;
        nilfs->ns_dat = nilfs_mdt_new(
                nilfs, NULL, NILFS_DAT_INO, NILFS_DAT_GFP);
        if (unlikely(!nilfs->ns_dat))
                goto failed;

        nilfs->ns_gc_dat = nilfs_mdt_new(
                nilfs, NULL, NILFS_DAT_INO, NILFS_DAT_GFP);
        if (unlikely(!nilfs->ns_gc_dat))
                goto failed_dat;

        nilfs->ns_cpfile = nilfs_mdt_new(
                nilfs, NULL, NILFS_CPFILE_INO, NILFS_CPFILE_GFP);
        if (unlikely(!nilfs->ns_cpfile))
                goto failed_gc_dat;

        nilfs->ns_sufile = nilfs_mdt_new(
                nilfs, NULL, NILFS_SUFILE_INO, NILFS_SUFILE_GFP);
        if (unlikely(!nilfs->ns_sufile))
                goto failed_cpfile;

        err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
        if (unlikely(err))
                goto failed_sufile;

        lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
        lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);

        nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
        nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
                                 sizeof(struct nilfs_cpfile_header));
        nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
                                 sizeof(struct nilfs_sufile_header));

        err = nilfs_mdt_read_inode_direct(
                nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_mdt_read_inode_direct(
                nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_mdt_read_inode_direct(
                nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
        nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
        brelse(bh_sr);
        return err;

 failed_sufile:
        nilfs_mdt_destroy(nilfs->ns_sufile);

 failed_cpfile:
        nilfs_mdt_destroy(nilfs->ns_cpfile);

 failed_gc_dat:
        nilfs_mdt_destroy(nilfs->ns_gc_dat);

 failed_dat:
        nilfs_mdt_destroy(nilfs->ns_dat);
        goto failed;
}

static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
        memset(ri, 0, sizeof(*ri));
        INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
        nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info used to recover past segments
 *
 * load_nilfs() searches for and loads the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must call this exclusively for simultaneous mounts.
 */
int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
        struct nilfs_recovery_info ri;
        unsigned int s_flags = sbi->s_super->s_flags;
        int really_read_only = bdev_read_only(nilfs->ns_bdev);
        unsigned valid_fs;
        int err = 0;

        nilfs_init_recovery_info(&ri);

        down_write(&nilfs->ns_sem);
        valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
        up_write(&nilfs->ns_sem);

        if (!valid_fs && (s_flags & MS_RDONLY)) {
                printk(KERN_INFO "NILFS: INFO: recovery "
                       "required for readonly filesystem.\n");
                if (really_read_only) {
                        printk(KERN_ERR "NILFS: write access "
                               "unavailable, cannot proceed.\n");
                        err = -EROFS;
                        goto failed;
                }
                printk(KERN_INFO "NILFS: write access will "
                       "be enabled during recovery.\n");
                sbi->s_super->s_flags &= ~MS_RDONLY;
        }

        err = nilfs_search_super_root(nilfs, sbi, &ri);
        if (unlikely(err)) {
                printk(KERN_ERR "NILFS: error searching super root.\n");
                goto failed;
        }

        err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root);
        if (unlikely(err)) {
                printk(KERN_ERR "NILFS: error loading super root.\n");
                goto failed;
        }

        if (!valid_fs) {
                err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
                if (unlikely(err)) {
                        nilfs_mdt_destroy(nilfs->ns_cpfile);
                        nilfs_mdt_destroy(nilfs->ns_sufile);
                        nilfs_mdt_destroy(nilfs->ns_dat);
                        goto failed;
                }
                if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
                        sbi->s_super->s_dirt = 1;
        }

        set_nilfs_loaded(nilfs);

 failed:
        nilfs_clear_recovery_info(&ri);
        sbi->s_super->s_flags = s_flags;
        return err;
}

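/**
 * nilfs_max_size - compute the maximum file size for a given block size
 * @blkbits: log2 of the filesystem block size
 *
 * The limit is the smaller of the page cache limit (MAX_LFS_FILESIZE)
 * and the range addressable by the bmap key space.
 */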
static unsigned long long nilfs_max_size(unsigned int blkbits)
{
        unsigned int max_bits;
        unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

        max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
        if (max_bits < 64)
                res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
        return res;
}

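/**
 * nilfs_store_disk_layout - copy disk layout parameters from the super block
 * @nilfs: the_nilfs structure
 * @sbp: super block to read the layout from
 *
 * nilfs_store_disk_layout() verifies the revision level and super block
 * size, then caches inode size, segment geometry, the reserved segment
 * count and the CRC seed in the_nilfs.
 *
 * Return Value: 0 on success, or -EINVAL if the super block is from an
 * incompatible revision or describes an invalid layout.
 */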
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
                                   struct nilfs_super_block *sbp)
{
        if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
                printk(KERN_ERR "NILFS: revision mismatch "
                       "(superblock rev.=%d.%d, current rev.=%d.%d). "
                       "Please check the version of mkfs.nilfs.\n",
                       le32_to_cpu(sbp->s_rev_level),
                       le16_to_cpu(sbp->s_minor_rev_level),
                       NILFS_CURRENT_REV, NILFS_MINOR_REV);
                return -EINVAL;
        }
        nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
        if (nilfs->ns_sbsize > BLOCK_SIZE)
                return -EINVAL;

        nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
        nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

        nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
        if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
                printk(KERN_ERR "NILFS: too short segment.\n");
                return -EINVAL;
        }

        nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
        nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
        nilfs->ns_r_segments_percentage =
                le32_to_cpu(sbp->s_r_segments_percentage);
        nilfs->ns_nrsvsegs =
                max_t(unsigned long, NILFS_MIN_NRSVSEGS,
                      DIV_ROUND_UP(nilfs->ns_nsegments *
                                   nilfs->ns_r_segments_percentage, 100));
        nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
        return 0;
}

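/**
 * nilfs_valid_sb - check whether a super block candidate is valid
 * @sbp: super block to check (may be NULL)
 *
 * A super block is accepted when it carries the NILFS magic number and
 * its CRC, computed over s_bytes bytes with the s_sum field treated as
 * zero, matches the stored checksum.
 *
 * Return Value: Nonzero if the super block is valid, zero otherwise.
 */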
static int nilfs_valid_sb(struct nilfs_super_block *sbp)
{
        static unsigned char sum[4];
        const int sumoff = offsetof(struct nilfs_super_block, s_sum);
        size_t bytes;
        u32 crc;

        if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
                return 0;
        bytes = le16_to_cpu(sbp->s_bytes);
        if (bytes > BLOCK_SIZE)
                return 0;
        crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
                       sumoff);
        crc = crc32_le(crc, sum, 4);
        crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
                       bytes - sumoff - 4);
        return crc == le32_to_cpu(sbp->s_sum);
}

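/**
 * nilfs_sb2_bad_offset - check the location of the secondary super block
 * @sbp: super block describing the segment layout
 * @offset: byte offset of the secondary super block candidate
 *
 * The secondary super block must sit beyond the area covered by the
 * segments; an @offset inside the segment area is considered bad.
 */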
static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
        return offset < ((le64_to_cpu(sbp->s_nsegments) *
                          le32_to_cpu(sbp->s_blocks_per_segment)) <<
                         (le32_to_cpu(sbp->s_log_block_size) + 10));
}

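/**
 * nilfs_release_super_block - release the buffers of both super blocks
 * @nilfs: the_nilfs structure
 */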
static void nilfs_release_super_block(struct the_nilfs *nilfs)
{
        int i;

        for (i = 0; i < 2; i++) {
                if (nilfs->ns_sbp[i]) {
                        brelse(nilfs->ns_sbh[i]);
                        nilfs->ns_sbh[i] = NULL;
                        nilfs->ns_sbp[i] = NULL;
                }
        }
}

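/**
 * nilfs_fall_back_super_block - fall back to the secondary super block
 * @nilfs: the_nilfs structure
 *
 * The primary super block buffer is released and replaced by the
 * secondary one, which then becomes the only valid copy.
 */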
void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
{
        brelse(nilfs->ns_sbh[0]);
        nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
        nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
        nilfs->ns_sbh[1] = NULL;
        nilfs->ns_sbp[1] = NULL;
}

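/**
 * nilfs_swap_super_block - swap the primary and secondary super block buffers
 * @nilfs: the_nilfs structure
 */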
void nilfs_swap_super_block(struct the_nilfs *nilfs)
{
        struct buffer_head *tsbh = nilfs->ns_sbh[0];
        struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];

        nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
        nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
        nilfs->ns_sbh[1] = tsbh;
        nilfs->ns_sbp[1] = tsbp;
}

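/**
 * nilfs_load_super_block - read and select a valid super block
 * @nilfs: the_nilfs structure
 * @sb: super block
 * @blocksize: block size used to read the super blocks
 * @sbpp: place to store a pointer to the selected super block
 *
 * Both the primary and the secondary super block are read and validated;
 * the newer valid copy (by s_wtime) is kept in slot 0, swapping the two
 * if necessary.  Mounting fails only when neither copy is usable.
 *
 * Return Value: 0 on success, -EIO if neither super block could be read,
 * or -EINVAL if no valid super block was found.
 */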
static int nilfs_load_super_block(struct the_nilfs *nilfs,
                                  struct super_block *sb, int blocksize,
                                  struct nilfs_super_block **sbpp)
{
        struct nilfs_super_block **sbp = nilfs->ns_sbp;
        struct buffer_head **sbh = nilfs->ns_sbh;
        u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
        int valid[2], swp = 0;

        sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
                                        &sbh[0]);
        sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

        if (!sbp[0]) {
                if (!sbp[1]) {
                        printk(KERN_ERR "NILFS: unable to read superblock\n");
                        return -EIO;
                }
                printk(KERN_WARNING
                       "NILFS warning: unable to read primary superblock\n");
        } else if (!sbp[1])
                printk(KERN_WARNING
                       "NILFS warning: unable to read secondary superblock\n");

        valid[0] = nilfs_valid_sb(sbp[0]);
        valid[1] = nilfs_valid_sb(sbp[1]);
        swp = valid[1] &&
                (!valid[0] ||
                 le64_to_cpu(sbp[1]->s_wtime) > le64_to_cpu(sbp[0]->s_wtime));

        if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
                brelse(sbh[1]);
                sbh[1] = NULL;
                sbp[1] = NULL;
                swp = 0;
        }
        if (!valid[swp]) {
                nilfs_release_super_block(nilfs);
                printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
                       sb->s_id);
                return -EINVAL;
        }

        if (swp) {
                printk(KERN_WARNING "NILFS warning: broken superblock. "
                       "using spare superblock.\n");
                nilfs_swap_super_block(nilfs);
        }

        nilfs->ns_sbwtime[0] = le64_to_cpu(sbp[0]->s_wtime);
        nilfs->ns_sbwtime[1] = valid[!swp] ? le64_to_cpu(sbp[1]->s_wtime) : 0;
        nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
        *sbpp = sbp[0];
        return 0;
}

/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs).  It takes over part of the work that a
 * fill_super() routine would normally do, because multiple NILFS
 * instances may be mounted simultaneously on the same device.
 * For multiple mounts on the same device, only the first mount
 * performs these tasks.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
{
        struct super_block *sb = sbi->s_super;
        struct nilfs_super_block *sbp;
        struct backing_dev_info *bdi;
        int blocksize;
        int err;

        down_write(&nilfs->ns_sem);
        if (nilfs_init(nilfs)) {
                /* Load values from existing the_nilfs */
                sbp = nilfs->ns_sbp[0];
                err = nilfs_store_magic_and_option(sb, sbp, data);
                if (err)
                        goto out;

                blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
                if (sb->s_blocksize != blocksize &&
                    !sb_set_blocksize(sb, blocksize)) {
                        printk(KERN_ERR "NILFS: blocksize %d unfit to device\n",
                               blocksize);
                        err = -EINVAL;
                }
                sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
                goto out;
        }

        blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
        if (!blocksize) {
                printk(KERN_ERR "NILFS: unable to set blocksize\n");
                err = -EINVAL;
                goto out;
        }
        err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
        if (err)
                goto out;

        err = nilfs_store_magic_and_option(sb, sbp, data);
        if (err)
                goto failed_sbh;

        blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
        if (sb->s_blocksize != blocksize) {
                int hw_blocksize = bdev_hardsect_size(sb->s_bdev);

                if (blocksize < hw_blocksize) {
                        printk(KERN_ERR
                               "NILFS: blocksize %d too small for device "
                               "(sector-size = %d).\n",
                               blocksize, hw_blocksize);
                        err = -EINVAL;
                        goto failed_sbh;
                }
                nilfs_release_super_block(nilfs);
                sb_set_blocksize(sb, blocksize);

                err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
                if (err)
                        /*
                         * Not failed_sbh; sbh is released automatically
                         * when reloading fails.
                         */
                        goto out;
        }
        nilfs->ns_blocksize_bits = sb->s_blocksize_bits;

        err = nilfs_store_disk_layout(nilfs, sbp);
        if (err)
                goto failed_sbh;

        sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

        nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

        bdi = nilfs->ns_bdev->bd_inode_backing_dev_info;
        if (!bdi)
                bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
        nilfs->ns_bdi = bdi ? : &default_backing_dev_info;

        /* Finding last segment */
        nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
        nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
        nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

        nilfs->ns_seg_seq = nilfs->ns_last_seq;
        nilfs->ns_segnum =
                nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
        nilfs->ns_cno = nilfs->ns_last_cno + 1;
        if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
                printk(KERN_ERR "NILFS invalid last segment number.\n");
                err = -EINVAL;
                goto failed_sbh;
        }
        /* Dummy values */
        nilfs->ns_free_segments_count =
                nilfs->ns_nsegments - (nilfs->ns_segnum + 1);

        /* Initialize gcinode cache */
        err = nilfs_init_gccache(nilfs);
        if (err)
                goto failed_sbh;

        set_nilfs_init(nilfs);
        err = 0;
 out:
        up_write(&nilfs->ns_sem);
        return err;

 failed_sbh:
        nilfs_release_super_block(nilfs);
        goto out;
}

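/**
 * nilfs_count_free_blocks - count the number of free blocks
 * @nilfs: the_nilfs structure
 * @nblocks: place to store the number of free blocks
 *
 * The count is derived from the number of clean segments reported by
 * the sufile, multiplied by the number of blocks per segment.
 *
 * Return Value: 0 on success with *@nblocks set, or a negative error
 * code returned by the sufile lookup.
 */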
int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
        struct inode *dat = nilfs_dat_inode(nilfs);
        unsigned long ncleansegs;
        int err;

        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        if (likely(!err))
                *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
        return err;
}

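/**
 * nilfs_near_disk_full - check if the filesystem is running out of space
 * @nilfs: the_nilfs structure
 *
 * The filesystem is considered nearly full when the number of clean
 * segments is no more than the reserved segments plus the segments
 * needed to flush the currently dirty blocks.
 *
 * Return Value: A positive value if the filesystem is nearly full,
 * 0 if there is still room, or a negative error code on failure.
 */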
int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
        struct inode *sufile = nilfs->ns_sufile;
        unsigned long ncleansegs, nincsegs;
        int ret;

        ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
        if (likely(!ret)) {
                nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
                        nilfs->ns_blocks_per_segment + 1;
                if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
                        ret++;
        }
        return ret;
}

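/**
 * nilfs_checkpoint_is_mounted - check if a checkpoint is busy
 * @nilfs: the_nilfs structure
 * @cno: checkpoint number to check
 * @snapshot_mount: if nonzero, only snapshot mounts are counted
 *
 * A checkpoint counts as busy when it is currently mounted (as a
 * snapshot, if @snapshot_mount is set) or when it is at least as recent
 * as the last checkpoint, which is protected as a recent checkpoint.
 *
 * Return Value: A positive value if the checkpoint is busy, 0 otherwise.
 */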
int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
                                int snapshot_mount)
{
        struct nilfs_sb_info *sbi;
        int ret = 0;

        down_read(&nilfs->ns_sem);
        if (cno == 0 || cno > nilfs->ns_cno)
                goto out_unlock;

        list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
                if (sbi->s_snapshot_cno == cno &&
                    (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
                        /* exclude read-only mounts */
                        ret++;
                        break;
                }
        }
        /* for protecting recent checkpoints */
        if (cno >= nilfs_last_cno(nilfs))
                ret++;

 out_unlock:
        up_read(&nilfs->ns_sem);
        return ret;
}