/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <[email protected]>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

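/*
 * NILFS does not write data pages out one by one from ->writepages();
 * when the caller requests WB_SYNC_ALL, a data-sync segment covering the
 * given range is constructed instead, and other writeback requests are
 * left to the segment constructor.
 */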
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

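/*
 * ->writepage() likewise defers the actual write-out: the page is
 * redirtied, and the segment constructor is kicked for synchronous
 * writeback or writeback issued for memory reclaim.
 */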
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

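/*
 * Mark the page's buffers dirty and account the newly dirtied blocks to
 * the inode via nilfs_set_file_dirty() so that the file is picked up by
 * the next segment construction.
 */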
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

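/*
 * The buffered write path wraps block_write_begin()/generic_write_end()
 * in a NILFS transaction; the transaction is aborted if the preparation
 * fails and committed in nilfs_write_end().
 */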
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, nilfs_get_block);
	if (unlikely(err))
		nilfs_transaction_abort(inode->i_sb);
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

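/*
 * Direct writes are not handled here; returning zero lets the caller fall
 * back to buffered writing. Direct reads go through blockdev_direct_IO().
 */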
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

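/*
 * Allocate a new inode: reserve an entry in the ifile metadata file, then
 * initialize the in-core inode from the parent directory and the
 * requested mode.
 */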
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	inode->i_mode = mode;
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

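/*
 * Release the on-disk inode by deleting its entry from the ifile and
 * dropping the per-filesystem inode count.
 */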
void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there any thing I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

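/*
 * Propagate the NILFS-specific inode flags (NILFS_*_FL) to the generic
 * i_flags bits (S_SYNC, S_APPEND, and so on).
 */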
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

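/*
 * Copy an on-disk inode (struct nilfs_inode) into the in-core inode; a
 * zeroed mode and link count indicates a deleted entry and yields -EINVAL.
 */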
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

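/*
 * Read one inode from the ifile: map the raw inode in its ifile block,
 * fill the in-core inode, and install the operation vectors for its file
 * type.
 */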
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

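/*
 * Return the inode for @ino, reading it in from the ifile if it is not
 * already cached.
 */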
struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

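/*
 * Reverse of nilfs_read_inode_common(): copy the in-core inode into the
 * raw on-disk entry, optionally including the bmap data.
 */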
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

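/*
 * Write the current in-core state of @inode into its raw entry within the
 * mapped ifile block @ibh.
 */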
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

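/*
 * Shrink the bmap down to block @from, working backwards in chunks of at
 * most NILFS_MAX_TRUNCATE_BLOCKS blocks; between chunks,
 * nilfs_relax_pressure_in_lock() gives the segment constructor a chance
 * to flush and relieve memory pressure.
 */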
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}

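/*
 * Truncate the file to i_size: zero the tail of the last block, cut the
 * bmap beyond the new end, and commit the change as one transaction.
 */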
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

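/*
 * Final deletion of an inode with no remaining links: drop its page
 * cache, truncate all blocks, and free its ifile entry within a single
 * transaction.
 */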
void nilfs_delete_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;
	err = inode_setattr(inode, iattr);
	if (!err && (iattr->ia_valid & ATTR_MODE))
		err = nilfs_acl_chmod(inode);
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

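/*
 * Return the buffer head of the ifile block containing this inode,
 * caching it in ii->i_bh; the caller receives an extra reference and is
 * responsible for releasing it with brelse().
 */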
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

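/*
 * Tell whether the inode is currently registered as dirty or busy with
 * the segment constructor.
 */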
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

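/*
 * Account @nr_dirty newly dirtied blocks and, when the inode becomes
 * dirty for the first time, move it onto the superblock's dirty-file list
 * so that the next segment construction includes it.
 */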
int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

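/*
 * Flush the in-core inode into its ifile block and mark both the buffer
 * and the ifile dirty so the update is written by the next segment
 * construction.
 */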
int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}