/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
    struct bio_vec *bvec;
    int i;

    if (f2fs_bio_encrypted(bio)) {
        if (bio->bi_error) {
            f2fs_release_crypto_ctx(bio->bi_private);
        } else {
            f2fs_end_io_crypto_work(bio->bi_private, bio);
            return;
        }
    }

    bio_for_each_segment_all(bvec, bio, i) {
        struct page *page = bvec->bv_page;

        if (!bio->bi_error) {
            SetPageUptodate(page);
        } else {
            ClearPageUptodate(page);
            SetPageError(page);
        }
        unlock_page(page);
    }
    bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
    struct f2fs_sb_info *sbi = bio->bi_private;
    struct bio_vec *bvec;
    int i;

    bio_for_each_segment_all(bvec, bio, i) {
        struct page *page = bvec->bv_page;

        f2fs_restore_and_release_control_page(&page);

        if (unlikely(bio->bi_error)) {
            set_page_dirty(page);
            set_bit(AS_EIO, &page->mapping->flags);
            f2fs_stop_checkpoint(sbi);
        }
        end_page_writeback(page);
        dec_page_count(sbi, F2FS_WRITEBACK);
    }

    if (!get_pages(sbi, F2FS_WRITEBACK) &&
            !list_empty(&sbi->cp_wait.task_list))
        wake_up(&sbi->cp_wait);

    bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
                int npages, bool is_read)
{
    struct bio *bio;

    bio = f2fs_bio_alloc(npages);

    bio->bi_bdev = sbi->sb->s_bdev;
    bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
    bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
    bio->bi_private = is_read ? NULL : sbi;

    return bio;
}

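/*
 * Editorial note (not from the original source): assuming f2fs's default
 * 4KB block size and 512-byte device sectors, SECTOR_FROM_BLOCK() above
 * amounts to a left shift by 3, e.g. block 100 maps to sector 800.
 */
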
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
    struct f2fs_io_info *fio = &io->fio;

    if (!io->bio)
        return;

    if (is_read_io(fio->rw))
        trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
    else
        trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

    submit_bio(fio->rw, io->bio);
    io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
                enum page_type type, int rw)
{
    enum page_type btype = PAGE_TYPE_OF_BIO(type);
    struct f2fs_bio_info *io;

    io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

    down_write(&io->io_rwsem);

    /* change META to META_FLUSH in the checkpoint procedure */
    if (type >= META_FLUSH) {
        io->fio.type = META_FLUSH;
        if (test_opt(sbi, NOBARRIER))
            io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
        else
            io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
    }
    __submit_merged_bio(io);
    up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
    struct bio *bio;
    struct page *page = fio->encrypted_page ?
                fio->encrypted_page : fio->page;

    trace_f2fs_submit_page_bio(page, fio);
    f2fs_trace_ios(fio, 0);

    /* Allocate a new bio */
    bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

    if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
        bio_put(bio);
        return -EFAULT;
    }

    submit_bio(fio->rw, bio);
    return 0;
}

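/*
 * Usage sketch (mirrors get_read_data_page() below): callers build an
 * f2fs_io_info on the stack and submit a single page:
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = F2FS_I_SB(inode),
 *		.type = DATA,
 *		.rw = READ_SYNC,
 *		.blk_addr = dn.data_blkaddr,
 *		.page = page,
 *		.encrypted_page = NULL,
 *	};
 *	err = f2fs_submit_page_bio(&fio);
 */
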
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
    struct f2fs_sb_info *sbi = fio->sbi;
    enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
    struct f2fs_bio_info *io;
    bool is_read = is_read_io(fio->rw);
    struct page *bio_page;

    io = is_read ? &sbi->read_io : &sbi->write_io[btype];

    verify_block_addr(sbi, fio->blk_addr);

    down_write(&io->io_rwsem);

    if (!is_read)
        inc_page_count(sbi, F2FS_WRITEBACK);

    if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
                        io->fio.rw != fio->rw))
        __submit_merged_bio(io);
alloc_new:
    if (io->bio == NULL) {
        int bio_blocks = MAX_BIO_BLOCKS(sbi);

        io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
        io->fio = *fio;
    }

    bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

    if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
                            PAGE_CACHE_SIZE) {
        __submit_merged_bio(io);
        goto alloc_new;
    }

    io->last_block_in_bio = fio->blk_addr;
    f2fs_trace_ios(fio, 0);

    up_write(&io->io_rwsem);
    trace_f2fs_submit_page_mbio(fio->page, fio);
}

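/*
 * Editorial note: the merge condition above flushes the cached bio
 * whenever the incoming block is not physically contiguous with the last
 * queued one (io->last_block_in_bio != fio->blk_addr - 1) or the rw flags
 * differ; otherwise pages keep accumulating into one bio of up to
 * MAX_BIO_BLOCKS(sbi) pages before submission.
 */
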
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
    struct f2fs_node *rn;
    __le32 *addr_array;
    struct page *node_page = dn->node_page;
    unsigned int ofs_in_node = dn->ofs_in_node;

    f2fs_wait_on_page_writeback(node_page, NODE);

    rn = F2FS_NODE(node_page);

    /* Get physical address of data block */
    addr_array = blkaddr_in_node(rn);
    addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
    set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

    if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
        return -EPERM;
    if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
        return -ENOSPC;

    trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

    dn->data_blkaddr = NEW_ADDR;
    set_data_blkaddr(dn);
    mark_inode_dirty(dn->inode);
    sync_inode_page(dn);
    return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
    bool need_put = dn->inode_page ? false : true;
    int err;

    err = get_dnode_of_data(dn, index, ALLOC_NODE);
    if (err)
        return err;

    if (dn->data_blkaddr == NULL_ADDR)
        err = reserve_new_block(dn);
    if (err || need_put)
        f2fs_put_dnode(dn);
    return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
    struct extent_info ei;
    struct inode *inode = dn->inode;

    if (f2fs_lookup_extent_cache(inode, index, &ei)) {
        dn->data_blkaddr = ei.blk + index - ei.fofs;
        return 0;
    }

    return f2fs_reserve_block(dn, index);
}

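/*
 * Illustrative example of the extent cache arithmetic used above: for a
 * cached extent { .fofs = 100, .blk = 5000, .len = 8 }, a lookup of file
 * block index 103 yields blkaddr 5000 + 103 - 100 = 5003 without ever
 * touching the node page.
 */
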
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
                        int rw, bool for_write)
{
    struct address_space *mapping = inode->i_mapping;
    struct dnode_of_data dn;
    struct page *page;
    struct extent_info ei;
    int err;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(inode),
        .type = DATA,
        .rw = rw,
        .encrypted_page = NULL,
    };

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return read_mapping_page(mapping, index, NULL);

    page = f2fs_grab_cache_page(mapping, index, for_write);
    if (!page)
        return ERR_PTR(-ENOMEM);

    if (f2fs_lookup_extent_cache(inode, index, &ei)) {
        dn.data_blkaddr = ei.blk + index - ei.fofs;
        goto got_it;
    }

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
    if (err)
        goto put_err;
    f2fs_put_dnode(&dn);

    if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
        err = -ENOENT;
        goto put_err;
    }
got_it:
    if (PageUptodate(page)) {
        unlock_page(page);
        return page;
    }

    /*
     * A new dentry page is allocated but not able to be written, since its
     * new inode page couldn't be allocated due to -ENOSPC.
     * In that case, its blkaddr can remain NEW_ADDR.
     * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
     */
    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return page;
    }

    fio.blk_addr = dn.data_blkaddr;
    fio.page = page;
    err = f2fs_submit_page_bio(&fio);
    if (err)
        goto put_err;
    return page;

put_err:
    f2fs_put_page(page, 1);
    return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;

    page = find_get_page(mapping, index);
    if (page && PageUptodate(page))
        return page;
    f2fs_put_page(page, 0);

    page = get_read_data_page(inode, index, READ_SYNC, false);
    if (IS_ERR(page))
        return page;

    if (PageUptodate(page))
        return page;

    wait_on_page_locked(page);
    if (unlikely(!PageUptodate(page))) {
        f2fs_put_page(page, 0);
        return ERR_PTR(-EIO);
    }
    return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers (functions in dir.c and GC) need to be able to tell
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
                            bool for_write)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
repeat:
    page = get_read_data_page(inode, index, READ_SYNC, for_write);
    if (IS_ERR(page))
        return page;

    /* wait for read completion */
    lock_page(page);
    if (unlikely(!PageUptodate(page))) {
        f2fs_put_page(page, 1);
        return ERR_PTR(-EIO);
    }
    if (unlikely(page->mapping != mapping)) {
        f2fs_put_page(page, 1);
        goto repeat;
    }
    return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
        struct page *ipage, pgoff_t index, bool new_i_size)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
    struct dnode_of_data dn;
    int err;
repeat:
    page = f2fs_grab_cache_page(mapping, index, true);
    if (!page) {
        /*
         * before exiting, we should make sure ipage will be released
         * if any error occurs.
         */
        f2fs_put_page(ipage, 1);
        return ERR_PTR(-ENOMEM);
    }

    set_new_dnode(&dn, inode, ipage, NULL, 0);
    err = f2fs_reserve_block(&dn, index);
    if (err) {
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
    }
    if (!ipage)
        f2fs_put_dnode(&dn);

    if (PageUptodate(page))
        goto got_it;

    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
    } else {
        f2fs_put_page(page, 1);

        page = get_read_data_page(inode, index, READ_SYNC, true);
        if (IS_ERR(page))
            goto repeat;

        /* wait for read completion */
        lock_page(page);
    }
got_it:
    if (new_i_size && i_size_read(inode) <
                ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
        i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
        /* Only the directory inode sets new_i_size */
        set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
    }
    return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
    struct f2fs_inode_info *fi = F2FS_I(dn->inode);
    struct f2fs_summary sum;
    struct node_info ni;
    int seg = CURSEG_WARM_DATA;
    pgoff_t fofs;

    if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
        return -EPERM;

    dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
    if (dn->data_blkaddr == NEW_ADDR)
        goto alloc;

    if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
        return -ENOSPC;

alloc:
    get_node_info(sbi, dn->nid, &ni);
    set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

    if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
        seg = CURSEG_DIRECT_IO;

    allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
                                &sum, seg);
    set_data_blkaddr(dn);

    /* update i_size */
    fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
                            dn->ofs_in_node;
    if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
        i_size_write(dn->inode,
                ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));

    /* direct IO doesn't use extent cache to maximize the performance */
    f2fs_drop_largest_extent(dn->inode, fofs);

    return 0;
}

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
                            size_t count)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    u64 start = F2FS_BYTES_TO_BLK(offset);
    u64 len = F2FS_BYTES_TO_BLK(count);
    bool allocated;
    u64 end_offset;

    while (len) {
        f2fs_balance_fs(sbi);
        f2fs_lock_op(sbi);

        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        if (get_dnode_of_data(&dn, start, ALLOC_NODE))
            goto out;

        allocated = false;
        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

        while (dn.ofs_in_node < end_offset && len) {
            block_t blkaddr;

            if (unlikely(f2fs_cp_error(sbi)))
                goto sync_out;

            blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
            if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
                if (__allocate_data_block(&dn))
                    goto sync_out;
                allocated = true;
            }
            len--;
            start++;
            dn.ofs_in_node++;
        }

        if (allocated)
            sync_inode_page(&dn);

        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);
    }
    return;

sync_out:
    if (allocated)
        sync_inode_page(&dn);
    f2fs_put_dnode(&dn);
out:
    f2fs_unlock_op(sbi);
    return;
}

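/*
 * Editorial note: this is the preallocation pass for direct IO writes
 * (see f2fs_direct_IO() below). As a worked example, assuming the
 * default 4KB block size, a 16KB write at offset 0 becomes start = 0,
 * len = 4, i.e. up to four __allocate_data_block() calls, batched under
 * one f2fs_lock_op() window per dnode.
 */
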
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO via the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                        int create, int flag)
{
    unsigned int maxblocks = map->m_len;
    struct dnode_of_data dn;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
    pgoff_t pgofs, end_offset;
    int err = 0, ofs = 1;
    struct extent_info ei;
    bool allocated = false;

    map->m_len = 0;
    map->m_flags = 0;

    /* it only supports block size == page size */
    pgofs = (pgoff_t)map->m_lblk;

    if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
        map->m_pblk = ei.blk + pgofs - ei.fofs;
        map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
        map->m_flags = F2FS_MAP_MAPPED;
        goto out;
    }

    if (create)
        f2fs_lock_op(F2FS_I_SB(inode));

    /* When reading holes, we need its node page */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, pgofs, mode);
    if (err) {
        if (err == -ENOENT)
            err = 0;
        goto unlock_out;
    }

    if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
        if (create) {
            if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto put_out;
            }
            err = __allocate_data_block(&dn);
            if (err)
                goto put_out;
            allocated = true;
            map->m_flags = F2FS_MAP_NEW;
        } else {
            if (flag != F2FS_GET_BLOCK_FIEMAP ||
                        dn.data_blkaddr != NEW_ADDR) {
                if (flag == F2FS_GET_BLOCK_BMAP)
                    err = -ENOENT;
                goto put_out;
            }

            /*
             * preallocated unwritten block should be mapped
             * for fiemap.
             */
            if (dn.data_blkaddr == NEW_ADDR)
                map->m_flags = F2FS_MAP_UNWRITTEN;
        }
    }

    map->m_flags |= F2FS_MAP_MAPPED;
    map->m_pblk = dn.data_blkaddr;
    map->m_len = 1;

    end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
    dn.ofs_in_node++;
    pgofs++;

get_next:
    if (dn.ofs_in_node >= end_offset) {
        if (allocated)
            sync_inode_page(&dn);
        allocated = false;
        f2fs_put_dnode(&dn);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
            if (err == -ENOENT)
                err = 0;
            goto unlock_out;
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
    }

    if (maxblocks > map->m_len) {
        block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

        if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
            if (create) {
                if (unlikely(f2fs_cp_error(sbi))) {
                    err = -EIO;
                    goto sync_out;
                }
                err = __allocate_data_block(&dn);
                if (err)
                    goto sync_out;
                allocated = true;
                map->m_flags |= F2FS_MAP_NEW;
                blkaddr = dn.data_blkaddr;
            } else {
                /*
                 * we only merge preallocated unwritten blocks
                 * for fiemap.
                 */
                if (flag != F2FS_GET_BLOCK_FIEMAP ||
                        blkaddr != NEW_ADDR)
                    goto sync_out;
            }
        }

        /* Give more consecutive addresses for the readahead */
        if ((map->m_pblk != NEW_ADDR &&
                blkaddr == (map->m_pblk + ofs)) ||
                (map->m_pblk == NEW_ADDR &&
                blkaddr == NEW_ADDR)) {
            ofs++;
            dn.ofs_in_node++;
            pgofs++;
            map->m_len++;
            goto get_next;
        }
    }
sync_out:
    if (allocated)
        sync_inode_page(&dn);
put_out:
    f2fs_put_dnode(&dn);
unlock_out:
    if (create)
        f2fs_unlock_op(F2FS_I_SB(inode));
out:
    trace_f2fs_map_blocks(inode, map, err);
    return err;
}

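/*
 * Illustrative request/response (assumed values, not from the original
 * source): a caller asking for 16 blocks starting at file block 0,
 *
 *	struct f2fs_map_blocks map = { .m_lblk = 0, .m_len = 16 };
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
 *
 * may get back m_pblk = 8192, m_len = 4, m_flags = F2FS_MAP_MAPPED if
 * only the first four blocks are physically consecutive; the caller is
 * expected to loop, advancing m_lblk by the returned m_len.
 */
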
static int __get_data_block(struct inode *inode, sector_t iblock,
            struct buffer_head *bh, int create, int flag)
{
    struct f2fs_map_blocks map;
    int ret;

    map.m_lblk = iblock;
    map.m_len = bh->b_size >> inode->i_blkbits;

    ret = f2fs_map_blocks(inode, &map, create, flag);
    if (!ret) {
        map_bh(bh, inode->i_sb, map.m_pblk);
        bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
        bh->b_size = map.m_len << inode->i_blkbits;
    }
    return ret;
}

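/*
 * Editorial note: this adapter keeps the usual get_block_t convention:
 * bh->b_size carries the requested length in bytes on entry and the
 * mapped length on exit, converted to and from blocks with
 * inode->i_blkbits.
 */
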
static int get_data_block(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create, int flag)
{
    return __get_data_block(inode, iblock, bh_result, create, flag);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create)
{
    return __get_data_block(inode, iblock, bh_result, create,
                        F2FS_GET_BLOCK_DIO);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create)
{
    return __get_data_block(inode, iblock, bh_result, create,
                        F2FS_GET_BLOCK_BMAP);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
    return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
    return (blk << inode->i_blkbits);
}

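/*
 * Illustrative arithmetic, assuming the default 4KB block size
 * (i_blkbits == 12): logical_to_blk(inode, 8192) == 2 and
 * blk_to_logical(inode, 2) == 8192.
 */
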
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        u64 start, u64 len)
{
    struct buffer_head map_bh;
    sector_t start_blk, last_blk;
    loff_t isize = i_size_read(inode);
    u64 logical = 0, phys = 0, size = 0;
    u32 flags = 0;
    bool past_eof = false, whole_file = false;
    int ret = 0;

    ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
    if (ret)
        return ret;

    mutex_lock(&inode->i_mutex);

    if (len >= isize) {
        whole_file = true;
        len = isize;
    }

    if (logical_to_blk(inode, len) == 0)
        len = blk_to_logical(inode, 1);

    start_blk = logical_to_blk(inode, start);
    last_blk = logical_to_blk(inode, start + len - 1);
next:
    memset(&map_bh, 0, sizeof(struct buffer_head));
    map_bh.b_size = len;

    ret = get_data_block(inode, start_blk, &map_bh, 0,
                    F2FS_GET_BLOCK_FIEMAP);
    if (ret)
        goto out;

    /* HOLE */
    if (!buffer_mapped(&map_bh)) {
        start_blk++;

        if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
            past_eof = 1;

        if (past_eof && size) {
            flags |= FIEMAP_EXTENT_LAST;
            ret = fiemap_fill_next_extent(fieinfo, logical,
                    phys, size, flags);
        } else if (size) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                    phys, size, flags);
            size = 0;
        }

        /* if we have holes up to/past EOF then we're done */
        if (start_blk > last_blk || past_eof || ret)
            goto out;
    } else {
        if (start_blk > last_blk && !whole_file) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                    phys, size, flags);
            goto out;
        }

        /*
         * if size != 0 then we know we already have an extent
         * to add, so add it.
         */
        if (size) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                    phys, size, flags);
            if (ret)
                goto out;
        }

        logical = blk_to_logical(inode, start_blk);
        phys = blk_to_logical(inode, map_bh.b_blocknr);
        size = map_bh.b_size;
        flags = 0;
        if (buffer_unwritten(&map_bh))
            flags = FIEMAP_EXTENT_UNWRITTEN;

        start_blk += logical_to_blk(inode, size);

        /*
         * If we are past the EOF, then we need to make sure as
         * soon as we find a hole that the last extent we found
         * is marked with FIEMAP_EXTENT_LAST
         */
        if (!past_eof && logical + size >= isize)
            past_eof = true;
    }
    cond_resched();
    if (fatal_signal_pending(current))
        ret = -EINTR;
    else
        goto next;
out:
    if (ret == 1)
        ret = 0;

    mutex_unlock(&inode->i_mutex);
    return ret;
}

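/*
 * Illustrative walk-through (assumed layout, not from the original
 * source): for a file whose blocks 0-3 are mapped and 4-7 are a hole,
 * the loop above emits one extent covering blocks 0-3, then hits the
 * hole, marks the emitted extent FIEMAP_EXTENT_LAST once past EOF, and
 * returns; preallocated (NEW_ADDR) blocks are instead reported with
 * FIEMAP_EXTENT_UNWRITTEN.
 */
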
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major difference is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
            struct list_head *pages, struct page *page,
            unsigned nr_pages)
{
    struct bio *bio = NULL;
    unsigned page_idx;
    sector_t last_block_in_bio = 0;
    struct inode *inode = mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocksize = 1 << blkbits;
    sector_t block_in_file;
    sector_t last_block;
    sector_t last_block_in_file;
    sector_t block_nr;
    struct block_device *bdev = inode->i_sb->s_bdev;
    struct f2fs_map_blocks map;

    map.m_pblk = 0;
    map.m_lblk = 0;
    map.m_len = 0;
    map.m_flags = 0;

    for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

        prefetchw(&page->flags);
        if (pages) {
            page = list_entry(pages->prev, struct page, lru);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping,
                        page->index, GFP_KERNEL))
                goto next_page;
        }

        block_in_file = (sector_t)page->index;
        last_block = block_in_file + nr_pages;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                                blkbits;
        if (last_block > last_block_in_file)
            last_block = last_block_in_file;

        /*
         * Map blocks using the previous result first.
         */
        if ((map.m_flags & F2FS_MAP_MAPPED) &&
                block_in_file > map.m_lblk &&
                block_in_file < (map.m_lblk + map.m_len))
            goto got_it;

        /*
         * Then do more f2fs_map_blocks() calls until we are
         * done with this page.
         */
        map.m_flags = 0;

        if (block_in_file < last_block) {
            map.m_lblk = block_in_file;
            map.m_len = last_block - block_in_file;

            if (f2fs_map_blocks(inode, &map, 0,
                        F2FS_GET_BLOCK_READ))
                goto set_error_page;
        }
got_it:
        if ((map.m_flags & F2FS_MAP_MAPPED)) {
            block_nr = map.m_pblk + block_in_file - map.m_lblk;
            SetPageMappedToDisk(page);

            if (!PageUptodate(page) && !cleancache_get_page(page)) {
                SetPageUptodate(page);
                goto confused;
            }
        } else {
            zero_user_segment(page, 0, PAGE_CACHE_SIZE);
            SetPageUptodate(page);
            unlock_page(page);
            goto next_page;
        }

        /*
         * This page will go to BIO.  Do we need to send this
         * BIO off first?
         */
        if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
            submit_bio(READ, bio);
            bio = NULL;
        }
        if (bio == NULL) {
            struct f2fs_crypto_ctx *ctx = NULL;

            if (f2fs_encrypted_inode(inode) &&
                    S_ISREG(inode->i_mode)) {

                ctx = f2fs_get_crypto_ctx(inode);
                if (IS_ERR(ctx))
                    goto set_error_page;

                /* wait for the page to be moved by cleaning */
                f2fs_wait_on_encrypted_page_writeback(
                        F2FS_I_SB(inode), block_nr);
            }

            bio = bio_alloc(GFP_KERNEL,
                min_t(int, nr_pages, BIO_MAX_PAGES));
            if (!bio) {
                if (ctx)
                    f2fs_release_crypto_ctx(ctx);
                goto set_error_page;
            }
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
            bio->bi_end_io = f2fs_read_end_io;
            bio->bi_private = ctx;
        }

        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
            goto submit_and_realloc;

        last_block_in_bio = block_nr;
        goto next_page;
set_error_page:
        SetPageError(page);
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        unlock_page(page);
        goto next_page;
confused:
        if (bio) {
            submit_bio(READ, bio);
            bio = NULL;
        }
        unlock_page(page);
next_page:
        if (pages)
            page_cache_release(page);
    }
    BUG_ON(pages && !list_empty(pages));
    if (bio)
        submit_bio(READ, bio);
    return 0;
}

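/*
 * Editorial note: like the mpage code it derives from, the loop above
 * keeps appending pages to one bio as long as the mapped blocks stay
 * physically contiguous (last_block_in_bio == block_nr - 1); any
 * discontinuity, a full bio, or a page satisfied from cleancache forces
 * a submit and a fresh bio allocation.
 */
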
static int f2fs_read_data_page(struct file *file, struct page *page)
{
    struct inode *inode = page->mapping->host;
    int ret = -EAGAIN;

    trace_f2fs_readpage(page, DATA);

    /* If the file has inline data, try to read it directly */
    if (f2fs_has_inline_data(inode))
        ret = f2fs_read_inline_data(inode, page);
    if (ret == -EAGAIN)
        ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
    return ret;
}

static int f2fs_read_data_pages(struct file *file,
            struct address_space *mapping,
            struct list_head *pages, unsigned nr_pages)
{
    struct inode *inode = file->f_mapping->host;
    struct page *page = list_entry(pages->prev, struct page, lru);

    trace_f2fs_readpages(inode, page, nr_pages);

    /* If the file has inline data, skip readpages */
    if (f2fs_has_inline_data(inode))
        return 0;

    return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

int do_write_data_page(struct f2fs_io_info *fio)
{
    struct page *page = fio->page;
    struct inode *inode = page->mapping->host;
    struct dnode_of_data dn;
    int err = 0;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
    if (err)
        return err;

    fio->blk_addr = dn.data_blkaddr;

    /* This page is already truncated */
    if (fio->blk_addr == NULL_ADDR) {
        ClearPageUptodate(page);
        goto out_writepage;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

        /* wait for GCed encrypted page writeback */
        f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
                            fio->blk_addr);

        fio->encrypted_page = f2fs_encrypt(inode, fio->page);
        if (IS_ERR(fio->encrypted_page)) {
            err = PTR_ERR(fio->encrypted_page);
            goto out_writepage;
        }
    }

    set_page_writeback(page);

    /*
     * If the current allocation needs SSR, it is better to do in-place
     * writes for the updated data.
     */
    if (unlikely(fio->blk_addr != NEW_ADDR &&
            !is_cold_data(page) &&
            need_inplace_update(inode))) {
        rewrite_data_page(fio);
        set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
        trace_f2fs_do_write_data_page(page, IPU);
    } else {
        write_data_page(&dn, fio);
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        trace_f2fs_do_write_data_page(page, OPU);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
            set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
    }
out_writepage:
    f2fs_put_dnode(&dn);
    return err;
}

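/*
 * Editorial note on the IPU/OPU split above: an in-place update (IPU)
 * rewrites the existing blkaddr, which helps when free segments are
 * scarce and SSR allocation is needed, while the default out-of-place
 * update (OPU) allocates a new block in log-structured fashion and then
 * records the new address in the node page and the extent cache.
 */
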
static int f2fs_write_data_page(struct page *page,
                    struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    loff_t i_size = i_size_read(inode);
    const pgoff_t end_index = ((unsigned long long) i_size)
                            >> PAGE_CACHE_SHIFT;
    unsigned offset = 0;
    bool need_balance_fs = false;
    int err = 0;
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = DATA,
        .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
        .page = page,
        .encrypted_page = NULL,
    };

    trace_f2fs_writepage(page, DATA);

    if (page->index < end_index)
        goto write;

    /*
     * If the offset is out-of-range of file size,
     * this page does not have to be written to disk.
     */
    offset = i_size & (PAGE_CACHE_SIZE - 1);
    if ((page->index >= end_index + 1) || !offset)
        goto out;

    zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
    if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
        goto redirty_out;
    if (f2fs_is_drop_cache(inode))
        goto out;
    if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
            available_free_memory(sbi, BASE_CHECK))
        goto redirty_out;

    /* Dentry blocks are controlled by checkpoint */
    if (S_ISDIR(inode->i_mode)) {
        if (unlikely(f2fs_cp_error(sbi)))
            goto redirty_out;
        err = do_write_data_page(&fio);
        goto done;
    }

    /* we should bypass data pages to let the kworker jobs proceed */
    if (unlikely(f2fs_cp_error(sbi))) {
        SetPageError(page);
        goto out;
    }

    if (!wbc->for_reclaim)
        need_balance_fs = true;
    else if (has_not_enough_free_secs(sbi, 0))
        goto redirty_out;

    err = -EAGAIN;
    f2fs_lock_op(sbi);
    if (f2fs_has_inline_data(inode))
        err = f2fs_write_inline_data(inode, page);
    if (err == -EAGAIN)
        err = do_write_data_page(&fio);
    f2fs_unlock_op(sbi);
done:
    if (err && err != -ENOENT)
        goto redirty_out;

    clear_cold_data(page);
out:
    inode_dec_dirty_pages(inode);
    if (err)
        ClearPageUptodate(page);
    unlock_page(page);
    if (need_balance_fs)
        f2fs_balance_fs(sbi);
    if (wbc->for_reclaim)
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
    return 0;

redirty_out:
    redirty_page_for_writepage(wbc, page);
    return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                void *data)
{
    struct address_space *mapping = data;
    int ret = mapping->a_ops->writepage(page, wbc);
    mapping_set_error(mapping, ret);
    return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that it writes cold data pages in a separate step
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
            struct writeback_control *wbc, writepage_t writepage,
            void *data)
{
    int ret = 0;
    int done = 0;
    struct pagevec pvec;
    int nr_pages;
    pgoff_t uninitialized_var(writeback_index);
    pgoff_t index;
    pgoff_t end;        /* Inclusive */
    pgoff_t done_index;
    int cycled;
    int range_whole = 0;
    int tag;
    int step = 0;

    pagevec_init(&pvec, 0);
next:
    if (wbc->range_cyclic) {
        writeback_index = mapping->writeback_index; /* prev offset */
        index = writeback_index;
        if (index == 0)
            cycled = 1;
        else
            cycled = 0;
        end = -1;
    } else {
        index = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = wbc->range_end >> PAGE_CACHE_SHIFT;
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
            range_whole = 1;
        cycled = 1; /* ignore range_cyclic tests */
    }
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag = PAGECACHE_TAG_TOWRITE;
    else
        tag = PAGECACHE_TAG_DIRTY;
retry:
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag_pages_for_writeback(mapping, index, end);
    done_index = index;
    while (!done && (index <= end)) {
        int i;

        nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
        if (nr_pages == 0)
            break;

        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];

            if (page->index > end) {
                done = 1;
                break;
            }

            done_index = page->index;

            lock_page(page);

            if (unlikely(page->mapping != mapping)) {
continue_unlock:
                unlock_page(page);
                continue;
            }

            if (!PageDirty(page)) {
                /* someone wrote it for us */
                goto continue_unlock;
            }

            if (step == is_cold_data(page))
                goto continue_unlock;

            if (PageWriteback(page)) {
                if (wbc->sync_mode != WB_SYNC_NONE)
                    f2fs_wait_on_page_writeback(page, DATA);
                else
                    goto continue_unlock;
            }

            BUG_ON(PageWriteback(page));
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    unlock_page(page);
                    ret = 0;
                } else {
                    done_index = page->index + 1;
                    done = 1;
                    break;
                }
            }

            if (--wbc->nr_to_write <= 0 &&
                wbc->sync_mode == WB_SYNC_NONE) {
                done = 1;
                break;
            }
        }
        pagevec_release(&pvec);
        cond_resched();
    }

    if (step < 1) {
        step++;
        goto next;
    }

    if (!cycled && !done) {
        cycled = 1;
        index = 0;
        end = writeback_index - 1;
        goto retry;
    }
    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        mapping->writeback_index = done_index;

    return ret;
}

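/*
 * Editorial note: the "step" variable above drives two passes over the
 * dirty pages; a page is skipped while step == is_cold_data(page), so
 * pages with the cold bit set are written in pass 0 and the remaining
 * warm/hot pages in pass 1, which keeps the two temperature classes in
 * separate merged bios.
 */
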
static int f2fs_write_data_pages(struct address_space *mapping,
                struct writeback_control *wbc)
{
    struct inode *inode = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    bool locked = false;
    int ret;
    long diff;

    trace_f2fs_writepages(mapping->host, wbc, DATA);

    /* deal with chardevs and other special files */
    if (!mapping->a_ops->writepage)
        return 0;

    /* skip writing if there is no dirty page in this inode */
    if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
        return 0;

    if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
            get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
            available_free_memory(sbi, DIRTY_DENTS))
        goto skip_write;

    /* during POR, we don't need to trigger writepage at all. */
    if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
        goto skip_write;

    diff = nr_pages_to_write(sbi, DATA, wbc);

    if (!S_ISDIR(inode->i_mode)) {
        mutex_lock(&sbi->writepages);
        locked = true;
    }
    ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
    f2fs_submit_merged_bio(sbi, DATA, WRITE);
    if (locked)
        mutex_unlock(&sbi->writepages);

    remove_dirty_dir_inode(inode);

    wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
    return ret;

skip_write:
    wbc->pages_skipped += get_dirty_pages(inode);
    return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
    struct inode *inode = mapping->host;

    if (to > inode->i_size) {
        truncate_pagecache(inode, inode->i_size);
        truncate_blocks(inode, inode->i_size, true);
    }
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        loff_t pos, unsigned len, unsigned flags,
        struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct page *page = NULL;
    struct page *ipage;
    pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
    struct dnode_of_data dn;
    int err = 0;

    trace_f2fs_write_begin(inode, pos, len, flags);

    f2fs_balance_fs(sbi);

    /*
     * We should check this at this moment to avoid deadlock on inode page
     * and #0 page. The locking rule for inline_data conversion should be:
     * lock_page(page #0) -> lock_page(inode_page)
     */
    if (index != 0) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            goto fail;
    }
repeat:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page) {
        err = -ENOMEM;
        goto fail;
    }

    *pagep = page;

    f2fs_lock_op(sbi);

    /* check inline_data */
    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto unlock_fail;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode)) {
        if (pos + len <= MAX_INLINE_DATA) {
            read_inline_data(page, ipage);
            set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
            sync_inode_page(&dn);
            goto put_next;
        }
        err = f2fs_convert_inline_page(&dn, page);
        if (err)
            goto put_fail;
    }

    err = f2fs_get_block(&dn, index);
    if (err)
        goto put_fail;
put_next:
    f2fs_put_dnode(&dn);
    f2fs_unlock_op(sbi);

    f2fs_wait_on_page_writeback(page, DATA);

    /* wait for GCed encrypted page writeback */
    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

    if (len == PAGE_CACHE_SIZE)
        goto out_update;
    if (PageUptodate(page))
        goto out_clear;

    if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned end = start + len;

        /* Reading beyond i_size is simple: memset to zero */
        zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
        goto out_update;
    }

    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
    } else {
        struct f2fs_io_info fio = {
            .sbi = sbi,
            .type = DATA,
            .rw = READ_SYNC,
            .blk_addr = dn.data_blkaddr,
            .page = page,
            .encrypted_page = NULL,
        };
        err = f2fs_submit_page_bio(&fio);
        if (err)
            goto fail;

        lock_page(page);
        if (unlikely(!PageUptodate(page))) {
            err = -EIO;
            goto fail;
        }
        if (unlikely(page->mapping != mapping)) {
            f2fs_put_page(page, 1);
            goto repeat;
        }

        /* avoid symlink page */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
            err = f2fs_decrypt_one(inode, page);
            if (err)
                goto fail;
        }
    }
out_update:
    SetPageUptodate(page);
out_clear:
    clear_cold_data(page);
    return 0;

put_fail:
    f2fs_put_dnode(&dn);
unlock_fail:
    f2fs_unlock_op(sbi);
fail:
    f2fs_put_page(page, 1);
    f2fs_write_failed(mapping, pos + len);
    return err;
}

static int f2fs_write_end(struct file *file,
            struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
            struct page *page, void *fsdata)
{
    struct inode *inode = page->mapping->host;

    trace_f2fs_write_end(inode, pos, len, copied);

    set_page_dirty(page);

    if (pos + copied > i_size_read(inode)) {
        i_size_write(inode, pos + copied);
        mark_inode_dirty(inode);
        update_inode_page(inode);
    }

    f2fs_put_page(page, 1);
    return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
                loff_t offset)
{
    unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

    if (offset & blocksize_mask)
        return -EINVAL;

    if (iov_iter_alignment(iter) & blocksize_mask)
        return -EINVAL;

    return 0;
}

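/*
 * Illustrative example, assuming a 4KB block size (mask 4095): a direct
 * IO request at offset 8192 whose iovecs are 4KB-aligned passes the
 * checks above, while offset 4100, or any iovec whose base or length is
 * not block-aligned, fails with -EINVAL.
 */
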
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                loff_t offset)
{
    struct file *file = iocb->ki_filp;
    struct address_space *mapping = file->f_mapping;
    struct inode *inode = mapping->host;
    size_t count = iov_iter_count(iter);
    int err;

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return 0;

    err = check_direct_IO(inode, iter, offset);
    if (err)
        return err;

    trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

    if (iov_iter_rw(iter) == WRITE) {
        __allocate_data_blocks(inode, offset, count);
        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
            err = -EIO;
            goto out;
        }
    }

    err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
out:
    if (err < 0 && iov_iter_rw(iter) == WRITE)
        f2fs_write_failed(mapping, offset + count);

    trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

    return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
                unsigned int length)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
        (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
        return;

    if (PageDirty(page)) {
        if (inode->i_ino == F2FS_META_INO(sbi))
            dec_page_count(sbi, F2FS_DIRTY_META);
        else if (inode->i_ino == F2FS_NODE_INO(sbi))
            dec_page_count(sbi, F2FS_DIRTY_NODES);
        else
            inode_dec_dirty_pages(inode);
    }

    /* This is an atomically written page, so keep Private */
    if (IS_ATOMIC_WRITTEN_PAGE(page))
        return;

    ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
    /* If this is a dirty page, keep PagePrivate */
    if (PageDirty(page))
        return 0;

    /* This is an atomically written page, so keep Private */
    if (IS_ATOMIC_WRITTEN_PAGE(page))
        return 0;

    ClearPagePrivate(page);
    return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;

    trace_f2fs_set_page_dirty(page, DATA);

    SetPageUptodate(page);

    if (f2fs_is_atomic_file(inode)) {
        if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
            register_inmem_page(inode, page);
            return 1;
        }
        /*
         * This page has already been registered, so we just
         * return here.
         */
        return 0;
    }

    if (!PageDirty(page)) {
        __set_page_dirty_nobuffers(page);
        update_dirty_page(inode, page);
        return 1;
    }
    return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
    struct inode *inode = mapping->host;

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        int err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }
    return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
    .readpage       = f2fs_read_data_page,
    .readpages      = f2fs_read_data_pages,
    .writepage      = f2fs_write_data_page,
    .writepages     = f2fs_write_data_pages,
    .write_begin    = f2fs_write_begin,
    .write_end      = f2fs_write_end,
    .set_page_dirty = f2fs_set_data_page_dirty,
    .invalidatepage = f2fs_invalidate_page,
    .releasepage    = f2fs_release_page,
    .direct_IO      = f2fs_direct_IO,
    .bmap           = f2fs_bmap,
};