Git Repo - linux.git/blame - fs/f2fs/data.c
f2fs: avoid latency-critical readahead of node pages
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

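/*
 * Completion handlers for the low-level read/write bios.  On read
 * completion, encrypted bios are handed back to fscrypt for page
 * decryption; plain bios mark each page up-to-date (or in error) and
 * unlock it.
 */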
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
			struct bio *bio, enum page_type type)
{
	if (!is_read_io(rw)) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(rw, bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	__submit_bio(io->sbi, fio->rw, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is unlocked later by the bio completion handler.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	__submit_bio(fio->sbi, fio->rw, bio, fio->type);
	return 0;
}

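/*
 * Queue one page into the per-type open bio.  The page is merged into
 * the current bio when its block address is contiguous with the last
 * block queued and the request flags match; otherwise the open bio is
 * submitted first and a fresh one is allocated.
 */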
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

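/*
 * Reserve up to @count new blocks in the dnode page: every NULL_ADDR
 * slot scanned from dn->ofs_in_node is marked NEW_ADDR, i.e. allocated
 * but not yet written.  Callers are expected to hold f2fs_lock_op()
 * when this can race with checkpoint (see f2fs_map_blocks()).
 */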
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

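/*
 * Read one data page: consult the extent cache first, fall back to a
 * dnode lookup, and zero-fill pages whose address is still NEW_ADDR
 * (reserved but never written).  Encrypted regular files go through
 * the generic read path instead.
 */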
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

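/*
 * Preallocate blocks for an upcoming write.  Direct writes convert any
 * inline data and map blocks with F2FS_GET_BLOCK_PRE_DIO; buffered
 * writes that outgrow the inline area are mapped with
 * F2FS_GET_BLOCK_PRE_AIO.  Encrypted inodes skip preallocation here.
 */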
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(inode, FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

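/*
 * Bridge between f2fs_map_blocks() and the buffer_head based generic
 * block helpers: the mapping result is copied into bh->b_state and
 * bh->b_size so that blockdev_direct_IO() and generic_block_bmap()
 * can consume it.
 */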
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

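/*
 * fiemap walks the file via F2FS_GET_BLOCK_FIEMAP mappings.  Holes are
 * skipped using map.m_next_pgofs, extents are emitted one iteration
 * behind the walk, and the last extent is flagged once a hole beyond
 * i_size is found.
 */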
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/*
		 * Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

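/*
 * Write one data page.  When the block already has an on-disk address
 * and need_inplace_update() asks for it, the page is rewritten in
 * place (IPU); otherwise it takes the default out-of-place,
 * log-structured path (OPU).  Pages of encrypted inodes are first
 * encrypted into a bounce page.
 */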
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

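/*
 * ->writepage entry point.  Pages entirely beyond i_size are dropped,
 * dentry pages are written under checkpoint control, and errors other
 * than -ENOENT redirty the page.  The function returns 0 for -ENOENT
 * as well, since that only means the page was truncated meanwhile.
 */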
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page having a journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return err;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				done_index = page->index + 1;
				done = 1;
				break;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	ret = f2fs_write_cache_pages(mapping, wbc);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */
	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

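/*
 * Resolve the block address backing a write_begin request.  Inline
 * data is either served from the inode page or converted out, holes
 * are allocated via f2fs_get_block(), and f2fs_lock_op() is taken only
 * when the node page may change (inline data, or writes at/after EOF).
 */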
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = fscrypt_decrypt_page(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

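/*
 * Direct I/O entry point.  Returning 0 for encrypted inodes and
 * LFS-mode mounts effectively falls back to buffered I/O; a failed
 * direct write truncates any blocks allocated past the old EOF via
 * f2fs_write_failed().
 */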
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
	if (test_opt(F2FS_I_SB(inode), LFS))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	if (iov_iter_rw(iter) == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};