/*
 * linux/fs/ext4/fsync.c
 *
 * Copyright (C) 1993 Stephen Tweedie ([email protected])
 * from
 * Copyright (C) 1992 Remy Card ([email protected])
 *                    Laboratoire MASI - Institut Blaise Pascal
 *                    Universite Pierre et Marie Curie (Paris VI)
 * from
 * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4fs fsync primitive
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller ([email protected]), 1995
 *
 * Removed unnecessary code duplication for little endian machines
 * and excessive __inline__s.
 *        Andi Kleen, 1997
 *
 * Major simplifications and cleanup - we only need to do the metadata, because
 * we can depend on generic_block_fdatasync() to sync the data blocks.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>

#include "ext4.h"
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

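/*
 * Debugging helper: walk the inode's list of completed but not yet
 * converted IOs (i_completed_io_list) and print each entry.  Compiled
 * away unless EXT4FS_DEBUG is defined.
 */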
static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;
        unsigned long flags;

        if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
                ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
                return;
        }

        ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
        spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
        list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                           io, inode->i_ino, io0, io1);
        }
        spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}

/*
 * This function is called from ext4_sync_file().
 *
 * When IO is completed, the work to convert unwritten extents to
 * written is queued on a workqueue but may not get scheduled
 * immediately. When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps track of a list of pending/completed IO that
 * might need the conversion. This function walks through
 * the list and converts the related unwritten extents of completed IO
 * to written.
 * The function returns 0 once all completed IO has been converted,
 * or a negative error code if a conversion fails.
 */
extern int ext4_flush_completed_IO(struct inode *inode)
{
        ext4_io_end_t *io;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned long flags;
        int ret = 0;
        int ret2 = 0;

        if (list_empty(&ei->i_completed_io_list))
                return ret;

        dump_completed_IO(inode);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        while (!list_empty(&ei->i_completed_io_list)) {
                io = list_entry(ei->i_completed_io_list.next,
                                ext4_io_end_t, list);
                /*
                 * Calling ext4_end_io_nolock() to convert completed
                 * IO to written.
                 *
                 * When ext4_sync_file() is called, run_queue() may already
                 * be about to flush the work corresponding to this io
                 * structure. It will be upset if it finds that the io
                 * structure behind the work it is about to schedule has
                 * been freed.
                 *
                 * Thus we need to keep the io structure valid here even
                 * after the conversion has finished. The io structure has
                 * a flag to avoid converting it twice, once from fsync and
                 * once from the background work queue.
                 */
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                ret = ext4_end_io_nolock(io);
                spin_lock_irqsave(&ei->i_completed_io_lock, flags);
                if (ret < 0)
                        ret2 = ret;
                else
                        list_del_init(&io->list);
        }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
        return (ret2 < 0) ? ret2 : 0;
}

/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file. This may apply to
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
        struct writeback_control wbc;
        struct dentry *dentry = NULL;
        struct inode *next;
        int ret = 0;

        if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
                return 0;
        inode = igrab(inode);
        while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
                ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
                dentry = NULL;
                spin_lock(&inode->i_lock);
                if (!list_empty(&inode->i_dentry)) {
                        dentry = list_first_entry(&inode->i_dentry,
                                                  struct dentry, d_alias);
                        dget(dentry);
                }
                spin_unlock(&inode->i_lock);
                if (!dentry)
                        break;
                next = igrab(dentry->d_parent->d_inode);
                dput(dentry);
                if (!next)
                        break;
                iput(inode);
                inode = next;
                ret = sync_mapping_buffers(inode->i_mapping);
                if (ret)
                        break;
                memset(&wbc, 0, sizeof(wbc));
                wbc.sync_mode = WB_SYNC_ALL;
                wbc.nr_to_write = 0; /* only write out the inode */
                ret = sync_inode(inode, &wbc);
                if (ret)
                        break;
        }
        iput(inode);
        return ret;
}

/**
 * __sync_inode - generic_file_fsync without the locking and filemap_write
 * @inode:	inode to sync
 * @datasync:	only sync essential metadata if true
 *
 * This is just generic_file_fsync without the locking. This is needed for
 * nojournal mode to make sure this inode's data/metadata makes it to disk
 * properly. The i_mutex should be held already.
 */
static int __sync_inode(struct inode *inode, int datasync)
{
        int err;
        int ret;

        ret = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY))
                return ret;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return ret;

        err = sync_inode_metadata(inode, 1);
        if (ret == 0)
                ret = err;
        return ret;
}

/*
 * akpm: A new design for ext4_sync_file().
 *
 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
 * There cannot be a transaction open by this task.
 * Another task could have dirtied this inode. Its data can be in any
 * state in the journalling system.
 *
 * What we do is just kick off a commit and wait on it. This will snapshot the
 * inode to disk.
 *
 * i_mutex lock is held when entering and exiting this function
 */

int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_inode_info *ei = EXT4_I(inode);
        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
        int ret;
        tid_t commit_tid;
        bool needs_barrier = false;

        J_ASSERT(ext4_journal_current_handle() == NULL);

        trace_ext4_sync_file_enter(file, datasync);

        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;
        mutex_lock(&inode->i_mutex);

        if (inode->i_sb->s_flags & MS_RDONLY)
                goto out;

        ret = ext4_flush_completed_IO(inode);
        if (ret < 0)
                goto out;

        if (!journal) {
                ret = __sync_inode(inode, datasync);
                if (!ret && !list_empty(&inode->i_dentry))
                        ret = ext4_sync_parent(inode);
                goto out;
        }

        /*
         * data=writeback,ordered:
         *  The caller's filemap_fdatawrite()/wait will sync the data.
         *  Metadata is in the journal, we wait for proper transaction to
         *  commit here.
         *
         * data=journal:
         *  filemap_fdatawrite won't do anything (the buffers are clean).
         *  ext4_force_commit will write the file data into the journal and
         *  will wait on that.
         *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
         *  (they were dirtied by commit).  But that's OK - the blocks are
         *  safe in-journal, which is all fsync() needs to ensure.
         */
        if (ext4_should_journal_data(inode)) {
                ret = ext4_force_commit(inode->i_sb);
                goto out;
        }

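        /*
         * data=ordered or data=writeback with a journal: wait for the
         * jbd2 commit that covers this inode - i_datasync_tid for
         * fdatasync(), i_sync_tid for fsync().  If barriers are enabled
         * but that commit will not issue a cache flush of its own (for
         * example, it has already committed), flush the block device
         * explicitly so the synced data is durable on the media.
         */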
        commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_trans_will_send_data_barrier(journal, commit_tid))
                needs_barrier = true;
        jbd2_log_start_commit(journal, commit_tid);
        ret = jbd2_log_wait_commit(journal, commit_tid);
        if (needs_barrier)
                blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 out:
        mutex_unlock(&inode->i_mutex);
        trace_ext4_sync_file_exit(inode, ret);
        return ret;
}
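
For context, a minimal sketch of how this entry point is reached, assuming the 3.1-era kernel this listing comes from: fs/ext4/file.c registers ext4_sync_file() as the ->fsync method, and the fsync(2)/fdatasync(2) system calls reach it via vfs_fsync_range(). The snippets below are abbreviated illustrations of those two files, not part of fsync.c; unrelated fields and helpers are elided.

/* fs/ext4/file.c (abbreviated): the fsync primitive above is wired into
 * the VFS as ext4's ->fsync method.
 */
const struct file_operations ext4_file_operations = {
        /* ... read/write/mmap methods elided ... */
        .fsync          = ext4_sync_file,
        /* ... */
};

/* fs/sync.c (abbreviated): fsync(2) and fdatasync(2) both funnel into
 * vfs_fsync_range(); datasync is 0 for fsync(2) and 1 for fdatasync(2).
 */
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
        if (!file->f_op || !file->f_op->fsync)
                return -EINVAL;
        return file->f_op->fsync(file, start, end, datasync);
}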