/*
 *  linux/fs/ext4/fsync.c
 *
 *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
 *  from
 *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
 *                      Laboratoire MASI - Institut Blaise Pascal
 *                      Universite Pierre et Marie Curie (Paris VI)
 *  from
 *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4fs fsync primitive
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 *  Removed unnecessary code duplication for little endian machines
 *  and excessive __inline__s.
 *        Andi Kleen, 1997
 *
 * Major simplifications and cleanup - we only need to do the metadata, because
 * we can depend on generic_block_fdatasync() to sync the data blocks.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>

#include "ext4.h"
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

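/*
 * Debug helper: with EXT4FS_DEBUG defined, walk the inode's list of
 * completed io_end structures under i_completed_io_lock and print each
 * entry together with its list neighbours; otherwise this compiles to an
 * empty function.
 */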
static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;
	unsigned long flags;

	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
		ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
		return;
	}

	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
			   io, inode->i_ino, io0, io1);
	}
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}

/*
 * This function is called from ext4_sync_file().
 *
 * When IO is completed, the work to convert unwritten extents to
 * written is queued on a workqueue but may not get scheduled
 * immediately.  When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps track of a list of pending/completed IO that
 * may still need the conversion.  This function walks the list
 * and converts the unwritten extents of each completed IO to
 * written.
 * The function returns 0 on success, or a negative error code if
 * any of the conversions failed.
 */
int ext4_flush_completed_IO(struct inode *inode)
{
	ext4_io_end_t *io;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;

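	/*
	 * Unlocked fast-path check: an empty list means there is nothing to
	 * convert.  Entries are only removed below while holding
	 * i_completed_io_lock, so the list is re-examined under the lock
	 * before anything is touched.
	 */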
	if (list_empty(&ei->i_completed_io_list))
		return ret;

	dump_completed_IO(inode);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	while (!list_empty(&ei->i_completed_io_list)) {
		io = list_entry(ei->i_completed_io_list.next,
				ext4_io_end_t, list);
		/*
		 * Call ext4_end_io_nolock() to convert the completed
		 * IO to written.
		 *
		 * When ext4_sync_file() is called, the workqueue may already
		 * be about to run the work corresponding to this io
		 * structure, and that work must not find the structure freed
		 * out from under it.
		 *
		 * Thus we need to keep the io structure valid here even after
		 * the conversion has finished.  The io structure carries a
		 * flag so the same range is not converted twice, once by
		 * fsync and once by the background workqueue.
		 */
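		/*
		 * ext4_end_io_nolock() can sleep (the extent conversion may
		 * start a journal handle), so drop the spinlock around the
		 * call and re-take it before looking at the list again.
		 */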
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		ret = ext4_end_io_nolock(io);
		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
		if (ret < 0)
			ret2 = ret;
		else
			list_del_init(&io->list);
	}
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	return (ret2 < 0) ? ret2 : 0;
}

/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file.  This may apply to
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
	struct writeback_control wbc;
	struct dentry *dentry = NULL;
	struct inode *next;
	int ret = 0;

	if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
		return 0;
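	/*
	 * Take our own reference on the inode; as we walk up the directory
	 * chain below, each parent is pinned with igrab() before the child's
	 * reference is dropped with iput(), so no inode in the chain can be
	 * evicted while we are still using it.
	 */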
	inode = igrab(inode);
	while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
		dentry = NULL;
		spin_lock(&inode->i_lock);
		if (!list_empty(&inode->i_dentry)) {
			dentry = list_first_entry(&inode->i_dentry,
						  struct dentry, d_alias);
			dget(dentry);
		}
		spin_unlock(&inode->i_lock);
		if (!dentry)
			break;
		next = igrab(dentry->d_parent->d_inode);
		dput(dentry);
		if (!next)
			break;
		iput(inode);
		inode = next;
		ret = sync_mapping_buffers(inode->i_mapping);
		if (ret)
			break;
		memset(&wbc, 0, sizeof(wbc));
		wbc.sync_mode = WB_SYNC_ALL;
		wbc.nr_to_write = 0;	/* only write out the inode */
		ret = sync_inode(inode, &wbc);
		if (ret)
			break;
	}
	iput(inode);
	return ret;
}

/**
 * __sync_inode - generic_file_fsync without the locking and filemap write-out
 * @inode:	inode to sync
 * @datasync:	only sync essential metadata if true
 *
 * This is just generic_file_fsync without the locking.  This is needed for
 * nojournal mode to make sure this inode's data/metadata makes it to disk
 * properly.  The i_mutex should be held already.
 */
static int __sync_inode(struct inode *inode, int datasync)
{
	int err;
	int ret;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return ret;
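	/*
	 * For fdatasync() the inode itself only needs to be written if a
	 * change that matters for data integrity (e.g. i_size after an
	 * extending write) is pending, which is what I_DIRTY_DATASYNC
	 * indicates.
	 */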
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;
	return ret;
}

/*
 * akpm: A new design for ext4_sync_file().
 *
 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
 * There cannot be a transaction open by this task.
 * Another task could have dirtied this inode.  Its data can be in any
 * state in the journalling system.
 *
 * What we do is just kick off a commit and wait on it.  This will snapshot the
 * inode to disk.
 *
 * i_mutex is not held on entry; it is taken and released inside this
 * function.
 */

int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	J_ASSERT(ext4_journal_current_handle() == NULL);

	trace_ext4_sync_file_enter(file, datasync);

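	/*
	 * Write back and wait on the data pages in the requested range
	 * first; the rest of this function is about making the metadata
	 * (and, in data=journal mode, the journalled data) durable.
	 */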
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	if (inode->i_sb->s_flags & MS_RDONLY)
		goto out;

	ret = ext4_flush_completed_IO(inode);
	if (ret < 0)
		goto out;

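	/*
	 * No journal: fall back to a generic_file_fsync()-style sync of the
	 * buffers and inode metadata, and also sync freshly created parent
	 * directories so a new file cannot be lost across a crash.
	 */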
	if (!journal) {
		ret = __sync_inode(inode, datasync);
		if (!ret && !list_empty(&inode->i_dentry))
			ret = ext4_sync_parent(inode);
		goto out;
	}

	/*
	 * data=writeback,ordered:
	 *  The caller's filemap_fdatawrite()/wait will sync the data.
	 *  Metadata is in the journal, we wait for the proper transaction to
	 *  commit here.
	 *
	 * data=journal:
	 *  filemap_fdatawrite won't do anything (the buffers are clean).
	 *  ext4_force_commit will write the file data into the journal and
	 *  will wait on that.
	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
	 *  (they were dirtied by commit).  But that's OK - the blocks are
	 *  safe in-journal, which is all fsync() needs to ensure.
	 */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		goto out;
	}

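	/*
	 * For fdatasync() it is enough to wait for the transaction recorded
	 * in i_datasync_tid (the last one that changed metadata needed to
	 * find the data, e.g. i_size); plain fsync() waits for i_sync_tid.
	 * If that commit will not itself send a cache flush to the device,
	 * issue one explicitly after waiting so the blocks are durable.
	 */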
	commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	jbd2_log_start_commit(journal, commit_tid);
	ret = jbd2_log_wait_commit(journal, commit_tid);
	if (needs_barrier)
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 out:
	mutex_unlock(&inode->i_mutex);
	trace_ext4_sync_file_exit(inode, ret);
	return ret;
}