Commit | Line | Data |
---|---|---|
1e51764a AB |
1 | /* |
2 | * This file is part of UBIFS. | |
3 | * | |
4 | * Copyright (C) 2006-2008 Nokia Corporation. | |
5 | * Copyright (C) 2006, 2007 University of Szeged, Hungary | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify it | |
8 | * under the terms of the GNU General Public License version 2 as published by | |
9 | * the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | * more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License along with | |
17 | * this program; if not, write to the Free Software Foundation, Inc., 51 | |
18 | * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
19 | * | |
20 | * Authors: Artem Bityutskiy (Битюцкий Артём) | |
21 | * Adrian Hunter | |
22 | * Zoltan Sogor | |
23 | */ | |
24 | ||
25 | /* | |
26 | * This file implements the UBIFS I/O subsystem which provides various I/O-related | |
27 | * helper functions (reading/writing/checking/validating nodes) and implements | |
28 | * write-buffering support. Write buffers help to save space which otherwise | |
29 | * would have been wasted for padding to the nearest minimal I/O unit boundary. | |
30 | * Instead, data first goes to the write-buffer and is flushed when the | |
31 | * buffer is full or when it is not used for some time (by timer). This is | |
32 | * similar to the mechanism used by JFFS2. | |
33 | * | |
34 | * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by | |
35 | * mutexes defined inside these objects. Since sometimes upper-level code | |
36 | * has to lock the write-buffer (e.g. journal space reservation code), many | |
37 | * functions related to write-buffers have a "nolock" suffix which means that | |
38 | * the caller has to lock the write-buffer before calling them. | |
39 | * | |
40 | * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not | |
41 | * aligned, UBIFS starts the next node from the aligned address, and the padded | |
42 | * bytes may contain any rubbish. In other words, UBIFS does not put padding | |
43 | * bytes in those small gaps. Common headers of nodes store real node lengths, | |
44 | * not aligned lengths. Indexing nodes also store real lengths in branches. | |
45 | * | |
46 | * UBIFS uses padding when it pads to the next min. I/O unit. In this case it | |
47 | * uses padding nodes or padding bytes, if the padding node does not fit. | |
48 | * | |
49 | * All UBIFS nodes are protected by CRC checksums and UBIFS checks all nodes | |
50 | * every time they are read from the flash media. | |
51 | */ | |
52 | ||
53 | #include <linux/crc32.h> | |
54 | #include "ubifs.h" | |
55 | ||
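The header comment above mentions two recurring size computations: nodes are stored at 8-byte aligned addresses, and whatever the write-buffer cannot absorb is padded out to the next minimal I/O unit boundary. The following is only an illustrative, standalone sketch of that arithmetic, not UBIFS code; the 2048-byte minimal I/O unit and the 161-byte node length are assumed values, and the ALIGN macro mirrors the kernel's definition.

```c
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int min_io_size = 2048;  /* assumed NAND page size */
	int node_len = 161;      /* hypothetical node length */

	/* Nodes start at 8-byte aligned addresses. */
	int aligned = ALIGN(node_len, 8);
	/* Bytes of padding needed to reach the next min. I/O unit boundary. */
	int pad = ALIGN(aligned, min_io_size) - aligned;

	printf("node %d -> aligned %d, pad %d\n", node_len, aligned, pad);
	return 0;
}
```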
ff46d7b3 AH |
56 | /** |
57 | * ubifs_ro_mode - switch UBIFS to read-only mode. | |
58 | * @c: UBIFS file-system description object | |
59 | * @err: error code which is the reason of switching to R/O mode | |
60 | */ | |
61 | void ubifs_ro_mode(struct ubifs_info *c, int err) | |
62 | { | |
63 | if (!c->ro_media) { | |
64 | c->ro_media = 1; | |
ccb3eba7 | 65 | c->no_chk_data_crc = 0; |
ff46d7b3 AH |
66 | ubifs_warn("switched to read-only mode, error %d", err); |
67 | dbg_dump_stack(); | |
68 | } | |
69 | } | |
70 | ||
1e51764a AB |
71 | /** |
72 | * ubifs_check_node - check node. | |
73 | * @c: UBIFS file-system description object | |
74 | * @buf: node to check | |
75 | * @lnum: logical eraseblock number | |
76 | * @offs: offset within the logical eraseblock | |
77 | * @quiet: print no messages | |
2953e73f | 78 | * @chk_crc: indicates whether to always check the CRC |
1e51764a AB |
79 | * |
80 | * This function checks node magic number and CRC checksum. This function also | |
81 | * validates node length to prevent UBIFS from becoming crazy when an attacker | |
82 | * feeds it a file-system image with incorrect nodes. For example, too large | |
83 | * node length in the common header could cause UBIFS to read memory outside of | |
84 | * allocated buffer when checking the CRC checksum. | |
85 | * | |
86 | * This function returns zero in case of success and %-EUCLEAN in case of bad | |
87 | * CRC or magic. | |
88 | */ | |
89 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, | |
2953e73f | 90 | int offs, int quiet, int chk_crc) |
1e51764a AB |
91 | { |
92 | int err = -EINVAL, type, node_len; | |
93 | uint32_t crc, node_crc, magic; | |
94 | const struct ubifs_ch *ch = buf; | |
95 | ||
96 | ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); | |
97 | ubifs_assert(!(offs & 7) && offs < c->leb_size); | |
98 | ||
99 | magic = le32_to_cpu(ch->magic); | |
100 | if (magic != UBIFS_NODE_MAGIC) { | |
101 | if (!quiet) | |
102 | ubifs_err("bad magic %#08x, expected %#08x", | |
103 | magic, UBIFS_NODE_MAGIC); | |
104 | err = -EUCLEAN; | |
105 | goto out; | |
106 | } | |
107 | ||
108 | type = ch->node_type; | |
109 | if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) { | |
110 | if (!quiet) | |
111 | ubifs_err("bad node type %d", type); | |
112 | goto out; | |
113 | } | |
114 | ||
115 | node_len = le32_to_cpu(ch->len); | |
116 | if (node_len + offs > c->leb_size) | |
117 | goto out_len; | |
118 | ||
119 | if (c->ranges[type].max_len == 0) { | |
120 | if (node_len != c->ranges[type].len) | |
121 | goto out_len; | |
122 | } else if (node_len < c->ranges[type].min_len || | |
123 | node_len > c->ranges[type].max_len) | |
124 | goto out_len; | |
125 | ||
2953e73f AH |
126 | if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc) |
127 | if (c->no_chk_data_crc) | |
128 | return 0; | |
129 | ||
1e51764a AB |
130 | crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); |
131 | node_crc = le32_to_cpu(ch->crc); | |
132 | if (crc != node_crc) { | |
133 | if (!quiet) | |
134 | ubifs_err("bad CRC: calculated %#08x, read %#08x", | |
135 | crc, node_crc); | |
136 | err = -EUCLEAN; | |
137 | goto out; | |
138 | } | |
139 | ||
140 | return 0; | |
141 | ||
142 | out_len: | |
143 | if (!quiet) | |
144 | ubifs_err("bad node length %d", node_len); | |
145 | out: | |
146 | if (!quiet) { | |
147 | ubifs_err("bad node at LEB %d:%d", lnum, offs); | |
148 | dbg_dump_node(c, buf); | |
149 | dbg_dump_stack(); | |
150 | } | |
151 | return err; | |
152 | } | |
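As a minimal sketch of the length validation performed above (again, not UBIFS code): fixed-size node types must match their size exactly, variable-sized types must fall within their [min_len, max_len] range, and the node must not run past the end of the LEB. The range values and LEB size below are hypothetical; the real values live in c->ranges[] and c->leb_size.

```c
#include <stdio.h>

struct range { int len, min_len, max_len; };

/* Same sanity check as in ubifs_check_node(), as a standalone helper. */
static int node_len_ok(const struct range *r, int node_len, int offs,
		       int leb_size)
{
	if (node_len + offs > leb_size)
		return 0;                          /* runs past the LEB end */
	if (r->max_len == 0)
		return node_len == r->len;         /* fixed-size node type */
	return node_len >= r->min_len && node_len <= r->max_len;
}

int main(void)
{
	struct range data = { 0, 49, 4144 };       /* hypothetical range */

	printf("%d %d\n", node_len_ok(&data, 200, 0, 126976),
	       node_len_ok(&data, 8192, 0, 126976));
	return 0;
}
```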
153 | ||
154 | /** | |
155 | * ubifs_pad - pad flash space. | |
156 | * @c: UBIFS file-system description object | |
157 | * @buf: buffer to put padding to | |
158 | * @pad: how many bytes to pad | |
159 | * | |
160 | * The flash media obliges us to write only in chunks of %c->min_io_size and | |
161 | * when we have to write less data we add a padding node to the write-buffer and | |
162 | * pad it to the next minimal I/O unit's boundary. Padding nodes help when the | |
163 | * media is being scanned. If the amount of wasted space is not enough to fit a | |
164 | * padding node which takes %UBIFS_PAD_NODE_SZ bytes, we write the padding byte | |
165 | * pattern (%UBIFS_PADDING_BYTE). | |
166 | * | |
167 | * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is | |
168 | * used. | |
169 | */ | |
170 | void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) | |
171 | { | |
172 | uint32_t crc; | |
173 | ||
174 | ubifs_assert(pad >= 0 && !(pad & 7)); | |
175 | ||
176 | if (pad >= UBIFS_PAD_NODE_SZ) { | |
177 | struct ubifs_ch *ch = buf; | |
178 | struct ubifs_pad_node *pad_node = buf; | |
179 | ||
180 | ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); | |
181 | ch->node_type = UBIFS_PAD_NODE; | |
182 | ch->group_type = UBIFS_NO_NODE_GROUP; | |
183 | ch->padding[0] = ch->padding[1] = 0; | |
184 | ch->sqnum = 0; | |
185 | ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ); | |
186 | pad -= UBIFS_PAD_NODE_SZ; | |
187 | pad_node->pad_len = cpu_to_le32(pad); | |
188 | crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8); | |
189 | ch->crc = cpu_to_le32(crc); | |
190 | memset(buf + UBIFS_PAD_NODE_SZ, 0, pad); | |
191 | } else if (pad > 0) | |
192 | /* Too little space, padding node won't fit */ | |
193 | memset(buf, UBIFS_PADDING_BYTE, pad); | |
194 | } | |
195 | ||
196 | /** | |
197 | * next_sqnum - get next sequence number. | |
198 | * @c: UBIFS file-system description object | |
199 | */ | |
200 | static unsigned long long next_sqnum(struct ubifs_info *c) | |
201 | { | |
202 | unsigned long long sqnum; | |
203 | ||
204 | spin_lock(&c->cnt_lock); | |
205 | sqnum = ++c->max_sqnum; | |
206 | spin_unlock(&c->cnt_lock); | |
207 | ||
208 | if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) { | |
209 | if (sqnum >= SQNUM_WATERMARK) { | |
210 | ubifs_err("sequence number overflow %llu, end of life", | |
211 | sqnum); | |
212 | ubifs_ro_mode(c, -EINVAL); | |
213 | } | |
214 | ubifs_warn("running out of sequence numbers, end of life soon"); | |
215 | } | |
216 | ||
217 | return sqnum; | |
218 | } | |
219 | ||
220 | /** | |
221 | * ubifs_prepare_node - prepare node to be written to flash. | |
222 | * @c: UBIFS file-system description object | |
223 | * @node: the node to pad | |
224 | * @len: node length | |
225 | * @pad: if the buffer has to be padded | |
226 | * | |
227 | * This function prepares node at @node to be written to the media - it | |
228 | * calculates node CRC, fills the common header, and adds proper padding up to | |
229 | * the next minimum I/O unit if @pad is not zero. | |
230 | */ | |
231 | void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) | |
232 | { | |
233 | uint32_t crc; | |
234 | struct ubifs_ch *ch = node; | |
235 | unsigned long long sqnum = next_sqnum(c); | |
236 | ||
237 | ubifs_assert(len >= UBIFS_CH_SZ); | |
238 | ||
239 | ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); | |
240 | ch->len = cpu_to_le32(len); | |
241 | ch->group_type = UBIFS_NO_NODE_GROUP; | |
242 | ch->sqnum = cpu_to_le64(sqnum); | |
243 | ch->padding[0] = ch->padding[1] = 0; | |
244 | crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); | |
245 | ch->crc = cpu_to_le32(crc); | |
246 | ||
247 | if (pad) { | |
248 | len = ALIGN(len, 8); | |
249 | pad = ALIGN(len, c->min_io_size) - len; | |
250 | ubifs_pad(c, node + len, pad); | |
251 | } | |
252 | } | |
253 | ||
254 | /** | |
255 | * ubifs_prep_grp_node - prepare node of a group to be written to flash. | |
256 | * @c: UBIFS file-system description object | |
257 | * @node: the node to pad | |
258 | * @len: node length | |
259 | * @last: indicates the last node of the group | |
260 | * | |
261 | * This function prepares node at @node to be written to the media - it | |
262 | * calculates node CRC and fills the common header. | |
263 | */ | |
264 | void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) | |
265 | { | |
266 | uint32_t crc; | |
267 | struct ubifs_ch *ch = node; | |
268 | unsigned long long sqnum = next_sqnum(c); | |
269 | ||
270 | ubifs_assert(len >= UBIFS_CH_SZ); | |
271 | ||
272 | ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); | |
273 | ch->len = cpu_to_le32(len); | |
274 | if (last) | |
275 | ch->group_type = UBIFS_LAST_OF_NODE_GROUP; | |
276 | else | |
277 | ch->group_type = UBIFS_IN_NODE_GROUP; | |
278 | ch->sqnum = cpu_to_le64(sqnum); | |
279 | ch->padding[0] = ch->padding[1] = 0; | |
280 | crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); | |
281 | ch->crc = cpu_to_le32(crc); | |
282 | } | |
283 | ||
284 | /** | |
285 | * wbuf_timer_callback_nolock - write-buffer timer callback function. | |
286 | * @data: timer data (write-buffer descriptor) | |
287 | * | |
288 | * This function is called when the write-buffer timer expires. | |
289 | */ | |
290 | static void wbuf_timer_callback_nolock(unsigned long data) | |
291 | { | |
292 | struct ubifs_wbuf *wbuf = (struct ubifs_wbuf *)data; | |
293 | ||
294 | wbuf->need_sync = 1; | |
295 | wbuf->c->need_wbuf_sync = 1; | |
296 | ubifs_wake_up_bgt(wbuf->c); | |
297 | } | |
298 | ||
299 | /** | |
300 | * new_wbuf_timer_nolock - start new write-buffer timer. | |
301 | * @wbuf: write-buffer descriptor | |
302 | */ | |
303 | static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) | |
304 | { | |
305 | ubifs_assert(!timer_pending(&wbuf->timer)); | |
306 | ||
307 | if (!wbuf->timeout) | |
308 | return; | |
309 | ||
310 | wbuf->timer.expires = jiffies + wbuf->timeout; | |
311 | add_timer(&wbuf->timer); | |
312 | } | |
313 | ||
314 | /** | |
315 | * cancel_wbuf_timer_nolock - cancel write-buffer timer. | |
316 | * @wbuf: write-buffer descriptor | |
317 | */ | |
318 | static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) | |
319 | { | |
320 | /* | |
321 | * If the syncer is waiting for the lock (from the background thread's | |
322 | * context) and another task is changing write-buffer then the syncing | |
323 | * should be canceled. | |
324 | */ | |
325 | wbuf->need_sync = 0; | |
326 | del_timer(&wbuf->timer); | |
327 | } | |
328 | ||
329 | /** | |
330 | * ubifs_wbuf_sync_nolock - synchronize write-buffer. | |
331 | * @wbuf: write-buffer to synchronize | |
332 | * | |
333 | * This function synchronizes write-buffer @wbuf and returns zero in case of | |
334 | * success or a negative error code in case of failure. | |
335 | */ | |
336 | int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) | |
337 | { | |
338 | struct ubifs_info *c = wbuf->c; | |
339 | int err, dirt; | |
340 | ||
341 | cancel_wbuf_timer_nolock(wbuf); | |
342 | if (!wbuf->used || wbuf->lnum == -1) | |
343 | /* Write-buffer is empty or not seeked */ | |
344 | return 0; | |
345 | ||
346 | dbg_io("LEB %d:%d, %d bytes", | |
347 | wbuf->lnum, wbuf->offs, wbuf->used); | |
348 | ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); | |
349 | ubifs_assert(!(wbuf->avail & 7)); | |
350 | ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size); | |
351 | ||
352 | if (c->ro_media) | |
353 | return -EROFS; | |
354 | ||
355 | ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail); | |
356 | err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, | |
357 | c->min_io_size, wbuf->dtype); | |
358 | if (err) { | |
359 | ubifs_err("cannot write %d bytes to LEB %d:%d", | |
360 | c->min_io_size, wbuf->lnum, wbuf->offs); | |
361 | dbg_dump_stack(); | |
362 | return err; | |
363 | } | |
364 | ||
365 | dirt = wbuf->avail; | |
366 | ||
367 | spin_lock(&wbuf->lock); | |
368 | wbuf->offs += c->min_io_size; | |
369 | wbuf->avail = c->min_io_size; | |
370 | wbuf->used = 0; | |
371 | wbuf->next_ino = 0; | |
372 | spin_unlock(&wbuf->lock); | |
373 | ||
374 | if (wbuf->sync_callback) | |
375 | err = wbuf->sync_callback(c, wbuf->lnum, | |
376 | c->leb_size - wbuf->offs, dirt); | |
377 | return err; | |
378 | } | |
379 | ||
380 | /** | |
381 | * ubifs_wbuf_seek_nolock - seek write-buffer. | |
382 | * @wbuf: write-buffer | |
383 | * @lnum: logical eraseblock number to seek to | |
384 | * @offs: logical eraseblock offset to seek to | |
385 | * @dtype: data type | |
386 | * | |
387 | * This function targets the write buffer to logical eraseblock @lnum:@offs. | |
388 | * The write-buffer is synchronized if it is not empty. Returns zero in case of | |
389 | * success and a negative error code in case of failure. | |
390 | */ | |
391 | int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, | |
392 | int dtype) | |
393 | { | |
394 | const struct ubifs_info *c = wbuf->c; | |
395 | ||
396 | dbg_io("LEB %d:%d", lnum, offs); | |
397 | ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); | |
398 | ubifs_assert(offs >= 0 && offs <= c->leb_size); | |
399 | ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); | |
400 | ubifs_assert(lnum != wbuf->lnum); | |
401 | ||
402 | if (wbuf->used > 0) { | |
403 | int err = ubifs_wbuf_sync_nolock(wbuf); | |
404 | ||
405 | if (err) | |
406 | return err; | |
407 | } | |
408 | ||
409 | spin_lock(&wbuf->lock); | |
410 | wbuf->lnum = lnum; | |
411 | wbuf->offs = offs; | |
412 | wbuf->avail = c->min_io_size; | |
413 | wbuf->used = 0; | |
414 | spin_unlock(&wbuf->lock); | |
415 | wbuf->dtype = dtype; | |
416 | ||
417 | return 0; | |
418 | } | |
419 | ||
420 | /** | |
421 | * ubifs_bg_wbufs_sync - synchronize write-buffers. | |
422 | * @c: UBIFS file-system description object | |
423 | * | |
424 | * This function is called by the background thread to synchronize write-buffers. | |
425 | * Returns zero in case of success and a negative error code in case of | |
426 | * failure. | |
427 | */ | |
428 | int ubifs_bg_wbufs_sync(struct ubifs_info *c) | |
429 | { | |
430 | int err, i; | |
431 | ||
432 | if (!c->need_wbuf_sync) | |
433 | return 0; | |
434 | c->need_wbuf_sync = 0; | |
435 | ||
436 | if (c->ro_media) { | |
437 | err = -EROFS; | |
438 | goto out_timers; | |
439 | } | |
440 | ||
441 | dbg_io("synchronize"); | |
442 | for (i = 0; i < c->jhead_cnt; i++) { | |
443 | struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; | |
444 | ||
445 | cond_resched(); | |
446 | ||
447 | /* | |
448 | * If the mutex is locked then wbuf is being changed, so | |
449 | * synchronization is not necessary. | |
450 | */ | |
451 | if (mutex_is_locked(&wbuf->io_mutex)) | |
452 | continue; | |
453 | ||
454 | mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); | |
455 | if (!wbuf->need_sync) { | |
456 | mutex_unlock(&wbuf->io_mutex); | |
457 | continue; | |
458 | } | |
459 | ||
460 | err = ubifs_wbuf_sync_nolock(wbuf); | |
461 | mutex_unlock(&wbuf->io_mutex); | |
462 | if (err) { | |
463 | ubifs_err("cannot sync write-buffer, error %d", err); | |
464 | ubifs_ro_mode(c, err); | |
465 | goto out_timers; | |
466 | } | |
467 | } | |
468 | ||
469 | return 0; | |
470 | ||
471 | out_timers: | |
472 | /* Cancel all timers to prevent repeated errors */ | |
473 | for (i = 0; i < c->jhead_cnt; i++) { | |
474 | struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; | |
475 | ||
476 | mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); | |
477 | cancel_wbuf_timer_nolock(wbuf); | |
478 | mutex_unlock(&wbuf->io_mutex); | |
479 | } | |
480 | return err; | |
481 | } | |
482 | ||
483 | /** | |
484 | * ubifs_wbuf_write_nolock - write data to flash via write-buffer. | |
485 | * @wbuf: write-buffer | |
486 | * @buf: node to write | |
487 | * @len: node length | |
488 | * | |
489 | * This function writes data to flash via write-buffer @wbuf. This means that | |
490 | * the last piece of the node won't reach the flash media immediately if it | |
491 | * does not take a whole minimal I/O unit. Instead, the node will sit in RAM | |
492 | * until the write-buffer is synchronized (e.g., by timer). | |
493 | * | |
494 | * This function returns zero in case of success and a negative error code in | |
495 | * case of failure. If the node cannot be written because there is no more | |
496 | * space in this logical eraseblock, %-ENOSPC is returned. | |
497 | */ | |
498 | int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) | |
499 | { | |
500 | struct ubifs_info *c = wbuf->c; | |
501 | int err, written, n, aligned_len = ALIGN(len, 8), offs; | |
502 | ||
503 | dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len, | |
504 | dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum, | |
505 | wbuf->offs + wbuf->used); | |
506 | ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); | |
507 | ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); | |
508 | ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); | |
509 | ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size); | |
510 | ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); | |
511 | ||
512 | if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { | |
513 | err = -ENOSPC; | |
514 | goto out; | |
515 | } | |
516 | ||
517 | cancel_wbuf_timer_nolock(wbuf); | |
518 | ||
519 | if (c->ro_media) | |
520 | return -EROFS; | |
521 | ||
522 | if (aligned_len <= wbuf->avail) { | |
523 | /* | |
524 | * The node is not very large and fits entirely within | |
525 | * the write-buffer. | |
526 | */ | |
527 | memcpy(wbuf->buf + wbuf->used, buf, len); | |
528 | ||
529 | if (aligned_len == wbuf->avail) { | |
530 | dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, | |
531 | wbuf->offs); | |
532 | err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, | |
533 | wbuf->offs, c->min_io_size, | |
534 | wbuf->dtype); | |
535 | if (err) | |
536 | goto out; | |
537 | ||
538 | spin_lock(&wbuf->lock); | |
539 | wbuf->offs += c->min_io_size; | |
540 | wbuf->avail = c->min_io_size; | |
541 | wbuf->used = 0; | |
542 | wbuf->next_ino = 0; | |
543 | spin_unlock(&wbuf->lock); | |
544 | } else { | |
545 | spin_lock(&wbuf->lock); | |
546 | wbuf->avail -= aligned_len; | |
547 | wbuf->used += aligned_len; | |
548 | spin_unlock(&wbuf->lock); | |
549 | } | |
550 | ||
551 | goto exit; | |
552 | } | |
553 | ||
554 | /* | |
555 | * The node is too large to fit entirely within the current minimal | |
556 | * I/O unit. We have to fill and flush the write-buffer and switch | |
557 | * to the next min. I/O unit. | |
558 | */ | |
559 | dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs); | |
560 | memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); | |
561 | err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, | |
562 | c->min_io_size, wbuf->dtype); | |
563 | if (err) | |
564 | goto out; | |
565 | ||
566 | offs = wbuf->offs + c->min_io_size; | |
567 | len -= wbuf->avail; | |
568 | aligned_len -= wbuf->avail; | |
569 | written = wbuf->avail; | |
570 | ||
571 | /* | |
572 | * The remaining data may span more whole min. I/O units, so write | |
573 | * whatever is a multiple of the min. I/O unit size directly to the | |
574 | * flash media. We align the node length to an 8-byte boundary because | |
575 | * we flush the wbuf anyway if the remaining space is less than 8 bytes. | |
576 | */ | |
577 | n = aligned_len >> c->min_io_shift; | |
578 | if (n) { | |
579 | n <<= c->min_io_shift; | |
580 | dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, offs); | |
581 | err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, offs, n, | |
582 | wbuf->dtype); | |
583 | if (err) | |
584 | goto out; | |
585 | offs += n; | |
586 | aligned_len -= n; | |
587 | len -= n; | |
588 | written += n; | |
589 | } | |
590 | ||
591 | spin_lock(&wbuf->lock); | |
592 | if (aligned_len) | |
593 | /* | |
594 | * And now we have what's left, which does not take a whole | |
595 | * min. I/O unit, so write it to the write-buffer and we are | |
596 | * done. | |
597 | */ | |
598 | memcpy(wbuf->buf, buf + written, len); | |
599 | ||
600 | wbuf->offs = offs; | |
601 | wbuf->used = aligned_len; | |
602 | wbuf->avail = c->min_io_size - aligned_len; | |
603 | wbuf->next_ino = 0; | |
604 | spin_unlock(&wbuf->lock); | |
605 | ||
606 | exit: | |
607 | if (wbuf->sync_callback) { | |
608 | int free = c->leb_size - wbuf->offs - wbuf->used; | |
609 | ||
610 | err = wbuf->sync_callback(c, wbuf->lnum, free, 0); | |
611 | if (err) | |
612 | goto out; | |
613 | } | |
614 | ||
615 | if (wbuf->used) | |
616 | new_wbuf_timer_nolock(wbuf); | |
617 | ||
618 | return 0; | |
619 | ||
620 | out: | |
621 | ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", | |
622 | len, wbuf->lnum, wbuf->offs, err); | |
623 | dbg_dump_node(c, buf); | |
624 | dbg_dump_stack(); | |
625 | dbg_dump_leb(c, wbuf->lnum); | |
626 | return err; | |
627 | } | |
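The split performed above when a node does not fit in the write-buffer can be summarized numerically. The sketch below is not UBIFS code and uses assumed sizes (a 2048-byte minimal I/O unit, 512 bytes available in the buffer, a 5000-byte node): the first chunk fills and flushes the buffer, whole minimal I/O units go straight to flash, and the tail stays buffered.

```c
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int min_io = 2048;                  /* assumed minimal I/O unit */
	int avail = 512;                    /* assumed free space in the wbuf */
	int len = 5000;                     /* hypothetical node length */
	int aligned_len = ALIGN(len, 8);

	int first = avail;                                       /* fills and flushes the wbuf */
	int direct = ((aligned_len - first) / min_io) * min_io;  /* straight to flash */
	int tail = aligned_len - first - direct;                 /* stays in the wbuf */

	printf("flush %d, direct %d, buffered %d\n", first, direct, tail);
	return 0;
}
```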
628 | ||
629 | /** | |
630 | * ubifs_write_node - write node to the media. | |
631 | * @c: UBIFS file-system description object | |
632 | * @buf: the node to write | |
633 | * @len: node length | |
634 | * @lnum: logical eraseblock number | |
635 | * @offs: offset within the logical eraseblock | |
636 | * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) | |
637 | * | |
638 | * This function automatically fills node magic number, assigns sequence | |
639 | * number, and calculates node CRC checksum. The length of the @buf buffer has | |
640 | * to be aligned to the minimal I/O unit size. This function automatically | |
641 | * appends padding node and padding bytes if needed. Returns zero in case of | |
642 | * success and a negative error code in case of failure. | |
643 | */ | |
644 | int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, | |
645 | int offs, int dtype) | |
646 | { | |
647 | int err, buf_len = ALIGN(len, c->min_io_size); | |
648 | ||
649 | dbg_io("LEB %d:%d, %s, length %d (aligned %d)", | |
650 | lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len, | |
651 | buf_len); | |
652 | ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); | |
653 | ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); | |
654 | ||
655 | if (c->ro_media) | |
656 | return -EROFS; | |
657 | ||
658 | ubifs_prepare_node(c, buf, len, 1); | |
659 | err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype); | |
660 | if (err) { | |
661 | ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", | |
662 | buf_len, lnum, offs, err); | |
663 | dbg_dump_node(c, buf); | |
664 | dbg_dump_stack(); | |
665 | } | |
666 | ||
667 | return err; | |
668 | } | |
669 | ||
670 | /** | |
671 | * ubifs_read_node_wbuf - read node from the media or write-buffer. | |
672 | * @wbuf: wbuf to check for un-written data | |
673 | * @buf: buffer to read to | |
674 | * @type: node type | |
675 | * @len: node length | |
676 | * @lnum: logical eraseblock number | |
677 | * @offs: offset within the logical eraseblock | |
678 | * | |
679 | * This function reads a node of known type and length, checks it and stores it | |
680 | * in @buf. If the node partially or fully sits in the write-buffer, this | |
681 | * function takes data from the buffer, otherwise it reads the flash media. | |
682 | * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative | |
683 | * error code in case of failure. | |
684 | */ | |
685 | int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, | |
686 | int lnum, int offs) | |
687 | { | |
688 | const struct ubifs_info *c = wbuf->c; | |
689 | int err, rlen, overlap; | |
690 | struct ubifs_ch *ch = buf; | |
691 | ||
692 | dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); | |
693 | ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); | |
694 | ubifs_assert(!(offs & 7) && offs < c->leb_size); | |
695 | ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); | |
696 | ||
697 | spin_lock(&wbuf->lock); | |
698 | overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs); | |
699 | if (!overlap) { | |
700 | /* We may safely unlock the write-buffer and read the data */ | |
701 | spin_unlock(&wbuf->lock); | |
702 | return ubifs_read_node(c, buf, type, len, lnum, offs); | |
703 | } | |
704 | ||
705 | /* Don't read under wbuf */ | |
706 | rlen = wbuf->offs - offs; | |
707 | if (rlen < 0) | |
708 | rlen = 0; | |
709 | ||
710 | /* Copy the rest from the write-buffer */ | |
711 | memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen); | |
712 | spin_unlock(&wbuf->lock); | |
713 | ||
714 | if (rlen > 0) { | |
715 | /* Read everything that goes before write-buffer */ | |
716 | err = ubi_read(c->ubi, lnum, buf, offs, rlen); | |
717 | if (err && err != -EBADMSG) { | |
718 | ubifs_err("failed to read node %d from LEB %d:%d, " | |
719 | "error %d", type, lnum, offs, err); | |
720 | dbg_dump_stack(); | |
721 | return err; | |
722 | } | |
723 | } | |
724 | ||
725 | if (type != ch->node_type) { | |
726 | ubifs_err("bad node type (%d but expected %d)", | |
727 | ch->node_type, type); | |
728 | goto out; | |
729 | } | |
730 | ||
2953e73f | 731 | err = ubifs_check_node(c, buf, lnum, offs, 0, 0); |
1e51764a AB |
732 | if (err) { |
733 | ubifs_err("expected node type %d", type); | |
734 | return err; | |
735 | } | |
736 | ||
737 | rlen = le32_to_cpu(ch->len); | |
738 | if (rlen != len) { | |
739 | ubifs_err("bad node length %d, expected %d", rlen, len); | |
740 | goto out; | |
741 | } | |
742 | ||
743 | return 0; | |
744 | ||
745 | out: | |
746 | ubifs_err("bad node at LEB %d:%d", lnum, offs); | |
747 | dbg_dump_node(c, buf); | |
748 | dbg_dump_stack(); | |
749 | return -EINVAL; | |
750 | } | |
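A worked example of the overlap handling above may help; this is only a sketch with assumed numbers, not UBIFS code. If the write-buffer holds data from offset 4096 of the same LEB and the requested node spans 3584..4608, the first 512 bytes come from the flash and the remaining 512 bytes from the buffer.

```c
#include <stdio.h>

int main(void)
{
	int wbuf_lnum = 7, wbuf_offs = 4096;    /* data from 7:4096 onwards is buffered */
	int lnum = 7, offs = 3584, len = 1024;  /* node to read */

	int overlap = (lnum == wbuf_lnum && offs + len > wbuf_offs);
	int rlen = len;                         /* bytes to read from flash */

	if (overlap) {
		rlen = wbuf_offs - offs;
		if (rlen < 0)
			rlen = 0;
	}
	printf("from flash: %d bytes, from wbuf: %d bytes\n", rlen, len - rlen);
	return 0;
}
```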
751 | ||
752 | /** | |
753 | * ubifs_read_node - read node. | |
754 | * @c: UBIFS file-system description object | |
755 | * @buf: buffer to read to | |
756 | * @type: node type | |
757 | * @len: node length (not aligned) | |
758 | * @lnum: logical eraseblock number | |
759 | * @offs: offset within the logical eraseblock | |
760 | * | |
761 | * This function reads a node of known type and length, checks it and stores | |
762 | * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched | |
763 | * and a negative error code in case of failure. | |
764 | */ | |
765 | int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, | |
766 | int lnum, int offs) | |
767 | { | |
768 | int err, l; | |
769 | struct ubifs_ch *ch = buf; | |
770 | ||
771 | dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); | |
772 | ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); | |
773 | ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size); | |
774 | ubifs_assert(!(offs & 7) && offs < c->leb_size); | |
775 | ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); | |
776 | ||
777 | err = ubi_read(c->ubi, lnum, buf, offs, len); | |
778 | if (err && err != -EBADMSG) { | |
779 | ubifs_err("cannot read node %d from LEB %d:%d, error %d", | |
780 | type, lnum, offs, err); | |
781 | return err; | |
782 | } | |
783 | ||
784 | if (type != ch->node_type) { | |
785 | ubifs_err("bad node type (%d but expected %d)", | |
786 | ch->node_type, type); | |
787 | goto out; | |
788 | } | |
789 | ||
2953e73f | 790 | err = ubifs_check_node(c, buf, lnum, offs, 0, 0); |
1e51764a AB |
791 | if (err) { |
792 | ubifs_err("expected node type %d", type); | |
793 | return err; | |
794 | } | |
795 | ||
796 | l = le32_to_cpu(ch->len); | |
797 | if (l != len) { | |
798 | ubifs_err("bad node length %d, expected %d", l, len); | |
799 | goto out; | |
800 | } | |
801 | ||
802 | return 0; | |
803 | ||
804 | out: | |
805 | ubifs_err("bad node at LEB %d:%d", lnum, offs); | |
806 | dbg_dump_node(c, buf); | |
807 | dbg_dump_stack(); | |
808 | return -EINVAL; | |
809 | } | |
810 | ||
811 | /** | |
812 | * ubifs_wbuf_init - initialize write-buffer. | |
813 | * @c: UBIFS file-system description object | |
814 | * @wbuf: write-buffer to initialize | |
815 | * | |
816 | * This function initializes the write-buffer. Returns zero in case of success | |
817 | * and %-ENOMEM in case of failure. | |
818 | */ | |
819 | int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) | |
820 | { | |
821 | size_t size; | |
822 | ||
823 | wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL); | |
824 | if (!wbuf->buf) | |
825 | return -ENOMEM; | |
826 | ||
827 | size = (c->min_io_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); | |
828 | wbuf->inodes = kmalloc(size, GFP_KERNEL); | |
829 | if (!wbuf->inodes) { | |
830 | kfree(wbuf->buf); | |
831 | wbuf->buf = NULL; | |
832 | return -ENOMEM; | |
833 | } | |
834 | ||
835 | wbuf->used = 0; | |
836 | wbuf->lnum = wbuf->offs = -1; | |
837 | wbuf->avail = c->min_io_size; | |
838 | wbuf->dtype = UBI_UNKNOWN; | |
839 | wbuf->sync_callback = NULL; | |
840 | mutex_init(&wbuf->io_mutex); | |
841 | spin_lock_init(&wbuf->lock); | |
842 | ||
843 | wbuf->c = c; | |
844 | init_timer(&wbuf->timer); | |
845 | wbuf->timer.function = wbuf_timer_callback_nolock; | |
846 | wbuf->timer.data = (unsigned long)wbuf; | |
847 | wbuf->timeout = DEFAULT_WBUF_TIMEOUT; | |
848 | wbuf->next_ino = 0; | |
849 | ||
850 | return 0; | |
851 | } | |
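The sizing of the inode array allocated above follows from the common header: every node occupies at least UBIFS_CH_SZ bytes, so a buffer of min_io_size bytes can never hold more than min_io_size / UBIFS_CH_SZ nodes, and one extra slot gives slack. The sketch below (not UBIFS code) just shows that arithmetic with an assumed 2048-byte minimal I/O unit and the 24-byte common header size.

```c
#include <stdio.h>

int main(void)
{
	int min_io_size = 2048;  /* assumed minimal I/O unit */
	int ch_sz = 24;          /* size of struct ubifs_ch (common header) */

	printf("at most %d nodes per wbuf, array holds %d entries\n",
	       min_io_size / ch_sz, min_io_size / ch_sz + 1);
	return 0;
}
```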
852 | ||
853 | /** | |
854 | * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. | |
855 | * @wbuf: the write-buffer to add to | |
856 | * @inum: the inode number | |
857 | * | |
858 | * This function adds an inode number to the inode array of the write-buffer. | |
859 | */ | |
860 | void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum) | |
861 | { | |
862 | if (!wbuf->buf) | |
863 | /* NOR flash or something similar */ | |
864 | return; | |
865 | ||
866 | spin_lock(&wbuf->lock); | |
867 | if (wbuf->used) | |
868 | wbuf->inodes[wbuf->next_ino++] = inum; | |
869 | spin_unlock(&wbuf->lock); | |
870 | } | |
871 | ||
872 | /** | |
873 | * wbuf_has_ino - returns whether the wbuf contains data from the inode. | |
874 | * @wbuf: the write-buffer | |
875 | * @inum: the inode number | |
876 | * | |
877 | * This function returns %1 if the write-buffer contains some data from the | |
878 | * given inode, otherwise it returns %0. | |
879 | */ | |
880 | static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum) | |
881 | { | |
882 | int i, ret = 0; | |
883 | ||
884 | spin_lock(&wbuf->lock); | |
885 | for (i = 0; i < wbuf->next_ino; i++) | |
886 | if (inum == wbuf->inodes[i]) { | |
887 | ret = 1; | |
888 | break; | |
889 | } | |
890 | spin_unlock(&wbuf->lock); | |
891 | ||
892 | return ret; | |
893 | } | |
894 | ||
895 | /** | |
896 | * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode. | |
897 | * @c: UBIFS file-system description object | |
898 | * @inode: inode to synchronize | |
899 | * | |
900 | * This function synchronizes write-buffers which contain nodes belonging to | |
901 | * @inode. Returns zero in case of success and a negative error code in case of | |
902 | * failure. | |
903 | */ | |
904 | int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) | |
905 | { | |
906 | int i, err = 0; | |
907 | ||
908 | for (i = 0; i < c->jhead_cnt; i++) { | |
909 | struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; | |
910 | ||
911 | if (i == GCHD) | |
912 | /* | |
913 | * GC head is special, do not look at it. Even if the | |
914 | * head contains something related to this inode, it is | |
915 | * a _copy_ of corresponding on-flash node which sits | |
916 | * somewhere else. | |
917 | */ | |
918 | continue; | |
919 | ||
920 | if (!wbuf_has_ino(wbuf, inode->i_ino)) | |
921 | continue; | |
922 | ||
923 | mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); | |
924 | if (wbuf_has_ino(wbuf, inode->i_ino)) | |
925 | err = ubifs_wbuf_sync_nolock(wbuf); | |
926 | mutex_unlock(&wbuf->io_mutex); | |
927 | ||
928 | if (err) { | |
929 | ubifs_ro_mode(c, err); | |
930 | return err; | |
931 | } | |
932 | } | |
933 | return 0; | |
934 | } |