// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/affs/file.c
 *
 * (c) 1996 Hans-Joachim Widmaier - Rewritten
 *
 * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
 *
 * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
 *
 * (C) 1991 Linus Torvalds - minix filesystem
 *
 * affs regular file handling primitives
 */

#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		inode_lock(inode);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		inode_unlock(inode);
	}

	return 0;
}

static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}

static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);

	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}

static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}

static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}

static int affs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, affs_get_block);
}

static int affs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		affs_truncate(inode);
	}
}

static ssize_t
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t size = offset + count;

		if (AFFS_I(inode)->mmu_private < size)
			return 0;
	}

	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		affs_write_failed(mapping, offset + count);
	return ret;
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
			       affs_get_block,
			       &AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		affs_write_failed(mapping, pos + len);

	return ret;
}

static int affs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

	return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = affs_read_folio,
	.writepages = affs_writepages,
	.write_begin = affs_write_begin,
	.write_end = affs_write_end,
	.direct_IO = affs_direct_IO,
	.migrate_folio = buffer_migrate_folio,
	.bmap = _affs_bmap
};

static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	size_t pos = 0;
	size_t bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino,
		 folio->index, to);
	BUG_ON(to > folio_size(folio));
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = folio_pos(folio);
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;
	}
	return 0;
}

static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}

static int affs_read_folio_ofs(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	size_t to;
	int err;

	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
	to = folio_size(folio);
	if (folio_pos(folio) + to > inode->i_size) {
		to = inode->i_size - folio_pos(folio);
		folio_zero_segment(folio, to, folio_size(folio));
	}

	err = affs_do_read_folio_ofs(folio, to, 0);
	if (!err)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return err;
}

static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct folio *folio;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*pagep = &folio->page;

	if (folio_test_uptodate(folio))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_read_folio_ofs(folio, folio_size(folio), 1);
	if (err) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return err;
}

static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the folio should always be uptodate here,
	 * due to write_begin.
	 */

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = folio_address(folio);

	bh = NULL;
	written = 0;
	tmp = (folio->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	folio_mark_uptodate(folio);

done:
	affs_brelse(bh);
	tmp = (folio->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

err_first_bh:
	folio_unlock(folio);
	folio_put(folio);

	return written;

err_bh:
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}

const struct address_space_operations affs_aops_ofs = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = affs_read_folio_ofs,
	//.writepages = affs_writepages_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs,
	.migrate_folio = filemap_migrate_folio,
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}

/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata = NULL;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}

int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = file_write_and_wait_range(filp, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	inode_unlock(inode);
	return ret;
}

const struct file_operations affs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.open = affs_file_open,
	.release = affs_file_release,
	.fsync = affs_file_fsync,
	.splice_read = filemap_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
	.setattr = affs_notify_change,
};