// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <[email protected]>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card ([email protected])
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie ([email protected]), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller ([email protected]), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

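/*
 * Translate a block number within the inode into an array of offsets:
 * the slot in the inode's direct/indirect block array, followed by the
 * index inside each indirect block on the path.  Returns the depth of
 * the path (0 if the block lies beyond the triple-indirect limit).
 */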
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
        struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
        int ptrs = uspi->s_apb;
        int ptrs_bits = uspi->s_apbshift;
        const long direct_blocks = UFS_NDADDR,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;

        UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = UFS_IND_BLOCK;
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = UFS_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = UFS_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
        } else {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
        }
        return n;
}

typedef struct {
        void    *p;
        union {
                __fs32  key32;
                __fs64  key64;
        };
        struct buffer_head *bh;
} Indirect;

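/*
 * grow_chain32()/grow_chain64() append one step to a chain of cached
 * block pointers.  The pointer value is sampled under ->meta_lock (a
 * seqlock) and all earlier links are re-verified; a return of 0 means
 * a concurrent truncation modified the chain and the caller must
 * restart its walk.
 */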
static inline int grow_chain32(struct ufs_inode_info *ufsi,
                               struct buffer_head *bh, __fs32 *v,
                               Indirect *from, Indirect *to)
{
        Indirect *p;
        unsigned seq;
        to->bh = bh;
        do {
                seq = read_seqbegin(&ufsi->meta_lock);
                to->key32 = *(__fs32 *)(to->p = v);
                for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
                        ;
        } while (read_seqretry(&ufsi->meta_lock, seq));
        return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
                               struct buffer_head *bh, __fs64 *v,
                               Indirect *from, Indirect *to)
{
        Indirect *p;
        unsigned seq;
        to->bh = bh;
        do {
                seq = read_seqbegin(&ufsi->meta_lock);
                to->key64 = *(__fs64 *)(to->p = v);
                for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
                        ;
        } while (read_seqretry(&ufsi->meta_lock, seq));
        return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

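/*
 * The chain walk below is validated by grow_chain32()/grow_chain64():
 * whenever a cached pointer no longer matches what is on disk (i.e. a
 * truncation raced with us), all buffers are dropped and the walk is
 * restarted from the top via "goto again".
 */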
static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 mask = (u64)uspi->s_apbmask >> uspi->s_fpbshift;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        Indirect chain[4], *q = chain;
        unsigned *p;
        unsigned flags = UFS_SB(sb)->s_flags;
        u64 res = 0;

        UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
                uspi->s_fpbshift, uspi->s_apbmask,
                (unsigned long long)mask);

        if (depth == 0)
                goto no_block;

again:
        p = offsets;

        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;

        if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
                goto changed;
        if (!q->key32)
                goto no_block;
        while (--depth) {
                __fs32 *ptr;
                struct buffer_head *bh;
                unsigned n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase +
                                  fs32_to_cpu(sb, q->key32) + (n >> shift));
                if (!bh)
                        goto no_block;
                ptr = (__fs32 *)bh->b_data + (n & mask);
                if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
                        goto changed;
                if (!q->key32)
                        goto no_block;
        }
        res = fs32_to_cpu(sb, q->key32);
        goto found;

ufs2:
        if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
                goto changed;
        if (!q->key64)
                goto no_block;

        while (--depth) {
                __fs64 *ptr;
                struct buffer_head *bh;
                unsigned n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase +
                                  fs64_to_cpu(sb, q->key64) + (n >> shift));
                if (!bh)
                        goto no_block;
                ptr = (__fs64 *)bh->b_data + (n & mask);
                if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
                        goto changed;
                if (!q->key64)
                        goto no_block;
        }
        res = fs64_to_cpu(sb, q->key64);
found:
        res += uspi->s_sbbase;
no_block:
        while (q > chain) {
                brelse(q->bh);
                q--;
        }
        return res;

changed:
        while (q > chain) {
                brelse(q->bh);
                q--;
        }
        goto again;
}

/*
 * Unpacking tails: we have a file with partial final block and
 * we had been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
                  int *err, struct folio *locked_folio)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned lastfrag = ufsi->i_lastfrag;   /* it's a short file, so unsigned is enough */
        unsigned block = ufs_fragstoblks(lastfrag);
        unsigned new_size;
        void *p;
        u64 tmp;

        if (writes_to < (lastfrag | uspi->s_fpbmask))
                new_size = (writes_to & uspi->s_fpbmask) + 1;
        else
                new_size = uspi->s_fpb;

        p = ufs_get_direct_data_ptr(uspi, ufsi, block);
        tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
                                new_size - (lastfrag & uspi->s_fpbmask), err,
                                locked_folio);
        return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of the newly allocated fragment(s)
 * @err: set if something goes wrong
 * @new: set if a new block is allocated
 * @locked_folio: for ufs_new_fragments()
 */
static u64 ufs_inode_getfrag(struct inode *inode, unsigned index,
                  sector_t new_fragment, int *err,
                  int *new, struct folio *locked_folio)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 tmp, goal, lastfrag;
        unsigned nfrags = uspi->s_fpb;
        void *p;

        p = ufs_get_direct_data_ptr(uspi, ufsi, index);
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp)
                goto out;

        lastfrag = ufsi->i_lastfrag;

        /* will that be a new tail? */
        if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
                nfrags = (new_fragment & uspi->s_fpbmask) + 1;

        goal = 0;
        if (index) {
                goal = ufs_data_ptr_to_cpu(sb,
                                 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
                if (goal)
                        goal += uspi->s_fpb;
        }
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
                                goal, nfrags, err, locked_folio);

        if (!tmp) {
                *err = -ENOSPC;
                return 0;
        }

        if (new)
                *new = 1;
        inode_set_ctime_current(inode);
        if (IS_SYNC(inode))
                ufs_sync_inode(inode);
        mark_inode_dirty(inode);
out:
        return tmp + uspi->s_sbbase;
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_folio: see ufs_inode_getfrag()
 */
static u64 ufs_inode_getblock(struct inode *inode, u64 ind_block,
                unsigned index, sector_t new_fragment, int *err,
                int *new, struct folio *locked_folio)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        u64 tmp = 0, goal;
        struct buffer_head *bh;
        void *p;

        if (!ind_block)
                return 0;

        bh = sb_bread(sb, ind_block + (index >> shift));
        if (unlikely(!bh)) {
                *err = -EIO;
                return 0;
        }

        index &= uspi->s_apbmask >> uspi->s_fpbshift;
        if (uspi->fs_magic == UFS2_MAGIC)
                p = (__fs64 *)bh->b_data + index;
        else
                p = (__fs32 *)bh->b_data + index;

        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp)
                goto out;

        if (index && (uspi->fs_magic == UFS2_MAGIC ?
                      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
                      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
                goal = tmp + uspi->s_fpb;
        else
                goal = bh->b_blocknr + uspi->s_fpb;
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
                                uspi->s_fpb, err, locked_folio);
        if (!tmp)
                goto out;

        if (new)
                *new = 1;

        mark_buffer_dirty(bh);
        if (IS_SYNC(inode))
                sync_dirty_buffer(bh);
        inode_set_ctime_current(inode);
        mark_inode_dirty(inode);
out:
        brelse(bh);
        UFSD("EXIT\n");
        if (tmp)
                tmp += uspi->s_sbbase;
        return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepages and so on
 */

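/*
 * With create == 0 we only map what is already allocated.  With
 * create != 0 we may extend the tail of the file and/or allocate new
 * fragments and indirect blocks, serialized by ->truncate_mutex.
 */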
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        int err = 0, new = 0;
        unsigned offsets[4];
        int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
        u64 phys64 = 0;
        unsigned frag = fragment & uspi->s_fpbmask;

        phys64 = ufs_frag_map(inode, offsets, depth);
        if (!create)
                goto done;

        if (phys64) {
                if (fragment >= UFS_NDIR_FRAGMENT)
                        goto done;
                read_seqlock_excl(&UFS_I(inode)->meta_lock);
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                        goto done;
                }
                read_sequnlock_excl(&UFS_I(inode)->meta_lock);
        }
        /* This code is entered only while writing ...? */

        mutex_lock(&UFS_I(inode)->truncate_mutex);

        UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
        if (unlikely(!depth)) {
                ufs_warning(sb, "ufs_get_block", "block > big");
                err = -EIO;
                goto out;
        }

        if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
                unsigned lastfrag = UFS_I(inode)->i_lastfrag;
                unsigned tailfrags = lastfrag & uspi->s_fpbmask;
                if (tailfrags && fragment >= lastfrag) {
                        if (!ufs_extend_tail(inode, fragment,
                                             &err, bh_result->b_folio))
                                goto out;
                }
        }

        if (depth == 1) {
                phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
                                           &err, &new, bh_result->b_folio);
        } else {
                int i;
                phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
                                           &err, NULL, NULL);
                for (i = 1; i < depth - 1; i++)
                        phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
                                                fragment, &err, NULL, NULL);
                phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
                                fragment, &err, &new, bh_result->b_folio);
        }
out:
        if (phys64) {
                phys64 += frag;
                map_bh(bh_result, sb, phys64);
                if (new)
                        set_buffer_new(bh_result);
        }
        mutex_unlock(&UFS_I(inode)->truncate_mutex);
        return err;

done:
        if (phys64)
                map_bh(bh_result, sb, phys64 + frag);
        return 0;
}

static int ufs_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, ufs_getfrag_block);
}

static int ufs_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
        return __block_write_begin(folio, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                ufs_truncate_blocks(inode);
        }
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len,
                        struct folio **foliop, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
        if (unlikely(ret))
                ufs_write_failed(mapping, pos + len);

        return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct folio *folio, void *fsdata)
{
        int ret;

        ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
        if (ret < len)
                ufs_write_failed(mapping, pos + len);
        return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio = ufs_read_folio,
        .writepages = ufs_writepages,
        .write_begin = ufs_write_begin,
        .write_end = ufs_write_end,
        .migrate_folio = buffer_migrate_folio,
        .bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ufs_file_inode_operations;
                inode->i_fop = &ufs_file_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ufs_dir_inode_operations;
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (!inode->i_blocks) {
                        inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
                        inode->i_op = &simple_symlink_inode_operations;
                } else {
                        inode->i_mapping->a_ops = &ufs_aops;
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                }
        } else
                init_special_inode(inode, inode->i_mode,
                                   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        umode_t mode;

        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
        if (inode->i_nlink == 0)
                return -ESTALE;

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
        i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode_set_atime(inode,
                        (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
                        0);
        inode_set_ctime(inode,
                        (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
                        0);
        inode_set_mtime(inode,
                        (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
                        0);
        inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        umode_t mode;

        UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
        if (inode->i_nlink == 0)
                return -ESTALE;

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
        i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
                        fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
        inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
                        fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
        inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
                        fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
        inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
        /*
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        */

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
                       sizeof(ufs2_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
                       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
        struct ufs_inode_info *ufsi;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *bh;
        struct inode *inode;
        int err = -EIO;

        UFSD("ENTER, ino %lu\n", ino);

        if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
                            ino);
                return ERR_PTR(-EIO);
        }

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        ufsi = UFS_I(inode);

        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
                            inode->i_ino);
                goto bad_inode;
        }
        if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                err = ufs2_read_inode(inode,
                                      ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }
        brelse(bh);
        if (err)
                goto bad_inode;

        inode_inc_iversion(inode);
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
        ufsi->i_dir_start_lookup = 0;
        ufsi->i_osync = 0;

        ufs_set_inode_ops(inode);

        UFSD("EXIT\n");
        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);

        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
        ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
                                                 inode_get_atime_sec(inode));
        ufs_inode->ui_atime.tv_usec = 0;
        ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
                                                 inode_get_ctime_sec(inode));
        ufs_inode->ui_ctime.tv_usec = 0;
        ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
                                                 inode_get_mtime_sec(inode));
        ufs_inode->ui_mtime.tv_usec = 0;
        ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
                ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
                ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
        } else if (inode->i_blocks) {
                memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink));
        }

        if (!inode->i_nlink)
                memset(ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);

        UFSD("ENTER\n");
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
        ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
        ufs_inode->ui_atimensec = cpu_to_fs32(sb,
                                              inode_get_atime_nsec(inode));
        ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
        ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
                                              inode_get_ctime_nsec(inode));
        ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
        ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
                                              inode_get_mtime_nsec(inode));

        ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
        } else if (inode->i_blocks) {
                memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink));
        }

        if (!inode->i_nlink)
                memset(ufs_inode, 0, sizeof(struct ufs2_inode));
        UFSD("EXIT\n");
}

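/*
 * Write the in-core inode back into its slot in the on-disk inode
 * table; if do_sync is set, the buffer is written out synchronously.
 */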
static int ufs_update_inode(struct inode *inode, int do_sync)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *bh;

        UFSD("ENTER, ino %lu\n", inode->i_ino);

        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
                return -1;
        }

        bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning(sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
                return -1;
        }
        if (uspi->fs_magic == UFS2_MAGIC) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                ufs2_update_inode(inode,
                                  ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

                ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
        }

        mark_buffer_dirty(bh);
        if (do_sync)
                sync_dirty_buffer(bh);
        brelse(bh);

        UFSD("EXIT\n");
        return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode(struct inode *inode)
{
        return ufs_update_inode(inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
        int want_delete = 0;

        if (!inode->i_nlink && !is_bad_inode(inode))
                want_delete = 1;

        truncate_inode_pages_final(&inode->i_data);
        if (want_delete) {
                inode->i_size = 0;
                if (inode->i_blocks &&
                    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                     S_ISLNK(inode->i_mode)))
                        ufs_truncate_blocks(inode);
                ufs_update_inode(inode, inode_needs_sync(inode));
        }

        invalidate_inode_buffers(inode);
        clear_inode(inode);

        if (want_delete)
                ufs_free_inode(inode);
}

struct to_free {
        struct inode *inode;
        u64 to;
        unsigned count;
};

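/*
 * free_data() batches up runs of adjacent fragments: a pending run is
 * handed to ufs_free_blocks() only once the next chunk turns out not
 * to be contiguous with it.  free_data(ctx, 0, 0) flushes whatever is
 * still pending.
 */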
static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
        if (ctx->count && ctx->to != from) {
                ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
                ctx->count = 0;
        }
        ctx->count += count;
        ctx->to = from + count;
}

#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

/*
 * used only for truncation down to direct blocks.
 */
static void ufs_trunc_direct(struct inode *inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned int new_frags, old_frags;
        unsigned int old_slot, new_slot;
        unsigned int old_tail, new_tail;
        struct to_free ctx = {.inode = inode};

        UFSD("ENTER: ino %lu\n", inode->i_ino);

        new_frags = DIRECT_FRAGMENT;
        // new_frags = first fragment past the new EOF
        old_frags = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
        // old_frags = first fragment past the old EOF or covered by indirects

        if (new_frags >= old_frags)      // expanding - nothing to free
                goto done;

        old_tail = ufs_fragnum(old_frags);
        old_slot = ufs_fragstoblks(old_frags);
        new_tail = ufs_fragnum(new_frags);
        new_slot = ufs_fragstoblks(new_frags);

        if (old_slot == new_slot) { // old_tail > 0
                void *p = ufs_get_direct_data_ptr(uspi, ufsi, old_slot);
                u64 tmp = ufs_data_ptr_to_cpu(sb, p);
                if (!tmp)
                        ufs_panic(sb, __func__, "internal error");
                if (!new_tail) {
                        write_seqlock(&ufsi->meta_lock);
                        ufs_data_ptr_clear(uspi, p);
                        write_sequnlock(&ufsi->meta_lock);
                }
                ufs_free_fragments(inode, tmp + new_tail, old_tail - new_tail);
        } else {
                unsigned int slot = new_slot;

                if (new_tail) {
                        void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot++);
                        u64 tmp = ufs_data_ptr_to_cpu(sb, p);
                        if (!tmp)
                                ufs_panic(sb, __func__, "internal error");

                        ufs_free_fragments(inode, tmp + new_tail,
                                                uspi->s_fpb - new_tail);
                }
                while (slot < old_slot) {
                        void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot++);
                        u64 tmp = ufs_data_ptr_to_cpu(sb, p);
                        if (!tmp)
                                continue;
                        write_seqlock(&ufsi->meta_lock);
                        ufs_data_ptr_clear(uspi, p);
                        write_sequnlock(&ufsi->meta_lock);

                        free_data(&ctx, tmp, uspi->s_fpb);
                }

                free_data(&ctx, 0, 0);

                if (old_tail) {
                        void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot);
                        u64 tmp = ufs_data_ptr_to_cpu(sb, p);
                        if (!tmp)
                                ufs_panic(sb, __func__, "internal error");
                        write_seqlock(&ufsi->meta_lock);
                        ufs_data_ptr_clear(uspi, p);
                        write_sequnlock(&ufsi->meta_lock);

                        ufs_free_fragments(inode, tmp, old_tail);
                }
        }
done:
        UFSD("EXIT: ino %lu\n", inode->i_ino);
}

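/*
 * Free an entire branch rooted at ind_block: for depth > 1 recurse
 * into every live pointer, at depth 1 release the data blocks, then
 * forget the indirect block's buffer and free the block itself.
 */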
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
        unsigned i;

        if (!ubh)
                return;

        if (--depth) {
                for (i = 0; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block)
                                free_full_branch(inode, block, depth);
                }
        } else {
                struct to_free ctx = {.inode = inode};

                for (i = 0; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block)
                                free_data(&ctx, block, uspi->s_fpb);
                }
                free_data(&ctx, 0, 0);
        }

        ubh_bforget(ubh);
        ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

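/*
 * Like free_full_branch(), but the indirect block itself survives:
 * only the pointers from @from onwards are cleared (under ->meta_lock,
 * so a racing ufs_frag_map() can notice and retry) before the subtrees
 * they referenced are freed.
 */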
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned i;

        if (--depth) {
                for (i = from; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block) {
                                write_seqlock(&UFS_I(inode)->meta_lock);
                                ufs_data_ptr_clear(uspi, p);
                                write_sequnlock(&UFS_I(inode)->meta_lock);
                                ubh_mark_buffer_dirty(ubh);
                                free_full_branch(inode, block, depth);
                        }
                }
        } else {
                struct to_free ctx = {.inode = inode};

                for (i = from; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block) {
                                write_seqlock(&UFS_I(inode)->meta_lock);
                                ufs_data_ptr_clear(uspi, p);
                                write_sequnlock(&UFS_I(inode)->meta_lock);
                                ubh_mark_buffer_dirty(ubh);
                                free_data(&ctx, block, uspi->s_fpb);
                        }
                }
                free_data(&ctx, 0, 0);
        }
        if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
                ubh_sync_block(ubh);
        ubh_brelse(ubh);
}

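/*
 * Make sure the last block of the file is allocated; if the last
 * fragment lands past the direct area, also zero out the remaining
 * fragments of that block on disk.
 */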
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
        int err = 0;
        struct super_block *sb = inode->i_sb;
        struct address_space *mapping = inode->i_mapping;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned i, end;
        sector_t lastfrag;
        struct folio *folio;
        struct buffer_head *bh;
        u64 phys64;

        lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

        if (!lastfrag)
                goto out;

        lastfrag--;

        folio = ufs_get_locked_folio(mapping, lastfrag >>
                                       (PAGE_SHIFT - inode->i_blkbits));
        if (IS_ERR(folio)) {
                err = -EIO;
                goto out;
        }

        end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
        bh = folio_buffers(folio);
        for (i = 0; i < end; ++i)
                bh = bh->b_this_page;

        err = ufs_getfrag_block(inode, lastfrag, bh, 1);

        if (unlikely(err))
                goto out_unlock;

        if (buffer_new(bh)) {
                clear_buffer_new(bh);
                clean_bdev_bh_alias(bh);
                /*
                 * We do not zero out the fragment: if it was mapped
                 * to a hole, it already contains zeroes.
                 */
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                folio_mark_dirty(folio);
        }

        if (lastfrag >= UFS_IND_FRAGMENT) {
                end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
                phys64 = bh->b_blocknr + 1;
                for (i = 0; i < end; ++i) {
                        bh = sb_getblk(sb, i + phys64);
                        lock_buffer(bh);
                        memset(bh->b_data, 0, sb->s_blocksize);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                        unlock_buffer(bh);
                        sync_dirty_buffer(bh);
                        brelse(bh);
                }
        }
out_unlock:
        ufs_put_locked_folio(folio);
out:
        return err;
}

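/*
 * Shrink the block tree to match i_size: trim the tail of the direct
 * area and/or of the partially surviving indirect branches, free every
 * branch that dies completely, and finally update ->i_lastfrag under
 * ->meta_lock.
 */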
static void ufs_truncate_blocks(struct inode *inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned offsets[4];
        int depth;
        int depth2;
        unsigned i;
        struct ufs_buffer_head *ubh[3];
        void *p;
        u64 block;

        if (inode->i_size) {
                sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
                depth = ufs_block_to_path(inode, last, offsets);
                if (!depth)
                        return;
        } else {
                depth = 1;
        }

        for (depth2 = depth - 1; depth2; depth2--)
                if (offsets[depth2] != uspi->s_apb - 1)
                        break;

        mutex_lock(&ufsi->truncate_mutex);
        if (depth == 1) {
                ufs_trunc_direct(inode);
                offsets[0] = UFS_IND_BLOCK;
        } else {
                /* get the blocks that should be partially emptied */
                p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
                for (i = 0; i < depth2; i++) {
                        block = ufs_data_ptr_to_cpu(sb, p);
                        if (!block)
                                break;
                        ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
                        if (!ubh[i]) {
                                write_seqlock(&ufsi->meta_lock);
                                ufs_data_ptr_clear(uspi, p);
                                write_sequnlock(&ufsi->meta_lock);
                                break;
                        }
                        p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
                }
                while (i--)
                        free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
        }
        for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
                p = ufs_get_direct_data_ptr(uspi, ufsi, i);
                block = ufs_data_ptr_to_cpu(sb, p);
                if (block) {
                        write_seqlock(&ufsi->meta_lock);
                        ufs_data_ptr_clear(uspi, p);
                        write_sequnlock(&ufsi->meta_lock);
                        free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
                }
        }
        read_seqlock_excl(&ufsi->meta_lock);
        ufsi->i_lastfrag = DIRECT_FRAGMENT;
        read_sequnlock_excl(&ufsi->meta_lock);
        mark_inode_dirty(inode);
        mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
        int err = 0;

        UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
             inode->i_ino, (unsigned long long)size,
             (unsigned long long)i_size_read(inode));

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
              S_ISLNK(inode->i_mode)))
                return -EINVAL;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return -EPERM;

        err = ufs_alloc_lastblock(inode, size);

        if (err)
                goto out;

        block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

        truncate_setsize(inode, size);

        ufs_truncate_blocks(inode);
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        mark_inode_dirty(inode);
out:
        UFSD("EXIT: err %d\n", err);
        return err;
}

int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        unsigned int ia_valid = attr->ia_valid;
        int error;

        error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
                error = ufs_truncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(&nop_mnt_idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

const struct inode_operations ufs_file_inode_operations = {
        .setattr = ufs_setattr,
};