// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap(buf->page);
	else if (buf->kmap_type == EROFS_KMAP_ATOMIC)
		kunmap_atomic(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	erofs_off_t offset = blknr_to_addr(blkaddr);
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);
		page = read_cache_page_gfp(mapping, index,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(page))
			return page;
		/* should already be PageUptodate, no need to lock page */
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap(page);
		else if (type == EROFS_KMAP_ATOMIC)
			buf->base = kmap_atomic(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}
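
/*
 * Editorial note (not in the original file): a minimal usage sketch of
 * the metabuf helpers above, assuming a valid superblock "sb" and block
 * address "blkaddr":
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ... access on-disk metadata through ptr ...
 *	erofs_put_metabuf(&buf);
 *
 * Since erofs_read_metabuf() keeps the cached page whenever the
 * requested block falls on the same page (page->index == index),
 * callers may invoke it repeatedly on nearby blocks without paying for
 * extra page-cache lookups.
 */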

static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* layout 2 (inode inline): inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
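
/*
 * Editorial note: a worked example of the flat-mode math above, using
 * assumed numbers. With EROFS_BLKSIZ == 4096, a 10000-byte
 * EROFS_INODE_FLAT_INLINE file has nblocks = DIV_ROUND_UP(10000, 4096)
 * = 3 and lastblk = 2. Logical offsets below 8192 map to
 * blknr_to_addr(vi->raw_blkaddr) + m_la in the raw block area, while
 * the remaining 1808 tail bytes are served inline from the metadata
 * block holding the inode and flagged with EROFS_MAP_META.
 */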

static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, 0);
	return err;
}
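
/*
 * Editorial note: a sketch of the chunk lookup above, with assumed
 * values. For chunkbits == 20 (1 MiB chunks), m_la == 0x340000 gives
 * chunknr == 3; its entry lives unit * 3 bytes past the unit-aligned
 * end of the inode metadata, where unit is 4 bytes for a plain block
 * map or sizeof(struct erofs_inode_chunk_index) for chunk indexes.
 * A stored EROFS_NULL_ADDR marks the whole chunk as a hole, and m_plen
 * is clamped so the final chunk never maps past i_size rounded up to a
 * block.
 */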

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}
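
/*
 * Editorial note: an illustration of the fallback scan above, with a
 * hypothetical layout. If an extra device is mapped at blocks
 * [1024, 2048) of the aggregated address space, a physical address of
 * blknr_to_addr(1500) lands inside that window and is rebased to
 * blknr_to_addr(476) within dif->bdev; addresses outside every window
 * keep the primary device set up at the top of the function.
 */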

static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX) {
		iomap->dax_dev = mdev.m_daxdev;
		iomap->offset += mdev.m_dax_part_off;
	} else {
		iomap->bdev = mdev.m_bdev;
	}
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, inode->i_sb,
					 erofs_blknr(mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
	}
	return 0;
}
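
/*
 * Editorial note: for IOMAP_INLINE extents, erofs_iomap_begin() stashes
 * the kmapped metabuf base in iomap->private so that erofs_iomap_end()
 * below can rebuild the erofs_buf (via kmap_to_page()) and drop the
 * page reference once iomap is done with the inline data.
 */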

static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}
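
/*
 * Editorial note: the bitwise OR above folds the position, byte count
 * and user buffer alignment into a single check. For example (assumed
 * values), with a 512-byte logical block size (blksize_mask == 511),
 * ki_pos == 1024 and count == 2048 both have clear low nine bits, so if
 * iov_iter_alignment() also reports no bits below 512, align & 511 == 0
 * and the direct I/O proceeds; any misaligned component sets a low bit
 * and the function returns -EINVAL.
 */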

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take a (shared) inode lock since it's a read-only fs */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_readpage,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif
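
/*
 * Editorial note: for DAX inodes, erofs_file_mmap() above refuses
 * shared writable mappings with -EINVAL since the filesystem is
 * read-only (private writable mappings still work via copy-on-write),
 * while VM_HUGEPAGE merely hints that PMD-sized DAX faults are welcome.
 */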

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};