// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS		2

/*
 * Keep a dedicated type here in case another tagged pointer
 * is introduced later.
 */
typedef void *z_erofs_next_pcluster_t;
struct z_erofs_bvec {
	struct page *page;
	int offset;
	unsigned int end;
};

#define __Z_EROFS_BVSET(name, total) \
struct name { \
	/* point to the next page which contains the following bvecs */ \
	struct page *nextpage; \
	struct z_erofs_bvec bvec[total]; \
}
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
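/*
 * Each bvset holds one page worth of z_erofs_bvec slots, chained together
 * through ->nextpage; the inline variant above only bootstraps the first
 * Z_EROFS_INLINE_BVECS slots directly inside the pcluster.
 */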
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed / updated in atomic for parallelized code.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct mutex lock;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* L: the maximum decompression size of this round */
	unsigned int length;

	/* L: total number of bvecs */
	unsigned int vcnt;

	/* I: pcluster size (compressed size) in bytes */
	unsigned int pclustersize;

	/* I: page offset of start position of decompression */
	unsigned short pageofs_out;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	union {
		/* L: inline a certain number of bvecs for bootstrap */
		struct z_erofs_bvset_inline bvset;

		/* I: can be used to free the pcluster by RCU */
		struct rcu_head rcu;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* L: whether partial decompression or not */
	bool partial;

	/* L: indicate several pageofs_outs or not */
	bool multibases;

	/* L: whether extra buffer allocations are best-effort */
	bool besteffort;

	/* A: compressed bvecs (can be cached or inplaced pages) */
	struct z_erofs_bvec compressed_bvecs[];
};
/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL		((void *) 0x700 + POISON_POINTER_DELTA)
#define Z_EROFS_PCLUSTER_NIL		(NULL)
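/*
 * A pcluster's ->next is NIL while it isn't linked into any chain, points to
 * the next chained pcluster while a chain is being built, and is TAIL once it
 * terminates a chain handed over to a decompression queue.
 */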
struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
		struct kthread_work kthread_work;
	} u;
	bool eio, sync;
};
static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
	return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
}

#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)

static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
{
	return fo->mapping == MNGD_MAPPING(sbi);
}
/*
 * bit 30: I/O error occurred on this folio
 * bit 0 - 29: remaining parts to complete this folio
 */
#define Z_EROFS_FOLIO_EIO		(1 << 30)
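/*
 * An "online" (file-backed) folio uses ->private as an atomic counter: it is
 * initialized to 1, bumped once per attached part, and decremented as each
 * part completes; bit 30 latches any I/O or decompression error.  Once the
 * count drops back, the read is ended and the error bit decides uptodateness.
 */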
static void z_erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;  /* valid only if file-backed folio is locked */
}

static void z_erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

static void z_erofs_onlinefolio_end(struct folio *folio, int err)
{
	int orig, v;

	do {
		orig = atomic_read((atomic_t *)&folio->private);
		v = (orig - 1) | (err ? Z_EROFS_FOLIO_EIO : 0);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & ~Z_EROFS_FOLIO_EIO)
		return;
	folio->private = 0;
	folio_end_read(folio, !(v & Z_EROFS_FOLIO_EIO));
}

#define Z_EROFS_ONSTACK_PAGES		32
/*
 * pclustersize is variable with the big pcluster feature, so slab pools are
 * maintained for each of the possible pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};
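/*
 * z_erofs_alloc_pcluster() below walks this table in ascending order and
 * takes the first slab whose maxpages covers the requested compressed size,
 * so each pcluster is carved from the tightest-fitting cache.
 */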
struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}
static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage,
				struct page **pagepool)
{
	if (iter->cur >= iter->nr) {
		struct page *nextpage = *candidate_bvpage;

		if (!nextpage) {
			nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
			if (!nextpage)
				return -ENOMEM;
			set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
		}
		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = nextpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}
static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);

	*bvec = iter->bvset->bvec[iter->cur++];
}

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}
static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}
static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
{
	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct z_erofs_pcluster_slab *pcs = pcluster_pool;

	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclustersize = size;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}
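/*
 * Decompression work runs either on the shared "erofs_worker" workqueue set
 * up below or, with CONFIG_EROFS_FS_PCPU_KTHREAD, on dedicated per-CPU
 * kthread workers (optionally raised to low FIFO priority via
 * CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI).
 */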
333 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
335 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
336 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
338 static void erofs_destroy_percpu_workers(void)
340 struct kthread_worker *worker;
343 for_each_possible_cpu(cpu) {
344 worker = rcu_dereference_protected(
345 z_erofs_pcpu_workers[cpu], 1);
346 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
348 kthread_destroy_worker(worker);
350 kfree(z_erofs_pcpu_workers);
353 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
355 struct kthread_worker *worker =
356 kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
360 if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
361 sched_set_fifo_low(worker->task);
365 static int erofs_init_percpu_workers(void)
367 struct kthread_worker *worker;
370 z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
371 sizeof(struct kthread_worker *), GFP_ATOMIC);
372 if (!z_erofs_pcpu_workers)
375 for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
376 worker = erofs_init_percpu_worker(cpu);
378 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
383 static inline void erofs_destroy_percpu_workers(void) {}
384 static inline int erofs_init_percpu_workers(void) { return 0; }
387 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
388 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
389 static enum cpuhp_state erofs_cpuhp_state;
391 static int erofs_cpu_online(unsigned int cpu)
393 struct kthread_worker *worker, *old;
395 worker = erofs_init_percpu_worker(cpu);
397 return PTR_ERR(worker);
399 spin_lock(&z_erofs_pcpu_worker_lock);
400 old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
401 lockdep_is_held(&z_erofs_pcpu_worker_lock));
403 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
404 spin_unlock(&z_erofs_pcpu_worker_lock);
406 kthread_destroy_worker(worker);
410 static int erofs_cpu_offline(unsigned int cpu)
412 struct kthread_worker *worker;
414 spin_lock(&z_erofs_pcpu_worker_lock);
415 worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
416 lockdep_is_held(&z_erofs_pcpu_worker_lock));
417 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
418 spin_unlock(&z_erofs_pcpu_worker_lock);
422 kthread_destroy_worker(worker);
426 static int erofs_cpu_hotplug_init(void)
430 state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
431 "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
435 erofs_cpuhp_state = state;
439 static void erofs_cpu_hotplug_destroy(void)
441 if (erofs_cpuhp_state)
442 cpuhp_remove_state_nocalls(erofs_cpuhp_state);
444 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
445 static inline int erofs_cpu_hotplug_init(void) { return 0; }
446 static inline void erofs_cpu_hotplug_destroy(void) {}
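/*
 * With CPU hotplug enabled, erofs_cpu_online()/erofs_cpu_offline() above keep
 * z_erofs_pcpu_workers[] in sync with the set of online CPUs under
 * z_erofs_pcpu_worker_lock; erofs_cpuhp_state remembers the dynamic cpuhp
 * slot so it can be removed again on exit.
 */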
449 void z_erofs_exit_subsystem(void)
451 erofs_cpu_hotplug_destroy();
452 erofs_destroy_percpu_workers();
453 destroy_workqueue(z_erofs_workqueue);
454 z_erofs_destroy_pcluster_pool();
455 z_erofs_exit_decompressor();
458 int __init z_erofs_init_subsystem(void)
460 int err = z_erofs_init_decompressor();
463 goto err_decompressor;
465 err = z_erofs_create_pcluster_pool();
467 goto err_pcluster_pool;
469 z_erofs_workqueue = alloc_workqueue("erofs_worker",
470 WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
471 if (!z_erofs_workqueue) {
473 goto err_workqueue_init;
476 err = erofs_init_percpu_workers();
478 goto err_pcpu_worker;
480 err = erofs_cpu_hotplug_init();
486 erofs_destroy_percpu_workers();
488 destroy_workqueue(z_erofs_workqueue);
490 z_erofs_destroy_pcluster_pool();
492 z_erofs_exit_decompressor();
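/*
 * Note that the error paths above unwind initialization in reverse order:
 * per-CPU workers, then the workqueue, then the pcluster pools, and finally
 * the decompressors.
 */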
enum z_erofs_pclustermode {
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages. All related online pages cannot be reused for
	 * inplace I/O (or bvpage) since the pcluster can be decoded directly
	 * without I/O submission.
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The pcluster was just linked to a decompression chain by us. It can
	 * also be linked with the remaining pclusters, which means if the
	 * processing page is the tail page of a pcluster, this pcluster can
	 * safely use the whole page (since the previous pcluster is within the
	 * same chain) for in-place I/O, as illustrated below:
	 *  ___________________________________________________
	 * |  tail (partial) page  |    head (partial) page    |
	 * |  (of the current pcl) |   (of the previous pcl)   |
	 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
	 *
	 * [  (*) the page above can be used for inplace I/O.  ]
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};
struct z_erofs_decompress_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *pagepool;
	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl;
	z_erofs_next_pcluster_t owned_head;
	enum z_erofs_pclustermode mode;

	erofs_off_t headoffset;

	/* a pointer used to pick up inplace I/O pages */
	unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED }
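/*
 * Typical flow (see z_erofs_read_folio() below): a frontend is set up with
 * DECOMPRESS_FRONTEND_INIT(), folios are fed through z_erofs_scan_folio(),
 * the last pcluster is closed with z_erofs_pcluster_end(), and the collected
 * chain is finally kicked off via z_erofs_runqueue().
 */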
static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;

	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
		return false;

	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
	    fe->map.m_la < fe->headoffset)
		return true;

	return false;
}
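/*
 * z_erofs_bind_cache() below walks the compressed pages of the current
 * pcluster and binds them to the managed cache mapping: already-cached folios
 * are picked up directly, otherwise a page may be preallocated (without
 * direct reclaim) for cached I/O; anything left unbound falls back to
 * in-place I/O.  If every compressed page ends up cached, the pcluster is
 * switched to FOLLOWED_NOINPLACE so it can go through the bypass queue.
 */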
560 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
562 struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
563 struct z_erofs_pcluster *pcl = fe->pcl;
564 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
565 bool shouldalloc = z_erofs_should_alloc_cache(fe);
566 bool standalone = true;
	/*
	 * Allocate optimistically without direct reclaim, since in-place I/O
	 * can be used instead under memory pressure.
	 */
571 gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
572 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
575 if (i_blocksize(fe->inode) != PAGE_SIZE ||
576 fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
579 for (i = 0; i < pclusterpages; ++i) {
580 struct page *page, *newpage;
582 /* Inaccurate check w/o locking to avoid unneeded lookups */
583 if (READ_ONCE(pcl->compressed_bvecs[i].page))
586 page = find_get_page(mc, pcl->obj.index + i);
			/* I/O is needed; it's not possible to decompress directly */
			/*
			 * Try cached I/O if allocation succeeds or fall back
			 * to in-place I/O instead to avoid any direct reclaim.
			 */
597 newpage = erofs_allocpage(&fe->pagepool, gfp);
600 set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
602 spin_lock(&pcl->obj.lockref.lock);
603 if (!pcl->compressed_bvecs[i].page) {
604 pcl->compressed_bvecs[i].page = page ? page : newpage;
605 spin_unlock(&pcl->obj.lockref.lock);
608 spin_unlock(&pcl->obj.lockref.lock);
613 erofs_pagepool_add(&fe->pagepool, newpage);
	/*
	 * don't do inplace I/O if all compressed pages are available in the
	 * managed cache, since the pcluster can be moved to the bypass queue
	 * instead.
	 */
621 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
624 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
625 int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
626 struct erofs_workgroup *grp)
628 struct z_erofs_pcluster *const pcl =
629 container_of(grp, struct z_erofs_pcluster, obj);
630 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
634 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
635 /* Each cached folio contains one page unless bs > ps is supported */
636 for (i = 0; i < pclusterpages; ++i) {
637 if (pcl->compressed_bvecs[i].page) {
638 folio = page_folio(pcl->compressed_bvecs[i].page);
639 /* Avoid reclaiming or migrating this folio */
640 if (!folio_trylock(folio))
643 if (!erofs_folio_is_managed(sbi, folio))
645 pcl->compressed_bvecs[i].page = NULL;
646 folio_detach_private(folio);
653 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
655 struct z_erofs_pcluster *pcl = folio_get_private(folio);
656 struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
657 struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
660 if (!folio_test_private(folio))
664 spin_lock(&pcl->obj.lockref.lock);
665 if (pcl->obj.lockref.count <= 0) {
666 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
667 for (; bvec < end; ++bvec) {
668 if (bvec->page && page_folio(bvec->page) == folio) {
670 folio_detach_private(folio);
676 spin_unlock(&pcl->obj.lockref.lock);
/*
 * It will be called only on inode eviction. In case there are still some
 * decompression requests in progress, wait with rescheduling for a bit here.
 * An extra lock could be introduced instead but it seems unnecessary.
 */
685 static void z_erofs_cache_invalidate_folio(struct folio *folio,
686 size_t offset, size_t length)
688 const size_t stop = length + offset;
690 /* Check for potential overflow in debug mode */
691 DBG_BUGON(stop > folio_size(folio) || stop < length);
693 if (offset == 0 && stop == folio_size(folio))
694 while (!z_erofs_cache_release_folio(folio, 0))
698 static const struct address_space_operations z_erofs_cache_aops = {
699 .release_folio = z_erofs_cache_release_folio,
700 .invalidate_folio = z_erofs_cache_invalidate_folio,
703 int erofs_init_managed_cache(struct super_block *sb)
705 struct inode *const inode = new_inode(sb);
711 inode->i_size = OFFSET_MAX;
712 inode->i_mapping->a_ops = &z_erofs_cache_aops;
713 mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
714 EROFS_SB(sb)->managed_cache = inode;
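/*
 * The managed cache is a pseudo-inode whose page cache spans the whole image
 * (i_size is OFFSET_MAX), so cached compressed folios are simply indexed by
 * their physical block numbers (see the pcl->obj.index users above and below).
 */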
/* callers must hold the pcluster lock */
719 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
720 struct z_erofs_bvec *bvec, bool exclusive)
722 struct z_erofs_pcluster *pcl = fe->pcl;
	/* give priority to file pages for in-place I/O */
727 spin_lock(&pcl->obj.lockref.lock);
728 while (fe->icur > 0) {
729 if (pcl->compressed_bvecs[--fe->icur].page)
731 pcl->compressed_bvecs[fe->icur] = *bvec;
732 spin_unlock(&pcl->obj.lockref.lock);
735 spin_unlock(&pcl->obj.lockref.lock);
737 /* otherwise, check if it can be used as a bvpage */
738 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
739 !fe->candidate_bvpage)
740 fe->candidate_bvpage = bvec->page;
742 ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
744 fe->pcl->vcnt += (ret >= 0);
748 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
750 struct z_erofs_pcluster *pcl = f->pcl;
751 z_erofs_next_pcluster_t *owned_head = &f->owned_head;
753 /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
754 if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
755 *owned_head) == Z_EROFS_PCLUSTER_NIL) {
756 *owned_head = &pcl->next;
757 /* so we can attach this pcluster to our submission chain. */
758 f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
762 /* type 2, it belongs to an ongoing chain */
763 f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
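/*
 * z_erofs_register_pcluster() below sets up a brand-new pcluster for the
 * current extent: it is allocated from the matching slab, claimed as the head
 * of our chain and inserted into the workgroup tree; if someone else
 * registered one concurrently (-EEXIST), the existing pcluster is used
 * instead.
 */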
766 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
768 struct erofs_map_blocks *map = &fe->map;
769 struct super_block *sb = fe->inode->i_sb;
770 bool ztailpacking = map->m_flags & EROFS_MAP_META;
771 struct z_erofs_pcluster *pcl;
772 struct erofs_workgroup *grp;
775 if (!(map->m_flags & EROFS_MAP_ENCODED) ||
776 (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
778 return -EFSCORRUPTED;
781 /* no available pcluster, let's allocate one */
782 pcl = z_erofs_alloc_pcluster(map->m_plen);
786 spin_lock_init(&pcl->obj.lockref.lock);
787 pcl->obj.lockref.count = 1; /* one ref for this request */
788 pcl->algorithmformat = map->m_algorithmformat;
792 /* new pclusters should be claimed as type 1, primary and followed */
793 pcl->next = fe->owned_head;
794 pcl->pageofs_out = map->m_la & ~PAGE_MASK;
795 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
	/*
	 * lock all primary followed works before they become visible to
	 * others; mutex_trylock *never* fails for a new pcluster.
	 */
801 mutex_init(&pcl->lock);
802 DBG_BUGON(!mutex_trylock(&pcl->lock));
805 pcl->obj.index = 0; /* which indicates ztailpacking */
807 pcl->obj.index = erofs_blknr(sb, map->m_pa);
809 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
815 if (grp != &pcl->obj) {
816 fe->pcl = container_of(grp,
817 struct z_erofs_pcluster, obj);
822 fe->owned_head = &pcl->next;
827 mutex_unlock(&pcl->lock);
828 z_erofs_free_pcluster(pcl);
832 static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
834 struct erofs_map_blocks *map = &fe->map;
835 struct super_block *sb = fe->inode->i_sb;
836 erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
837 struct erofs_workgroup *grp = NULL;
	/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous pcluster */
843 DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
845 if (!(map->m_flags & EROFS_MAP_META)) {
846 grp = erofs_find_workgroup(sb, blknr);
847 } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
849 return -EFSCORRUPTED;
853 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
856 ret = z_erofs_register_pcluster(fe);
859 if (ret == -EEXIST) {
860 mutex_lock(&fe->pcl->lock);
861 z_erofs_try_to_claim_pcluster(fe);
866 z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
867 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
868 if (!z_erofs_is_inline_pcluster(fe->pcl)) {
869 /* bind cache first when cached decompression is preferred */
870 z_erofs_bind_cache(fe);
874 mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
877 erofs_err(sb, "failed to get inline data %d", ret);
880 get_page(map->buf.page);
881 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
882 fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
883 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
885 /* file-backed inplace I/O pages are traversed in reverse order */
886 fe->icur = z_erofs_pclusterpages(fe->pcl);
/*
 * keep in mind that no referenced pclusters will be freed, and a pcluster is
 * only freed after an RCU grace period.
 */
894 static void z_erofs_rcu_callback(struct rcu_head *head)
896 z_erofs_free_pcluster(container_of(head,
897 struct z_erofs_pcluster, rcu));
900 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
902 struct z_erofs_pcluster *const pcl =
903 container_of(grp, struct z_erofs_pcluster, obj);
905 call_rcu(&pcl->rcu, z_erofs_rcu_callback);
908 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
910 struct z_erofs_pcluster *pcl = fe->pcl;
915 z_erofs_bvec_iter_end(&fe->biter);
916 mutex_unlock(&pcl->lock);
918 if (fe->candidate_bvpage)
919 fe->candidate_bvpage = NULL;
	/*
	 * if all pending pages are added, don't hold the pcluster reference
	 * any longer if the pcluster isn't hosted by ourselves.
	 */
925 if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
926 erofs_workgroup_put(&pcl->obj);
931 static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
932 unsigned int cur, unsigned int end, erofs_off_t pos)
934 struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
935 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
940 return -EFSCORRUPTED;
942 buf.mapping = packed_inode->i_mapping;
943 for (; cur < end; cur += cnt, pos += cnt) {
944 cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
945 src = erofs_bread(&buf, pos, EROFS_KMAP);
947 erofs_put_metabuf(&buf);
950 memcpy_to_folio(folio, cur, src, cnt);
952 erofs_put_metabuf(&buf);
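/*
 * z_erofs_scan_folio() walks one file-backed folio backwards in (sub)page
 * units: for each piece it (re)maps the logical extent, zeroes unmapped
 * ranges, copies fragment data, or attaches the piece to the current pcluster
 * as an in-place/bvec page, keeping the online-folio part count in sync.
 */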
956 static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
957 struct folio *folio, bool ra)
959 struct inode *const inode = f->inode;
960 struct erofs_map_blocks *const map = &f->map;
961 const loff_t offset = folio_pos(folio);
962 const unsigned int bs = i_blocksize(inode);
963 unsigned int end = folio_size(folio), split = 0, cur, pgs;
967 tight = (bs == PAGE_SIZE);
968 z_erofs_onlinefolio_init(folio);
970 if (offset + end - 1 < map->m_la ||
971 offset + end - 1 >= map->m_la + map->m_llen) {
972 z_erofs_pcluster_end(f);
973 map->m_la = offset + end - 1;
975 err = z_erofs_map_blocks_iter(inode, map, 0);
980 cur = offset > map->m_la ? 0 : map->m_la - offset;
981 pgs = round_down(cur, PAGE_SIZE);
982 /* bump split parts first to avoid several separate cases */
985 if (!(map->m_flags & EROFS_MAP_MAPPED)) {
986 folio_zero_segment(folio, cur, end);
988 } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
989 erofs_off_t fpos = offset + cur - map->m_la;
991 err = z_erofs_read_fragment(inode->i_sb, folio, cur,
992 cur + min(map->m_llen - fpos, end - cur),
993 EROFS_I(inode)->z_fragmentoff + fpos);
999 err = z_erofs_pcluster_begin(f);
1002 f->pcl->besteffort |= !ra;
1005 pgs = round_down(end - 1, PAGE_SIZE);
		/*
		 * Ensure this partial page belongs to this submission chain
		 * rather than to other concurrent submission chains or the
		 * no-I/O (bypass) chain, since those chains are handled
		 * asynchronously and thus the page cannot be used for
		 * in-place I/O or as a bvpage (it has to be processed in
		 * strict order).
		 */
1013 tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
1016 excl = (split <= 1) || tight;
1020 err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
1021 .page = folio_page(folio, pgs >> PAGE_SHIFT),
1022 .offset = offset + pgs - map->m_la,
1023 .end = end - pgs, }), excl);
1027 z_erofs_onlinefolio_split(folio);
1028 if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
1029 f->pcl->multibases = true;
1030 if (f->pcl->length < offset + end - map->m_la) {
1031 f->pcl->length = offset + end - map->m_la;
1032 f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
1034 if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
1035 !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
1036 f->pcl->length == map->m_llen)
1037 f->pcl->partial = false;
1039 /* shorten the remaining extent to update progress */
1040 map->m_llen = offset + cur - map->m_la;
1041 map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
1044 tight = (bs == PAGE_SIZE);
1046 } while ((end = cur) > 0);
1047 z_erofs_onlinefolio_end(folio, err);
1051 static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
1052 unsigned int readahead_pages)
1054 /* auto: enable for read_folio, disable for readahead */
1055 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
1059 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
1060 (readahead_pages <= sbi->opt.max_sync_decompress_pages))
1066 static bool z_erofs_page_is_invalidated(struct page *page)
1068 return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
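/*
 * The decompression backend gathers everything needed to decode one pcluster:
 * the output pages in file order, the compressed input pages, and a list of
 * secondary bvecs for pieces that are mapped more than once; small pclusters
 * reuse the on-stack page array to avoid allocations.
 */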
1071 struct z_erofs_decompress_backend {
1072 struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1073 struct super_block *sb;
1074 struct z_erofs_pcluster *pcl;
1076 /* pages with the longest decompressed length for deduplication */
1077 struct page **decompressed_pages;
1078 /* pages to keep the compressed data */
1079 struct page **compressed_pages;
1081 struct list_head decompressed_secondary_bvecs;
1082 struct page **pagepool;
1083 unsigned int onstack_used, nr_pages;
1086 struct z_erofs_bvec_item {
1087 struct z_erofs_bvec bvec;
1088 struct list_head list;
1091 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
1092 struct z_erofs_bvec *bvec)
1094 struct z_erofs_bvec_item *item;
1097 if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
1098 (bvec->end == PAGE_SIZE ||
1099 bvec->offset + bvec->end == be->pcl->length)) {
1100 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
1101 DBG_BUGON(pgnr >= be->nr_pages);
1102 if (!be->decompressed_pages[pgnr]) {
1103 be->decompressed_pages[pgnr] = bvec->page;
1108 /* (cold path) one pcluster is requested multiple times */
1109 item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
1111 list_add(&item->list, &be->decompressed_secondary_bvecs);
1114 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
1117 unsigned int off0 = be->pcl->pageofs_out;
1118 struct list_head *p, *n;
1120 list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
1121 struct z_erofs_bvec_item *bvi;
1122 unsigned int end, cur;
1125 bvi = container_of(p, struct z_erofs_bvec_item, list);
1126 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
1127 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
1129 dst = kmap_local_page(bvi->bvec.page);
1131 unsigned int pgnr, scur, len;
1133 pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
1134 DBG_BUGON(pgnr >= be->nr_pages);
1136 scur = bvi->bvec.offset + cur -
1137 ((pgnr << PAGE_SHIFT) - off0);
1138 len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
1139 if (!be->decompressed_pages[pgnr]) {
1140 err = -EFSCORRUPTED;
1144 src = kmap_local_page(be->decompressed_pages[pgnr]);
1145 memcpy(dst + cur, src + scur, len);
1150 z_erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
1156 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
1158 struct z_erofs_pcluster *pcl = be->pcl;
1159 struct z_erofs_bvec_iter biter;
1160 struct page *old_bvpage;
1163 z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
1164 for (i = 0; i < pcl->vcnt; ++i) {
1165 struct z_erofs_bvec bvec;
1167 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
1170 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1172 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1173 z_erofs_do_decompressed_bvec(be, &bvec);
1176 old_bvpage = z_erofs_bvec_iter_end(&biter);
1178 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1181 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1184 struct z_erofs_pcluster *pcl = be->pcl;
1185 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1188 *overlapped = false;
1189 for (i = 0; i < pclusterpages; ++i) {
1190 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
1191 struct page *page = bvec->page;
1193 /* compressed data ought to be valid before decompressing */
1198 be->compressed_pages[i] = page;
1200 if (z_erofs_is_inline_pcluster(pcl) ||
1201 erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
1202 if (!PageUptodate(page))
1207 DBG_BUGON(z_erofs_page_is_invalidated(page));
1208 if (z_erofs_is_shortlived_page(page))
1210 z_erofs_do_decompressed_bvec(be, bvec);
1216 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1219 struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1220 struct z_erofs_pcluster *pcl = be->pcl;
1221 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1222 const struct z_erofs_decompressor *decomp =
1223 z_erofs_decomp[pcl->algorithmformat];
1224 int i, j, jtop, err2;
1228 mutex_lock(&pcl->lock);
1229 be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
	/* allocate (de)compressed page arrays if they cannot be kept on stack */
1232 be->decompressed_pages = NULL;
1233 be->compressed_pages = NULL;
1234 be->onstack_used = 0;
1235 if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1236 be->decompressed_pages = be->onstack_pages;
1237 be->onstack_used = be->nr_pages;
1238 memset(be->decompressed_pages, 0,
1239 sizeof(struct page *) * be->nr_pages);
1242 if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1243 be->compressed_pages = be->onstack_pages + be->onstack_used;
1245 if (!be->decompressed_pages)
1246 be->decompressed_pages =
1247 kvcalloc(be->nr_pages, sizeof(struct page *),
1248 GFP_KERNEL | __GFP_NOFAIL);
1249 if (!be->compressed_pages)
1250 be->compressed_pages =
1251 kvcalloc(pclusterpages, sizeof(struct page *),
1252 GFP_KERNEL | __GFP_NOFAIL);
1254 z_erofs_parse_out_bvecs(be);
1255 err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1259 err = decomp->decompress(&(struct z_erofs_decompress_req) {
1261 .in = be->compressed_pages,
1262 .out = be->decompressed_pages,
1263 .pageofs_in = pcl->pageofs_in,
1264 .pageofs_out = pcl->pageofs_out,
1265 .inputsize = pcl->pclustersize,
1266 .outputsize = pcl->length,
1267 .alg = pcl->algorithmformat,
1268 .inplace_io = overlapped,
1269 .partial_decoding = pcl->partial,
1270 .fillgaps = pcl->multibases,
1271 .gfp = pcl->besteffort ?
1272 GFP_KERNEL | __GFP_NOFAIL :
1273 GFP_NOWAIT | __GFP_NORETRY
1276 /* must handle all compressed pages before actual file pages */
1277 if (z_erofs_is_inline_pcluster(pcl)) {
1278 page = pcl->compressed_bvecs[0].page;
1279 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1282 /* managed folios are still left in compressed_bvecs[] */
1283 for (i = 0; i < pclusterpages; ++i) {
1284 page = be->compressed_pages[i];
1286 erofs_folio_is_managed(sbi, page_folio(page)))
1288 (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1289 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1292 if (be->compressed_pages < be->onstack_pages ||
1293 be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1294 kvfree(be->compressed_pages);
1297 z_erofs_fill_other_copies(be, err);
1298 for (i = 0; i < be->nr_pages; ++i) {
1299 page = be->decompressed_pages[i];
1303 DBG_BUGON(z_erofs_page_is_invalidated(page));
1304 if (!z_erofs_is_shortlived_page(page)) {
1305 z_erofs_onlinefolio_end(page_folio(page), err);
1308 if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
1309 erofs_pagepool_add(be->pagepool, page);
1312 for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
1314 if (j >= jtop) /* this bounce page is newly detected */
1315 be->decompressed_pages[jtop++] = page;
1318 erofs_pagepool_add(be->pagepool,
1319 be->decompressed_pages[--jtop]);
1320 if (be->decompressed_pages != be->onstack_pages)
1321 kvfree(be->decompressed_pages);
1324 pcl->partial = true;
1325 pcl->multibases = false;
1326 pcl->besteffort = false;
1327 pcl->bvset.nextpage = NULL;
1330 /* pcluster lock MUST be taken before the following line */
1331 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1332 mutex_unlock(&pcl->lock);
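/*
 * z_erofs_decompress_queue() drains one completed chain: each pcluster is
 * decompressed in turn, inline (meta) pclusters are freed immediately and the
 * others just drop the reference taken at registration time.
 */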
1336 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1337 struct page **pagepool)
1339 struct z_erofs_decompress_backend be = {
1341 .pagepool = pagepool,
1342 .decompressed_secondary_bvecs =
1343 LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1345 z_erofs_next_pcluster_t owned = io->head;
1347 while (owned != Z_EROFS_PCLUSTER_TAIL) {
1348 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1350 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1351 owned = READ_ONCE(be.pcl->next);
1353 z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
1354 if (z_erofs_is_inline_pcluster(be.pcl))
1355 z_erofs_free_pcluster(be.pcl);
1357 erofs_workgroup_put(&be.pcl->obj);
1361 static void z_erofs_decompressqueue_work(struct work_struct *work)
1363 struct z_erofs_decompressqueue *bgq =
1364 container_of(work, struct z_erofs_decompressqueue, u.work);
1365 struct page *pagepool = NULL;
1367 DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
1368 z_erofs_decompress_queue(bgq, &pagepool);
1369 erofs_release_pages(&pagepool);
1373 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1374 static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
1376 z_erofs_decompressqueue_work((struct work_struct *)work);
1380 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1383 struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1385 /* wake up the caller thread for sync decompression */
1387 if (!atomic_add_return(bios, &io->pending_bios))
1388 complete(&io->u.done);
1392 if (atomic_add_return(bios, &io->pending_bios))
1394 /* Use (kthread_)work and sync decompression for atomic contexts only */
1395 if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
1396 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1397 struct kthread_worker *worker;
1400 worker = rcu_dereference(
1401 z_erofs_pcpu_workers[raw_smp_processor_id()]);
1403 INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
1404 queue_work(z_erofs_workqueue, &io->u.work);
1406 kthread_queue_work(worker, &io->u.kthread_work);
1410 queue_work(z_erofs_workqueue, &io->u.work);
1412 /* enable sync decompression for readahead */
1413 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1414 sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1417 z_erofs_decompressqueue_work(&io->u.work);
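/*
 * Completion handling above: synchronous readers are woken directly once the
 * last bio finishes, while asynchronous completions are punted either to the
 * per-CPU kthread worker or to the unbound workqueue; if the submitter is
 * already in a safe (non-atomic) context, the queue is drained inline.
 */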
1420 static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1421 struct z_erofs_decompress_frontend *f,
1422 struct z_erofs_pcluster *pcl,
1424 struct address_space *mc)
1426 gfp_t gfp = mapping_gfp_mask(mc);
1427 bool tocache = false;
1428 struct z_erofs_bvec zbv;
1429 struct address_space *mapping;
1430 struct folio *folio;
1431 int bs = i_blocksize(f->inode);
1433 /* Except for inplace folios, the entire folio can be used for I/Os */
1434 bvec->bv_offset = 0;
1435 bvec->bv_len = PAGE_SIZE;
1437 spin_lock(&pcl->obj.lockref.lock);
1438 zbv = pcl->compressed_bvecs[nr];
1439 spin_unlock(&pcl->obj.lockref.lock);
1441 goto out_allocfolio;
1443 bvec->bv_page = zbv.page;
1444 DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
1446 folio = page_folio(zbv.page);
	/*
	 * Handle preallocated cached folios. We tried to allocate such folios
	 * without triggering direct reclaim. If allocation failed, inplace
	 * file-backed folios will be used instead.
	 */
1452 if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
1458 mapping = READ_ONCE(folio->mapping);
	/*
	 * File-backed folios for inplace I/Os are all kept locked, therefore
	 * it is impossible for `mapping` to be NULL here.
	 */
1463 if (mapping && mapping != mc) {
1465 bvec->bv_offset = round_up(-zbv.offset, bs);
1466 bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
1471 if (folio->mapping == mc) {
		/*
		 * The cached folio is still in managed cache but without
		 * a valid `->private` pcluster hint. Let's reconnect them.
		 */
1476 if (!folio_test_private(folio)) {
1477 folio_attach_private(folio, pcl);
1478 /* compressed_bvecs[] already takes a ref before */
1482 /* no need to submit if it is already up-to-date */
1483 if (folio_test_uptodate(folio)) {
1484 folio_unlock(folio);
1485 bvec->bv_page = NULL;
	/*
	 * It has been truncated, so it's unsafe to reuse this one. Let's
	 * allocate a new page for compressed data.
	 */
1494 DBG_BUGON(folio->mapping);
1496 folio_unlock(folio);
1499 zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
1500 spin_lock(&pcl->obj.lockref.lock);
1501 if (pcl->compressed_bvecs[nr].page) {
1502 erofs_pagepool_add(&f->pagepool, zbv.page);
1503 spin_unlock(&pcl->obj.lockref.lock);
1507 bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page;
1508 folio = page_folio(zbv.page);
1509 /* first mark it as a temporary shortlived folio (now 1 ref) */
1510 folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
1511 spin_unlock(&pcl->obj.lockref.lock);
1513 if (!tocache || bs != PAGE_SIZE ||
1514 filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp))
1516 folio_attach_private(folio, pcl);
1517 /* drop a refcount added by allocpage (then 2 refs in total here) */
1521 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
1522 struct z_erofs_decompressqueue *fgq, bool *fg)
1524 struct z_erofs_decompressqueue *q;
1527 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1532 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1533 kthread_init_work(&q->u.kthread_work,
1534 z_erofs_decompressqueue_kthread_work);
1536 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1541 init_completion(&fgq->u.done);
1542 atomic_set(&fgq->pending_bios, 0);
1547 q->head = Z_EROFS_PCLUSTER_TAIL;
/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};
1558 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1559 z_erofs_next_pcluster_t qtail[],
1560 z_erofs_next_pcluster_t owned_head)
1562 z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1563 z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1565 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
1567 WRITE_ONCE(*submit_qtail, owned_head);
1568 WRITE_ONCE(*bypass_qtail, &pcl->next);
1570 qtail[JQ_BYPASS] = &pcl->next;
1573 static void z_erofs_endio(struct bio *bio)
1575 struct z_erofs_decompressqueue *q = bio->bi_private;
1576 blk_status_t err = bio->bi_status;
1577 struct folio_iter fi;
1579 bio_for_each_folio_all(fi, bio) {
1580 struct folio *folio = fi.folio;
1582 DBG_BUGON(folio_test_uptodate(folio));
1583 DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
1584 if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
1588 folio_mark_uptodate(folio);
1589 folio_unlock(folio);
1593 z_erofs_decompress_kickoff(q, -1);
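/*
 * z_erofs_submit_queue() walks the collected pcluster chain: inline pclusters
 * and pclusters whose compressed data is already cached go to the bypass
 * queue, while the rest are packed into large read bios, merging physically
 * contiguous pclusters and entering a PSI memstall section when working-set
 * pages have to be re-read.
 */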
1598 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1599 struct z_erofs_decompressqueue *fgq,
1600 bool *force_fg, bool readahead)
1602 struct super_block *sb = f->inode->i_sb;
1603 struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1604 z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1605 struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1606 z_erofs_next_pcluster_t owned_head = f->owned_head;
1607 /* bio is NULL initially, so no need to initialize last_{index,bdev} */
1608 erofs_off_t last_pa;
1609 unsigned int nr_bios = 0;
1610 struct bio *bio = NULL;
1611 unsigned long pflags;
1614 /* No need to read from device for pclusters in the bypass queue. */
1615 q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1616 q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
1618 qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1619 qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1621 /* by default, all need io submission */
1622 q[JQ_SUBMIT]->head = owned_head;
1625 struct erofs_map_dev mdev;
1626 struct z_erofs_pcluster *pcl;
1627 erofs_off_t cur, end;
1628 struct bio_vec bvec;
1632 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1633 pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1634 owned_head = READ_ONCE(pcl->next);
1636 if (z_erofs_is_inline_pcluster(pcl)) {
1637 move_to_bypass_jobqueue(pcl, qtail, owned_head);
1641 /* no device id here, thus it will always succeed */
1642 mdev = (struct erofs_map_dev) {
1643 .m_pa = erofs_pos(sb, pcl->obj.index),
1645 (void)erofs_map_dev(sb, &mdev);
1648 end = cur + pcl->pclustersize;
1650 z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
1654 if (bio && (cur != last_pa ||
1655 bio->bi_bdev != mdev.m_bdev)) {
1657 if (!erofs_is_fscache_mode(sb))
1660 erofs_fscache_submit_bio(bio);
1663 psi_memstall_leave(&pflags);
1669 if (unlikely(PageWorkingset(bvec.bv_page)) &&
1671 psi_memstall_enter(&pflags);
1676 bio = erofs_is_fscache_mode(sb) ?
1677 erofs_fscache_bio_alloc(&mdev) :
1678 bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1679 REQ_OP_READ, GFP_NOIO);
1680 bio->bi_end_io = z_erofs_endio;
1681 bio->bi_iter.bi_sector = cur >> 9;
1682 bio->bi_private = q[JQ_SUBMIT];
1684 bio->bi_opf |= REQ_RAHEAD;
1688 if (cur + bvec.bv_len > end)
1689 bvec.bv_len = end - cur;
1690 DBG_BUGON(bvec.bv_len < sb->s_blocksize);
1691 if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
1695 last_pa = cur + bvec.bv_len;
1697 } while ((cur += bvec.bv_len) < end);
1700 qtail[JQ_SUBMIT] = &pcl->next;
1702 move_to_bypass_jobqueue(pcl, qtail, owned_head);
1703 } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1706 if (!erofs_is_fscache_mode(sb))
1709 erofs_fscache_submit_bio(bio);
1711 psi_memstall_leave(&pflags);
	/*
	 * Although background decompression is preferred, nothing is pending
	 * for submission; don't kick off decompression, just drop the queue
	 * directly instead.
	 */
1718 if (!*force_fg && !nr_bios) {
1719 kvfree(q[JQ_SUBMIT]);
1722 z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
1725 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1726 bool force_fg, bool ra)
1728 struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1730 if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1732 z_erofs_submit_queue(f, io, &force_fg, ra);
1734 /* handle bypass queue (no i/o pclusters) immediately */
1735 z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
1740 /* wait until all bios are completed */
1741 wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1743 /* handle synchronous decompress queue in the caller context */
1744 z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
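/*
 * Both queues are handled here: the bypass queue is decompressed immediately
 * in the caller context, while the submit queue is either completed
 * asynchronously or, for synchronous reads, waited on and then decompressed
 * right after the bios finish.
 */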
/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
 */
1751 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1752 struct readahead_control *rac, bool backmost)
1754 struct inode *inode = f->inode;
1755 struct erofs_map_blocks *map = &f->map;
1756 erofs_off_t cur, end, headoffset = f->headoffset;
1761 end = headoffset + readahead_length(rac) - 1;
1763 end = headoffset + PAGE_SIZE - 1;
1765 err = z_erofs_map_blocks_iter(inode, map,
1766 EROFS_GET_BLOCKS_READMORE);
1770 /* expand ra for the trailing edge if readahead */
1772 cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1773 readahead_expand(rac, headoffset, cur - headoffset);
1776 end = round_up(end, PAGE_SIZE);
1778 end = round_up(map->m_la, PAGE_SIZE);
1783 cur = map->m_la + map->m_llen - 1;
1784 while ((cur >= end) && (cur < i_size_read(inode))) {
1785 pgoff_t index = cur >> PAGE_SHIFT;
1786 struct folio *folio;
1788 folio = erofs_grab_folio_nowait(inode->i_mapping, index);
1789 if (!IS_ERR_OR_NULL(folio)) {
1790 if (folio_test_uptodate(folio))
1791 folio_unlock(folio);
1793 z_erofs_scan_folio(f, folio, !!rac);
1797 if (cur < PAGE_SIZE)
1799 cur = (index << PAGE_SHIFT) - 1;
1803 static int z_erofs_read_folio(struct file *file, struct folio *folio)
1805 struct inode *const inode = folio->mapping->host;
1806 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1807 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1810 trace_erofs_read_folio(folio, false);
1811 f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
1813 z_erofs_pcluster_readmore(&f, NULL, true);
1814 err = z_erofs_scan_folio(&f, folio, false);
1815 z_erofs_pcluster_readmore(&f, NULL, false);
1816 z_erofs_pcluster_end(&f);
	/* if some compressed clusters are ready, submit them anyway */
1819 z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
1821 if (err && err != -EINTR)
1822 erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
1823 err, folio->index, EROFS_I(inode)->nid);
1825 erofs_put_metabuf(&f.map.buf);
1826 erofs_release_pages(&f.pagepool);
1830 static void z_erofs_readahead(struct readahead_control *rac)
1832 struct inode *const inode = rac->mapping->host;
1833 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1834 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1835 struct folio *head = NULL, *folio;
1836 unsigned int nr_folios;
1839 f.headoffset = readahead_pos(rac);
1841 z_erofs_pcluster_readmore(&f, rac, true);
1842 nr_folios = readahead_count(rac);
1843 trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
1845 while ((folio = readahead_folio(rac))) {
1846 folio->private = head;
1850 /* traverse in reverse order for best metadata I/O performance */
1853 head = folio_get_private(folio);
1855 err = z_erofs_scan_folio(&f, folio, true);
1856 if (err && err != -EINTR)
1857 erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
1858 folio->index, EROFS_I(inode)->nid);
1860 z_erofs_pcluster_readmore(&f, rac, false);
1861 z_erofs_pcluster_end(&f);
1863 z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
1864 erofs_put_metabuf(&f.map.buf);
1865 erofs_release_pages(&f.pagepool);
const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};