diff --git a/mm/filemap.c b/mm/filemap.c
index 9daeaab36081fd0ab86a9aaf4f8b7b62cfd45086..ffdfbc8b0e3cab54b046b36929b1a38fd987e04a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping,
                        continue;
                if (xas.xa_index > max || xa_is_value(folio))
                        break;
+               if (xa_is_sibling(folio))
+                       break;
                if (!folio_try_get_rcu(folio))
                        goto retry;
 
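A note on this first hunk: when a large folio is stored in the page
cache, the XArray records it as a multi-index entry, and every slot
after the first holds a "sibling" entry pointing back at the canonical
slot. The RCU walk in filemap_get_read_batch() can race with a folio
being split and land on such a slot, so it must stop rather than
dereference the sibling entry as a folio. For reference, xa_is_sibling()
lives in include/linux/xarray.h and (paraphrased from memory; check the
tree you build against) looks roughly like:

    static inline bool xa_is_sibling(const void *entry)
    {
            return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
                    (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
    }

A sibling entry is an internal entry whose payload is the chunk offset
of the canonical slot, so the check is cheap and compiles away entirely
when CONFIG_XARRAY_MULTI is disabled.
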
@@ -2629,6 +2631,13 @@ err:
        return err;
 }
 
+static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
+{
+       unsigned int shift = folio_shift(folio);
+
+       return (pos1 >> shift == pos2 >> shift);
+}
+
 /**
  * filemap_read - Read data from the page cache.
  * @iocb: The iocb to read.
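The new pos_same_folio() helper generalizes the old page-granular
comparison: two file positions lie in the same folio exactly when they
agree in every bit above folio_shift(), which is PAGE_SHIFT plus the
folio's order. A minimal userspace sketch of the arithmetic, assuming
4 KiB pages and an order-2 (16 KiB) folio:

    #include <assert.h>
    #include <stdbool.h>

    /* Same test as pos_same_folio(), with folio_shift() passed in. */
    static bool pos_same_folio(long long pos1, long long pos2, unsigned int shift)
    {
            return (pos1 >> shift) == (pos2 >> shift);
    }

    int main(void)
    {
            unsigned int shift = 12 + 2;    /* PAGE_SHIFT + folio order */

            assert(pos_same_folio(0x0000, 0x3fff, shift));  /* same 16 KiB folio */
            assert(!pos_same_folio(0x3fff, 0x4000, shift)); /* crosses into the next */
            return 0;
    }
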
@@ -2700,11 +2709,11 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
                writably_mapped = mapping_writably_mapped(mapping);
 
                /*
-                * When a sequential read accesses a page several times, only
+                * When a read accesses the same folio several times, only
                 * mark it as accessed the first time.
                 */
-               if (iocb->ki_pos >> PAGE_SHIFT !=
-                   ra->prev_pos >> PAGE_SHIFT)
+               if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
+                                                       fbatch.folios[0]))
                        folio_mark_accessed(fbatch.folios[0]);
 
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
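Two things change in the hunk above. First, the test is now
folio-granular, so a read that revisits a large folio page by page no
longer re-marks it accessed at every page boundary. Second, the old
position is taken as ra->prev_pos - 1: prev_pos records the byte just
past the previous read, so without the subtraction a read that ended
exactly on a folio boundary would be compared against the next folio
rather than the one actually read. A tiny sketch of that off-by-one,
reusing the 16 KiB folio from above:

    #include <assert.h>

    int main(void)
    {
            long long prev_pos = 0x4000;    /* just past the last byte read */
            unsigned int shift = 14;        /* 16 KiB folio */

            assert(prev_pos >> shift == 1);         /* points into the next folio */
            assert((prev_pos - 1) >> shift == 0);   /* the folio actually read */
            return 0;
    }
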
@@ -2991,11 +3000,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
        struct file *fpin = NULL;
+       unsigned long vm_flags = vmf->vma->vm_flags;
        unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* Use the readahead code, even if readahead is disabled */
-       if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+       if (vm_flags & VM_HUGEPAGE) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
                ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3013,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
                 * Fetch two PMD folios, so we get the chance to actually
                 * readahead, unless we've been told not to.
                 */
-               if (!(vmf->vma->vm_flags & VM_RAND_READ))
+               if (!(vm_flags & VM_RAND_READ))
                        ra->size *= 2;
                ra->async_size = HPAGE_PMD_NR;
                page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
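In this THP path the fault index is first rounded down to a PMD
boundary and the synchronous window set to one PMD-sized folio, doubled
unless VM_RAND_READ was requested. Worked numbers, assuming the common
x86-64 configuration of 4 KiB pages and 2 MiB PMDs (HPAGE_PMD_ORDER ==
9, HPAGE_PMD_NR == 512):

    #include <stdio.h>

    int main(void)
    {
            unsigned int hpage_pmd_nr = 512;        /* pages per PMD folio */
            unsigned int size = hpage_pmd_nr * 2;   /* two PMD folios fetched */
            unsigned int async_size = hpage_pmd_nr; /* async readahead tail */

            /* Prints "sync window 4 MiB, async tail 2 MiB". */
            printf("sync window %u MiB, async tail %u MiB\n",
                   size * 4 / 1024, async_size * 4 / 1024);
            return 0;
    }
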
@@ -3012,12 +3022,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
        /* If we don't want any read-ahead, don't bother */
-       if (vmf->vma->vm_flags & VM_RAND_READ)
+       if (vm_flags & VM_RAND_READ)
                return fpin;
        if (!ra->ra_pages)
                return fpin;
 
-       if (vmf->vma->vm_flags & VM_SEQ_READ) {
+       if (vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_sync_ra(&ractl, ra->ra_pages);
                return fpin;
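The remaining hunks are mechanical: vmf->vma->vm_flags is read once
into a local variable instead of being re-dereferenced for every test,
with no change in behavior. The flags themselves are usually set from
userspace through madvise(); a small sketch of that side, using the
standard mapping of MADV_* hints onto the VM_* flags tested above:

    #include <stddef.h>
    #include <sys/mman.h>

    /* MADV_SEQUENTIAL sets VM_SEQ_READ (aggressive readahead),
     * MADV_RANDOM sets VM_RAND_READ (readahead suppressed), and
     * MADV_HUGEPAGE sets VM_HUGEPAGE (try the PMD-sized path above). */
    static void hint_access_pattern(void *addr, size_t len, int sequential)
    {
            madvise(addr, len, sequential ? MADV_SEQUENTIAL : MADV_RANDOM);
    }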