Git Repo - J-linux.git/commitdiff
merge mm-hotfixes-stable into mm-nonmm-stable to pick up stackdepot changes
authorAndrew Morton <[email protected]>
Sat, 24 Feb 2024 01:28:43 +0000 (17:28 -0800)
committerAndrew Morton <[email protected]>
Sat, 24 Feb 2024 01:28:43 +0000 (17:28 -0800)
1  2 
MAINTAINERS
mm/filemap.c
mm/kasan/common.c
mm/migrate.c

diff --combined MAINTAINERS
index 1dbc57efa6d06092fbfe71a618e98ee34f50894a,f7c81cea9b69e5e67ae42fe4f44f6632dcf067fc..f3f5981ced296144d4763d006ac2e623e36c7c0a
@@@ -14111,6 -14111,17 +14111,17 @@@ F: mm
  F:    tools/mm/
  F:    tools/testing/selftests/mm/
  
+ MEMORY MAPPING
+ M:    Andrew Morton <[email protected]>
+ R:    Liam R. Howlett <[email protected]>
+ R:    Vlastimil Babka <[email protected]>
+ R:    Lorenzo Stoakes <[email protected]>
+ L:    [email protected]
+ S:    Maintained
+ W:    http://www.linux-mm.org
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+ F:    mm/mmap.c
  MEMORY TECHNOLOGY DEVICES (MTD)
  M:    Miquel Raynal <[email protected]>
  M:    Richard Weinberger <[email protected]>
@@@ -24412,7 -24423,6 +24423,7 @@@ F:   include/linux/zpool.
  F:    include/linux/zswap.h
  F:    mm/zpool.c
  F:    mm/zswap.c
 +F:    tools/testing/selftests/cgroup/test_zswap.c
  
  THE REST
  M:    Linus Torvalds <[email protected]>
diff --combined mm/filemap.c
index 142864338ca4f2600bd54a411ffba6fbb558fcde,4a30de98a8c75daec31d1d79d15a9d9514e9fd1d..b7a21551fbc7cab3a6d19ecd98aa5471858722c5
@@@ -843,7 -843,7 +843,7 @@@ noinline int __filemap_add_folio(struc
                struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
  {
        XA_STATE(xas, &mapping->i_pages, index);
 -      int huge = folio_test_hugetlb(folio);
 +      bool huge = folio_test_hugetlb(folio);
        bool charged = false;
        long nr = 1;
  
@@@ -1354,7 -1354,7 +1354,7 @@@ void migration_entry_wait_on_locked(swp
        unsigned long pflags;
        bool in_thrashing;
        wait_queue_head_t *q;
 -      struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
 +      struct folio *folio = pfn_swap_entry_folio(entry);
  
        q = folio_waitqueue(folio);
        if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
@@@ -4111,28 -4111,40 +4111,40 @@@ static void filemap_cachestat(struct ad
  
        rcu_read_lock();
        xas_for_each(&xas, folio, last_index) {
+               int order;
                unsigned long nr_pages;
                pgoff_t folio_first_index, folio_last_index;
  
+               /*
+                * Don't deref the folio. It is not pinned, and might
+                * get freed (and reused) underneath us.
+                *
+                * We *could* pin it, but that would be expensive for
+                * what should be a fast and lightweight syscall.
+                *
+                * Instead, derive all information of interest from
+                * the rcu-protected xarray.
+                */
                if (xas_retry(&xas, folio))
                        continue;
  
+               order = xa_get_order(xas.xa, xas.xa_index);
+               nr_pages = 1 << order;
+               folio_first_index = round_down(xas.xa_index, 1 << order);
+               folio_last_index = folio_first_index + nr_pages - 1;
+               /* Folios might straddle the range boundaries, only count covered pages */
+               if (folio_first_index < first_index)
+                       nr_pages -= first_index - folio_first_index;
+               if (folio_last_index > last_index)
+                       nr_pages -= folio_last_index - last_index;
                if (xa_is_value(folio)) {
                        /* page is evicted */
                        void *shadow = (void *)folio;
                        bool workingset; /* not used */
-                       int order = xa_get_order(xas.xa, xas.xa_index);
-                       nr_pages = 1 << order;
-                       folio_first_index = round_down(xas.xa_index, 1 << order);
-                       folio_last_index = folio_first_index + nr_pages - 1;
-                       /* Folios might straddle the range boundaries, only count covered pages */
-                       if (folio_first_index < first_index)
-                               nr_pages -= first_index - folio_first_index;
-                       if (folio_last_index > last_index)
-                               nr_pages -= folio_last_index - last_index;
  
                        cs->nr_evicted += nr_pages;
  
                        goto resched;
                }
  
-               nr_pages = folio_nr_pages(folio);
-               folio_first_index = folio_pgoff(folio);
-               folio_last_index = folio_first_index + nr_pages - 1;
-               /* Folios might straddle the range boundaries, only count covered pages */
-               if (folio_first_index < first_index)
-                       nr_pages -= first_index - folio_first_index;
-               if (folio_last_index > last_index)
-                       nr_pages -= folio_last_index - last_index;
                /* page is in cache */
                cs->nr_cache += nr_pages;
  
-               if (folio_test_dirty(folio))
+               if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
                        cs->nr_dirty += nr_pages;
  
-               if (folio_test_writeback(folio))
+               if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
                        cs->nr_writeback += nr_pages;
  
  resched:
diff --combined mm/kasan/common.c
index f2747ed30da084411658211d59fb9f1eeab1d0ec,6ca63e8dda741b5e4094f7205f0b74a163be2e43..e7c9a4dc89f826943a37dd39ce876776be55b23d
@@@ -55,7 -55,7 +55,7 @@@ void kasan_set_track(struct kasan_trac
        u64 ts_nsec = local_clock();
  
        track->cpu = cpu;
 -      track->timestamp = ts_nsec >> 3;
 +      track->timestamp = ts_nsec >> 9;
  #endif /* CONFIG_KASAN_EXTRA_INFO */
        track->pid = current->pid;
        track->stack = stack;
@@@ -65,8 -65,7 +65,7 @@@ void kasan_save_track(struct kasan_trac
  {
        depot_stack_handle_t stack;
  
-       stack = kasan_save_stack(flags,
-                       STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+       stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
        kasan_set_track(track, stack);
  }
  
@@@ -266,10 -265,9 +265,9 @@@ bool __kasan_slab_free(struct kmem_cach
                return true;
  
        /*
-        * If the object is not put into quarantine, it will likely be quickly
-        * reallocated. Thus, release its metadata now.
+        * Note: Keep per-object metadata to allow KASAN print stack traces for
+        * use-after-free-before-realloc bugs.
         */
-       kasan_release_object_meta(cache, object);
  
        /* Let slab put the object onto the freelist. */
        return false;
diff --combined mm/migrate.c
index 05d6ca437321564925e4f1ac0e2949684c32b1f3,c27b1f8097d4a72e569ce5a06be42b93184e9db0..73a052a382f13a21bd72e23fb5996ae07c3022d3
@@@ -211,17 -211,14 +211,17 @@@ static bool remove_migration_pte(struc
                folio_get(folio);
                pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
                old_pte = ptep_get(pvmw.pte);
 -              if (pte_swp_soft_dirty(old_pte))
 -                      pte = pte_mksoft_dirty(pte);
  
                entry = pte_to_swp_entry(old_pte);
                if (!is_migration_entry_young(entry))
                        pte = pte_mkold(pte);
                if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
                        pte = pte_mkdirty(pte);
 +              if (pte_swp_soft_dirty(old_pte))
 +                      pte = pte_mksoft_dirty(pte);
 +              else
 +                      pte = pte_clear_soft_dirty(pte);
 +
                if (is_writable_migration_entry(entry))
                        pte = pte_mkwrite(pte, vma);
                else if (pte_swp_uffd_wp(old_pte))
@@@ -2522,6 -2519,14 +2522,14 @@@ static int numamigrate_isolate_folio(pg
                        if (managed_zone(pgdat->node_zones + z))
                                break;
                }
+               /*
+                * If there are no managed zones, it should not proceed
+                * further.
+                */
+               if (z < 0)
+                       return 0;
                wakeup_kswapd(pgdat->node_zones + z, 0,
                              folio_order(folio), ZONE_MOVABLE);
                return 0;
This page took 0.091779 seconds and 4 git commands to generate.