mm: CONFIG_MMU for PG_mlocked
index 28aafe2b530668b03c766619a83873ee2a91087e..eb3dfc8355ea8d8cf364ec081f21455d67f86275 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -203,7 +203,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
-       if (!(anon_mapping & PAGE_MAPPING_ANON))
+       if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))
                goto out;
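
The stricter test here distinguishes plain anon pages from KSM pages, which also carry the anon bit. A sketch of the page->mapping tag-bit encoding this relies on, assuming the 2.6.32-era definitions in include/linux/mm.h (not part of this diff):

#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

/*
 * Masking with PAGE_MAPPING_FLAGS and comparing against PAGE_MAPPING_ANON
 * accepts a mapping that is a plain anon_vma pointer, but rejects a KSM
 * page, whose mapping has both low bits set, so page_lock_anon_vma() no
 * longer mistakes a KSM stable node for an anon_vma.
 */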
@@ -242,14 +242,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
- * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used on anon pages, by unuse_vma;
+ * At what user virtual address is page expected in vma?
+ * checking that the page matches the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
        if (PageAnon(page)) {
-               if ((void *)vma->anon_vma !=
-                   (void *)page->mapping - PAGE_MAPPING_ANON)
+               if (vma->anon_vma != page_anon_vma(page))
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
@@ -388,9 +387,10 @@ static int page_referenced_one(struct page *page,
 out_unmap:
        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
-out:
+
        if (referenced)
                *vm_flags |= vma->vm_flags;
+out:
        return referenced;
 }
 
@@ -512,7 +512,7 @@ int page_referenced(struct page *page,
                referenced++;
 
        *vm_flags = 0;
-       if (page_mapped(page) && page->mapping) {
+       if (page_mapped(page) && page_rmapping(page)) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page, mem_cont,
                                                                vm_flags);
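
Unlike the bare page->mapping test it replaces, page_rmapping() strips the tag bits before testing the pointer, so tagged anon/KSM mappings are still recognized. A hedged sketch of its definition, assuming the include/linux/mm.h that accompanies this series:

static inline void *page_rmapping(struct page *page)
{
	/* mask off the PAGE_MAPPING_ANON/PAGE_MAPPING_KSM tag bits */
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}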
@@ -788,6 +788,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        ret = SWAP_MLOCK;
                        goto out_unmap;
                }
+               if (TTU_ACTION(flags) == TTU_MUNLOCK)
+                       goto out_unmap;
        }
        if (!(flags & TTU_IGNORE_ACCESS)) {
                if (ptep_clear_flush_young_notify(vma, address, pte)) {
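
When called for munlock rather than unmap, there is nothing left to do once the vma turns out not to be VM_LOCKED, hence the new early goto. For context, the action encoding assumed by TTU_ACTION() (a hedged sketch of include/linux/rmap.h from this era):

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)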
@@ -822,7 +824,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
-                       swap_duplicate(entry);
+                       if (swap_duplicate(entry) < 0) {
+                               set_pte_at(mm, address, pte, pteval);
+                               ret = SWAP_FAIL;
+                               goto out_unmap;
+                       }
                        if (list_empty(&mm->mmlist)) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&mm->mmlist))
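
This hunk depends on swap_duplicate() having grown a return value: with swap-count continuations it returns 0 on success and -ENOMEM when a continuation page cannot be allocated, in which case the pte is restored and SWAP_FAIL reported. A hedged sketch of the new convention, roughly as in the mm/swapfile.c of this series:

int swap_duplicate(swp_entry_t entry)
{
	int err = 0;

	/* retry while the per-entry count is full and a continuation
	 * page is needed to hold the overflow */
	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
		err = add_swap_count_continuation(entry, GFP_ATOMIC);
	return err;	/* 0, or -ENOMEM if that allocation failed */
}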
@@ -849,12 +855,22 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        } else
                dec_mm_counter(mm, file_rss);
 
-
        page_remove_rmap(page);
        page_cache_release(page);
 
 out_unmap:
        pte_unmap_unlock(pte, ptl);
+
+       if (ret == SWAP_MLOCK) {
+               ret = SWAP_AGAIN;
+               if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+                       if (vma->vm_flags & VM_LOCKED) {
+                               mlock_vma_page(page);
+                               ret = SWAP_MLOCK;
+                       }
+                       up_read(&vma->vm_mm->mmap_sem);
+               }
+       }
 out:
        return ret;
 }
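
This tail folds the old try_to_mlock_page() helper (deleted below) into try_to_unmap_one() itself: after dropping the pte lock, retake mmap_sem for read, and only if the vma is still VM_LOCKED actually mlock the page and keep SWAP_MLOCK; otherwise downgrade to SWAP_AGAIN. For reference, mlock_vma_page() does roughly the following (hedged sketch of mm/mlock.c from this era):

void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		/* cycle the page through the LRU so it lands on
		 * the unevictable list */
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}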
@@ -922,11 +938,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                return ret;
 
        /*
-        * MLOCK_PAGES => feature is configured.
-        * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+        * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
         * keep the sem while scanning the cluster for mlocking pages.
         */
-       if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+       if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
                locked_vma = (vma->vm_flags & VM_LOCKED);
                if (!locked_vma)
                        up_read(&vma->vm_mm->mmap_sem); /* don't need it */
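
MLOCK_PAGES was a compile-time constant that compiled the mlock handling out on configurations lacking the PG_mlocked bit; with this commit making PG_mlocked depend simply on CONFIG_MMU, the constant goes away and the trylock runs unconditionally. Its old definition in mm/rmap.c was roughly this (hedged, config symbol from memory):

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
#define MLOCK_PAGES	1
#else
#define MLOCK_PAGES	0
#endif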
@@ -976,29 +991,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
        return ret;
 }
 
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
-       int mlocked = 0;
-
-       if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-               if (vma->vm_flags & VM_LOCKED) {
-                       mlock_vma_page(page);
-                       mlocked++;      /* really mlocked the page */
-               }
-               up_read(&vma->vm_mm->mmap_sem);
-       }
-       return mlocked;
-}
-
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
  * @page: the page to unmap/unlock
- * @unlock:  request for unlock rather than unmap [unlikely]
- * @migration:  unmapping for migration - ignored if @unlock
+ * @flags: action and flags
  *
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
@@ -1014,42 +1011,19 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
-       unsigned int mlocked = 0;
        int ret = SWAP_AGAIN;
-       int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-       if (MLOCK_PAGES && unlikely(unlock))
-               ret = SWAP_SUCCESS;     /* default for try_to_munlock() */
 
        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-               if (MLOCK_PAGES && unlikely(unlock)) {
-                       if (!((vma->vm_flags & VM_LOCKED) &&
-                             page_mapped_in_vma(page, vma)))
-                               continue;  /* must visit all unlocked vmas */
-                       ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
-               } else {
-                       ret = try_to_unmap_one(page, vma, flags);
-                       if (ret == SWAP_FAIL || !page_mapped(page))
-                               break;
-               }
-               if (ret == SWAP_MLOCK) {
-                       mlocked = try_to_mlock_page(page, vma);
-                       if (mlocked)
-                               break;  /* stop if actually mlocked page */
-               }
+               ret = try_to_unmap_one(page, vma, flags);
+               if (ret != SWAP_AGAIN || !page_mapped(page))
+                       break;
        }
 
        page_unlock_anon_vma(anon_vma);
-
-       if (mlocked)
-               ret = SWAP_MLOCK;       /* actually mlocked the page */
-       else if (ret == SWAP_MLOCK)
-               ret = SWAP_AGAIN;       /* saw VM_LOCKED vma */
-
        return ret;
 }
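
With the mlock fixup moved into try_to_unmap_one(), the loop can treat any return other than SWAP_AGAIN as final, and the SWAP_MLOCK post-processing below disappears. For reference, the return codes being threaded through here, as defined in include/linux/rmap.h (glosses are ours):

#define SWAP_SUCCESS	0	/* page unmapped everywhere */
#define SWAP_AGAIN	1	/* mappings remain, or try again later */
#define SWAP_FAIL	2	/* hard failure, give up */
#define SWAP_MLOCK	3	/* page is mlocked in a VM_LOCKED vma */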
 
@@ -1079,48 +1053,27 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;
-       unsigned int mlocked = 0;
-       int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-       if (MLOCK_PAGES && unlikely(unlock))
-               ret = SWAP_SUCCESS;     /* default for try_to_munlock() */
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-               if (MLOCK_PAGES && unlikely(unlock)) {
-                       if (!((vma->vm_flags & VM_LOCKED) &&
-                                               page_mapped_in_vma(page, vma)))
-                               continue;       /* must visit all vmas */
-                       ret = SWAP_MLOCK;
-               } else {
-                       ret = try_to_unmap_one(page, vma, flags);
-                       if (ret == SWAP_FAIL || !page_mapped(page))
-                               goto out;
-               }
-               if (ret == SWAP_MLOCK) {
-                       mlocked = try_to_mlock_page(page, vma);
-                       if (mlocked)
-                               break;  /* stop if actually mlocked page */
-               }
+               ret = try_to_unmap_one(page, vma, flags);
+               if (ret != SWAP_AGAIN || !page_mapped(page))
+                       goto out;
        }
 
-       if (mlocked)
+       if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;
 
-       if (list_empty(&mapping->i_mmap_nonlinear))
+       /*
+        * We don't bother to try to find the munlocked page in nonlinears.
+        * It's costly. Instead, later, page reclaim logic may call
+        * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+        */
+       if (TTU_ACTION(flags) == TTU_MUNLOCK)
                goto out;
 
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-               if (MLOCK_PAGES && unlikely(unlock)) {
-                       if (!(vma->vm_flags & VM_LOCKED))
-                               continue;       /* must visit all vmas */
-                       ret = SWAP_MLOCK;       /* leave mlocked == 0 */
-                       goto out;               /* no need to look further */
-               }
-               if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-                       (vma->vm_flags & VM_LOCKED))
-                       continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
@@ -1153,16 +1106,12 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-                       if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-                           (vma->vm_flags & VM_LOCKED))
-                               continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while ( cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
-                               ret = try_to_unmap_cluster(cursor, &mapcount,
-                                                               vma, page);
-                               if (ret == SWAP_MLOCK)
-                                       mlocked = 2;    /* to return below */
+                               if (try_to_unmap_cluster(cursor, &mapcount,
+                                               vma, page) == SWAP_MLOCK)
+                                       ret = SWAP_MLOCK;
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
@@ -1183,10 +1132,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
                vma->vm_private_data = NULL;
 out:
        spin_unlock(&mapping->i_mmap_lock);
-       if (mlocked)
-               ret = SWAP_MLOCK;       /* actually mlocked the page */
-       else if (ret == SWAP_MLOCK)
-               ret = SWAP_AGAIN;       /* saw VM_LOCKED vma */
        return ret;
 }
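
The nonlinear walk above advances its per-vma cursor in fixed-size strides. For reference, the stride is defined near the top of mm/rmap.c (a sketch):

#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))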
 
@@ -1229,7 +1174,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * Return values are:
  *
- * SWAP_SUCCESS        - no vma's holding page mlocked.
+ * SWAP_AGAIN  - no vma is holding page mlocked, or,
  * SWAP_AGAIN  - page mapped in mlocked vma -- couldn't acquire mmap sem
  * SWAP_MLOCK  - page is now mlocked.
  */