Git Repo - linux.git/commitdiff
Merge tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm...
author	Linus Torvalds <[email protected]>
Mon, 31 Dec 2018 17:46:39 +0000 (09:46 -0800)
committer	Linus Torvalds <[email protected]>
Mon, 31 Dec 2018 17:46:39 +0000 (09:46 -0800)
Pull dax fix from Dan Williams:
 "Clean up unnecessary usage of prepare_to_wait_exclusive().

  While I feel a bit silly sending a single-commit pull-request, there is
  nothing else queued up for dax this cycle. This change has shipped in
  -next for multiple releases"

* tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Use non-exclusive wait in wait_entry_unlocked()
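
For context on the API being swapped here, below is a minimal, illustrative sketch of the two <linux/wait.h> idioms involved. It is not the fs/dax.c code: the waitqueue (demo_wq) and function names are made up, and the caller-side locking that fs/dax.c holds across prepare_to_wait() (the xarray lock dropped right after it in the hunk below) is elided.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_wait_nonexclusive(void)
{
	DEFINE_WAIT(wait);

	/*
	 * Non-exclusive wait: wake_up()/wake_up_all() wakes every such
	 * waiter, so a waiter that bails out without ever issuing a
	 * wake-up of its own cannot strand the remaining waiters.  This
	 * is the behaviour wait_entry_unlocked() switches to.
	 */
	prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
	schedule();
	finish_wait(&demo_wq, &wait);
}

static void demo_wait_exclusive(void)
{
	DEFINE_WAIT(wait);

	/*
	 * Exclusive wait: wake_up() wakes only one such waiter, so the
	 * woken task is expected either to take ownership and wake the
	 * next waiter on unlock, or to pass the wake-up along itself;
	 * the __wake_up() call removed by this commit existed for that
	 * reason.
	 */
	prepare_to_wait_exclusive(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
	schedule();
	finish_wait(&demo_wq, &wait);
}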

fs/dax.c

diff --combined fs/dax.c
index 262e14f299337803aa3656adca515722cb89796b,042d3b31b413a588b1701d1da159527bbb146bbd..6959837cc4659e16e19788d7bc362c3fb10b1b4e
--- a/fs/dax.c
+++ b/fs/dax.c
@@@ -246,18 -246,16 +246,16 @@@ static void wait_entry_unlocked(struct 
        ewait.wait.func = wake_exceptional_entry_func;
  
        wq = dax_entry_waitqueue(xas, entry, &ewait.key);
-       prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+       /*
+        * Unlike get_unlocked_entry() there is no guarantee that this
+        * path ever successfully retrieves an unlocked entry before an
+        * inode dies. Perform a non-exclusive wait in case this path
+        * never successfully performs its own wake up.
+        */
+       prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
        xas_unlock_irq(xas);
        schedule();
        finish_wait(wq, &ewait.wait);
-       /*
-        * Entry lock waits are exclusive. Wake up the next waiter since
-        * we aren't sure we will acquire the entry lock and thus wake
-        * the next waiter up on unlock.
-        */
-       if (waitqueue_active(wq))
-               __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
  }
  
  static void put_unlocked_entry(struct xa_state *xas, void *entry)
@@@ -779,8 -777,7 +777,8 @@@ static void dax_entry_mkclean(struct ad
  
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
 -              unsigned long address, start, end;
 +              struct mmu_notifier_range range;
 +              unsigned long address;
  
                cond_resched();
  
                 * call mmu_notifier_invalidate_range_start() on our behalf
                 * before taking any lock.
                 */
 -              if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
 +              if (follow_pte_pmd(vma->vm_mm, address, &range,
 +                                 &ptep, &pmdp, &ptl))
                        continue;
  
                /*
@@@ -837,7 -833,7 +835,7 @@@ unlock_pte
                        pte_unmap_unlock(ptep, ptl);
                }
  
 -              mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 +              mmu_notifier_invalidate_range_end(&range);
        }
        i_mmap_unlock_read(mapping);
  }
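
The second hunk in this combined diff comes in from the mm side and converts fs/dax.c to the then-new struct mmu_notifier_range. A rough sketch of the calling convention it moves to follows; the helper name demo_invalidate() is made up, and the four-argument mmu_notifier_range_init() is assumed from the v5.0-era API (later kernels added further parameters). In the dax path itself, follow_pte_pmd() performs the range setup and the _start notification on the caller's behalf, as the comment in the hunk notes, leaving only the _end call in dax code.

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

static void demo_invalidate(struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	/*
	 * Bundle mm/start/end into one structure instead of passing the
	 * three values to every notifier call (assumed v5.0 signature).
	 */
	mmu_notifier_range_init(&range, mm, start, end);
	mmu_notifier_invalidate_range_start(&range);

	/* ... walk and write-protect page-table entries for [start, end) ... */

	mmu_notifier_invalidate_range_end(&range);
}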