Git Repo - linux.git/commitdiff
exit: Factor coredump_exit_mm out of exit_mm
authorEric W. Biederman <[email protected]>
Wed, 1 Sep 2021 16:23:38 +0000 (11:23 -0500)
committerEric W. Biederman <[email protected]>
Wed, 6 Oct 2021 16:28:21 +0000 (11:28 -0500)
Separate the coredump logic from the ordinary exit_mm logic
by moving the coredump logic out of exit_mm into its own
function coredump_exit_mm.

Link: https://lkml.kernel.org/r/87a6k2x277.fsf@disp2133
Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
fs/coredump.c
kernel/exit.c
mm/oom_kill.c

index 3224dee44d30ed3206ec3881545a540d6c59db56..5e0e08a7fb9b13964a9e27c93d3a6e450986831c 100644 (file)
@@ -404,8 +404,8 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
         *
         * do_exit:
         *      The caller holds mm->mmap_lock. This means that the task which
-        *      uses this mm can't pass exit_mm(), so it can't exit or clear
-        *      its ->mm.
+        *      uses this mm can't pass coredump_exit_mm(), so it can't exit or
+        *      clear its ->mm.
         *
         * de_thread:
         *      It does list_replace_rcu(&leader->tasks, &current->tasks),
@@ -500,7 +500,7 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
                next = curr->next;
                task = curr->task;
                /*
-                * see exit_mm(), curr->task must not see
+                * see coredump_exit_mm(), curr->task must not see
                 * ->task == NULL before we read ->next.
                 */
                smp_mb();
index 91a43e57a32ebbf155287f580094e3c0f66e1188..cb1619d8fd64f7bb7f7ee4c0b88671dae0c653a9 100644 (file)
@@ -339,6 +339,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
        }
 }
 
+static void coredump_exit_mm(struct mm_struct *mm)
+{
+       struct core_state *core_state;
+
+       /*
+        * Serialize with any possible pending coredump.
+        * We must hold mmap_lock around checking core_state
+        * and clearing tsk->mm.  The core-inducing thread
+        * will increment ->nr_threads for each thread in the
+        * group with ->mm != NULL.
+        */
+       core_state = mm->core_state;
+       if (core_state) {
+               struct core_thread self;
+
+               mmap_read_unlock(mm);
+
+               self.task = current;
+               if (self.task->flags & PF_SIGNALED)
+                       self.next = xchg(&core_state->dumper.next, &self);
+               else
+                       self.task = NULL;
+               /*
+                * Implies mb(), the result of xchg() must be visible
+                * to core_state->dumper.
+                */
+               if (atomic_dec_and_test(&core_state->nr_threads))
+                       complete(&core_state->startup);
+
+               for (;;) {
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       if (!self.task) /* see coredump_finish() */
+                               break;
+                       freezable_schedule();
+               }
+               __set_current_state(TASK_RUNNING);
+               mmap_read_lock(mm);
+       }
+}
+
 #ifdef CONFIG_MEMCG
 /*
  * A task is exiting.   If it owned this mm, find a new owner for the mm.
@@ -434,47 +474,13 @@ assign_new_owner:
 static void exit_mm(void)
 {
        struct mm_struct *mm = current->mm;
-       struct core_state *core_state;
 
        exit_mm_release(current, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
-       /*
-        * Serialize with any possible pending coredump.
-        * We must hold mmap_lock around checking core_state
-        * and clearing tsk->mm.  The core-inducing thread
-        * will increment ->nr_threads for each thread in the
-        * group with ->mm != NULL.
-        */
        mmap_read_lock(mm);
-       core_state = mm->core_state;
-       if (core_state) {
-               struct core_thread self;
-
-               mmap_read_unlock(mm);
-
-               self.task = current;
-               if (self.task->flags & PF_SIGNALED)
-                       self.next = xchg(&core_state->dumper.next, &self);
-               else
-                       self.task = NULL;
-               /*
-                * Implies mb(), the result of xchg() must be visible
-                * to core_state->dumper.
-                */
-               if (atomic_dec_and_test(&core_state->nr_threads))
-                       complete(&core_state->startup);
-
-               for (;;) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       if (!self.task) /* see coredump_finish() */
-                               break;
-                       freezable_schedule();
-               }
-               __set_current_state(TASK_RUNNING);
-               mmap_read_lock(mm);
-       }
+       coredump_exit_mm(mm);
        mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
index 831340e7ad8b4721a066c7d7d850e9b841ab38ff..295c8bdfd6c8bec6385316258a04ee2f29132fd2 100644 (file)
@@ -787,9 +787,9 @@ static inline bool __task_will_free_mem(struct task_struct *task)
        struct signal_struct *sig = task->signal;
 
        /*
-        * A coredumping process may sleep for an extended period in exit_mm(),
-        * so the oom killer cannot assume that the process will promptly exit
-        * and release memory.
+        * A coredumping process may sleep for an extended period in
+        * coredump_exit_mm(), so the oom killer cannot assume that
+        * the process will promptly exit and release memory.
         */
        if (sig->flags & SIGNAL_GROUP_COREDUMP)
                return false;
This page took 0.065472 seconds and 4 git commands to generate.