Git Repo - linux.git/commitdiff
Merge tag 'gfs2-v5.19-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <[email protected]>
Sat, 6 Aug 2022 21:44:49 +0000 (14:44 -0700)
committerLinus Torvalds <[email protected]>
Sat, 6 Aug 2022 21:44:49 +0000 (14:44 -0700)
Pull gfs2 updates from Andreas Gruenbacher:

 - Instantiate glocks outside of the glock state engine, in the context
   of the process taking the glock. This moves unnecessary complexity
   out of the core glock code. Clean up the instantiate logic to be more
   sensible.

 - In gfs2_glock_async_wait(), cancel pending locking request upon
   failure. Make sure all glocks are left in a consistent state.

 - Various other minor cleanups and fixes.

* tag 'gfs2-v5.19-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: List traversal in do_promote is safe
  gfs2: do_promote glock holder stealing fix
  gfs2: Use better variable name
  gfs2: Make go_instantiate take a glock
  gfs2: Add new go_held glock operation
  gfs2: Revert 'Fix "truncate in progress" hang'
  gfs2: Instantiate glocks ouside of glock state engine
  gfs2: Fix up gfs2_glock_async_wait
  gfs2: Minor gfs2_glock_nq_m cleanup
  gfs2: Fix spelling mistake in comment
  gfs2: Rewrap overlong comment in do_promote
  gfs2: Remove redundant NULL check before kfree

1  2 
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/main.c
fs/gfs2/quota.c

diff --combined fs/gfs2/dir.c
index a0562dd1bada22671c421a60b633c00fc8d069e4,df938b8c8359c8c4e95e7faca4e9545abfebd34d..54a6d17b8c252211e3889121b5089b82244faaeb
@@@ -1508,8 -1508,9 +1508,8 @@@ static void gfs2_dir_readahead(struct i
                                continue;
                        }
                        bh->b_end_io = end_buffer_read_sync;
 -                      submit_bh(REQ_OP_READ,
 -                                REQ_RAHEAD | REQ_META | REQ_PRIO,
 -                                bh);
 +                      submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
 +                                REQ_PRIO, bh);
                        continue;
                }
                brelse(bh);
@@@ -2016,7 -2017,7 +2016,7 @@@ static int leaf_dealloc(struct gfs2_ino
                l_blocks++;
        }
  
-       gfs2_rlist_alloc(&rlist);
+       gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
  
        for (x = 0; x < rlist.rl_rgrps; x++) {
                struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
diff --combined fs/gfs2/glock.c
index dca842379caba00380fa947e5fe3440d768430d4,0b36a16659b625d5cbfb8b6c24151cc1f5314693..41b6c89e4bf7dfd39dbe26a15a3bc119e4d38e1b
@@@ -405,10 -405,13 +405,13 @@@ static void do_error(struct gfs2_glock 
  /**
   * demote_incompat_holders - demote incompatible demoteable holders
   * @gl: the glock we want to promote
-  * @new_gh: the new holder to be promoted
+  * @current_gh: the newly promoted holder
+  *
+  * We're passing the newly promoted holder in @current_gh, but actually, any of
+  * the strong holders would do.
   */
  static void demote_incompat_holders(struct gfs2_glock *gl,
-                                   struct gfs2_holder *new_gh)
+                                   struct gfs2_holder *current_gh)
  {
        struct gfs2_holder *gh, *tmp;
  
                 */
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return;
+               if (gh == current_gh)
+                       continue;
                if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) &&
-                   !may_grant(gl, new_gh, gh)) {
+                   !may_grant(gl, current_gh, gh)) {
                        /*
                         * We should not recurse into do_promote because
                         * __gfs2_glock_dq only calls handle_callback,
@@@ -478,8 -483,7 +483,7 @@@ find_first_strong_holder(struct gfs2_gl
   * gfs2_instantiate - Call the glops instantiate function
   * @gh: The glock holder
   *
-  * Returns: 0 if instantiate was successful, 2 if type specific operation is
-  * underway, or error.
+  * Returns: 0 if instantiate was successful, or error.
   */
  int gfs2_instantiate(struct gfs2_holder *gh)
  {
  
  again:
        if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
-               return 0;
+               goto done;
  
        /*
         * Since we unlock the lockref lock, we set a flag to indicate
                goto again;
        }
  
-       ret = glops->go_instantiate(gh);
+       ret = glops->go_instantiate(gl);
        if (!ret)
                clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
        clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-       return ret;
+       if (ret)
+               return ret;
+ done:
+       if (glops->go_held)
+               return glops->go_held(gh);
+       return 0;
  }
  
  /**
   * do_promote - promote as many requests as possible on the current queue
   * @gl: The glock
   * 
-  * Returns: 1 if there is a blocked holder at the head of the list, or 2
-  *          if a type specific operation is underway.
+  * Returns: 1 if there is a blocked holder at the head of the list
   */
  
  static int do_promote(struct gfs2_glock *gl)
- __releases(&gl->gl_lockref.lock)
- __acquires(&gl->gl_lockref.lock)
  {
-       struct gfs2_holder *gh, *tmp, *first_gh;
+       struct gfs2_holder *gh, *current_gh;
        bool incompat_holders_demoted = false;
-       bool lock_released;
-       int ret;
  
- restart:
-       first_gh = find_first_strong_holder(gl);
-       list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
-               lock_released = false;
+       current_gh = find_first_strong_holder(gl);
+       list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
-               if (!may_grant(gl, first_gh, gh)) {
+               if (!may_grant(gl, current_gh, gh)) {
                        /*
-                        * If we get here, it means we may not grant this holder for
-                        * some reason. If this holder is the head of the list, it
-                        * means we have a blocked holder at the head, so return 1.
+                        * If we get here, it means we may not grant this
+                        * holder for some reason. If this holder is at the
+                        * head of the list, it means we have a blocked holder
+                        * at the head, so return 1.
                         */
                        if (list_is_first(&gh->gh_list, &gl->gl_holders))
                                return 1;
                        do_error(gl, 0);
                        break;
                }
-               if (!incompat_holders_demoted) {
-                       demote_incompat_holders(gl, first_gh);
-                       incompat_holders_demoted = true;
-                       first_gh = gh;
-               }
-               if (test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags) &&
-                   !(gh->gh_flags & GL_SKIP) && gl->gl_ops->go_instantiate) {
-                       lock_released = true;
-                       spin_unlock(&gl->gl_lockref.lock);
-                       ret = gfs2_instantiate(gh);
-                       spin_lock(&gl->gl_lockref.lock);
-                       if (ret) {
-                               if (ret == 1)
-                                       return 2;
-                               gh->gh_error = ret;
-                               list_del_init(&gh->gh_list);
-                               trace_gfs2_glock_queue(gh, 0);
-                               gfs2_holder_wake(gh);
-                               goto restart;
-                       }
-               }
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                trace_gfs2_promote(gh);
                gfs2_holder_wake(gh);
-               /*
-                * If we released the gl_lockref.lock the holders list may have
-                * changed. For that reason, we start again at the start of
-                * the holders queue.
-                */
-               if (lock_released)
-                       goto restart;
+               if (!incompat_holders_demoted) {
+                       current_gh = gh;
+                       demote_incompat_holders(gl, current_gh);
+                       incompat_holders_demoted = true;
+               }
        }
        return 0;
  }
@@@ -657,7 -638,6 +638,6 @@@ static void finish_xmote(struct gfs2_gl
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
-       int rv;
  
        spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
@@@ -715,6 -695,8 +695,8 @@@ retry
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
+                       int rv;
                        spin_unlock(&gl->gl_lockref.lock);
                        rv = glops->go_xmote_bh(gl);
                        spin_lock(&gl->gl_lockref.lock);
                                goto out;
                        }
                }
-               rv = do_promote(gl);
-               if (rv == 2)
-                       goto out_locked;
+               do_promote(gl);
        }
  out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
- out_locked:
        spin_unlock(&gl->gl_lockref.lock);
  }
  
@@@ -886,7 -865,6 +865,6 @@@ __releases(&gl->gl_lockref.lock
  __acquires(&gl->gl_lockref.lock)
  {
        struct gfs2_holder *gh = NULL;
-       int ret;
  
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
-               ret = do_promote(gl);
-               if (ret == 0)
+               if (do_promote(gl) == 0)
                        goto out_unlock;
-               if (ret == 2)
-                       goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
- out:
        return;
  
  out_sched:
@@@ -1313,6 -1287,25 +1287,25 @@@ static void gfs2_glock_update_hold_time
        }
  }
  
+ /**
+  * gfs2_glock_holder_ready - holder is ready and its error code can be collected
+  * @gh: the glock holder
+  *
+  * Called when a glock holder no longer needs to be waited for because it is
+  * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
+  * failed (gh_error != 0).
+  */
+ int gfs2_glock_holder_ready(struct gfs2_holder *gh)
+ {
+       if (gh->gh_error || (gh->gh_flags & GL_SKIP))
+               return gh->gh_error;
+       gh->gh_error = gfs2_instantiate(gh);
+       if (gh->gh_error)
+               gfs2_glock_dq(gh);
+       return gh->gh_error;
+ }
  /**
   * gfs2_glock_wait - wait on a glock acquisition
   * @gh: the glock holder
@@@ -1327,7 -1320,7 +1320,7 @@@ int gfs2_glock_wait(struct gfs2_holder 
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
        gfs2_glock_update_hold_time(gh->gh_gl, start_time);
-       return gh->gh_error;
+       return gfs2_glock_holder_ready(gh);
  }
  
  static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
@@@ -1355,7 -1348,6 +1348,6 @@@ int gfs2_glock_async_wait(unsigned int 
        struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
        int i, ret = 0, timeout = 0;
        unsigned long start_time = jiffies;
-       bool keep_waiting;
  
        might_sleep();
        /*
        for (i = 0; i < num_gh; i++)
                timeout += ghs[i].gh_gl->gl_hold_time << 1;
  
- wait_for_dlm:
        if (!wait_event_timeout(sdp->sd_async_glock_wait,
-                               !glocks_pending(num_gh, ghs), timeout))
+                               !glocks_pending(num_gh, ghs), timeout)) {
                ret = -ESTALE; /* request timed out. */
+               goto out;
+       }
  
-       /*
-        * If dlm granted all our requests, we need to adjust the glock
-        * minimum hold time values according to how long we waited.
-        *
-        * If our request timed out, we need to repeatedly release any held
-        * glocks we acquired thus far to allow dlm to acquire the remaining
-        * glocks without deadlocking.  We cannot currently cancel outstanding
-        * glock acquisitions.
-        *
-        * The HIF_WAIT bit tells us which requests still need a response from
-        * dlm.
-        *
-        * If dlm sent us any errors, we return the first error we find.
-        */
-       keep_waiting = false;
        for (i = 0; i < num_gh; i++) {
-               /* Skip holders we have already dequeued below. */
-               if (!gfs2_holder_queued(&ghs[i]))
-                       continue;
-               /* Skip holders with a pending DLM response. */
-               if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
-                       keep_waiting = true;
-                       continue;
-               }
+               struct gfs2_holder *gh = &ghs[i];
+               int ret2;
  
-               if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
-                       if (ret == -ESTALE)
-                               gfs2_glock_dq(&ghs[i]);
-                       else
-                               gfs2_glock_update_hold_time(ghs[i].gh_gl,
-                                                           start_time);
+               if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+                       gfs2_glock_update_hold_time(gh->gh_gl,
+                                                   start_time);
                }
+               ret2 = gfs2_glock_holder_ready(gh);
                if (!ret)
-                       ret = ghs[i].gh_error;
+                       ret = ret2;
        }
  
-       if (keep_waiting)
-               goto wait_for_dlm;
+ out:
+       if (ret) {
+               for (i = 0; i < num_gh; i++) {
+                       struct gfs2_holder *gh = &ghs[i];
  
-       /*
-        * At this point, we've either acquired all locks or released them all.
-        */
+                       gfs2_glock_dq(gh);
+               }
+       }
        return ret;
  }
  
@@@ -1490,10 -1462,10 +1462,10 @@@ __acquires(&gl->gl_lockref.lock
  
        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags)) {
-                       struct gfs2_holder *first_gh;
+                       struct gfs2_holder *current_gh;
  
-                       first_gh = find_first_strong_holder(gl);
-                       try_futile = !may_grant(gl, first_gh, gh);
+                       current_gh = find_first_strong_holder(gl);
+                       try_futile = !may_grant(gl, current_gh, gh);
                }
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
@@@ -1779,7 -1751,7 +1751,7 @@@ static int glock_compare(const void *ar
  }
  
  /**
-  * nq_m_sync - synchonously acquire more than one glock in deadlock free order
+  * nq_m_sync - synchronously acquire more than one glock in deadlock free order
   * @num_gh: the number of structures
   * @ghs: an array of struct gfs2_holder structures
   * @p: placeholder for the holder structure to pass back
@@@ -1800,8 -1772,6 +1772,6 @@@ static int nq_m_sync(unsigned int num_g
        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
  
        for (x = 0; x < num_gh; x++) {
-               p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
   * @num_gh: the number of structures
   * @ghs: an array of struct gfs2_holder structures
   *
-  *
   * Returns: 0 on success (all glocks acquired),
   *          errno on failure (no glocks acquired)
   */
@@@ -1833,7 -1802,6 +1802,6 @@@ int gfs2_glock_nq_m(unsigned int num_gh
        case 0:
                return 0;
        case 1:
-               ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
@@@ -2245,20 -2213,6 +2213,6 @@@ void gfs2_gl_hash_clear(struct gfs2_sb
        glock_hash_walk(dump_glock_func, sdp);
  }
  
- void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
- {
-       struct gfs2_glock *gl = ip->i_gl;
-       int ret;
-       ret = gfs2_truncatei_resume(ip);
-       gfs2_glock_assert_withdraw(gl, ret == 0);
-       spin_lock(&gl->gl_lockref.lock);
-       clear_bit(GLF_LOCK, &gl->gl_flags);
-       run_queue(gl, 1);
-       spin_unlock(&gl->gl_lockref.lock);
- }
  static const char *state2str(unsigned state)
  {
        switch(state) {
@@@ -2533,7 -2487,7 +2487,7 @@@ int __init gfs2_glock_init(void
                return -ENOMEM;
        }
  
 -      ret = register_shrinker(&glock_shrinker);
 +      ret = register_shrinker(&glock_shrinker, "gfs2-glock");
        if (ret) {
                destroy_workqueue(gfs2_delete_workqueue);
                destroy_workqueue(glock_workqueue);
diff --combined fs/gfs2/main.c
index b66a3e1ec15286655e0e3fa576c22aeaed5222c8,d94791527dcb42fff29dbd12ce34bf75960300a5..14ae9de7627726d515f2419a82dc4e109cd49689
@@@ -38,7 -38,6 +38,6 @@@ static void gfs2_init_inode_once(void *
        inode_init_once(&ip->i_inode);
        atomic_set(&ip->i_sizehint, 0);
        init_rwsem(&ip->i_rw_mutex);
-       INIT_LIST_HEAD(&ip->i_trunc_list);
        INIT_LIST_HEAD(&ip->i_ordered);
        ip->i_qadata = NULL;
        gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
@@@ -148,7 -147,7 +147,7 @@@ static int __init init_gfs2_fs(void
        if (!gfs2_trans_cachep)
                goto fail_cachep8;
  
 -      error = register_shrinker(&gfs2_qd_shrinker);
 +      error = register_shrinker(&gfs2_qd_shrinker, "gfs2-qd");
        if (error)
                goto fail_shrinker;
  
diff --combined fs/gfs2/quota.c
index c98a7faa67d3a190e929460027e4b5fcfcd1a45a,a6667e8d781f24aa8ac1632b7f5077d47b4343f4..f201eaf59d0da55cb83190cd2cd41d3b2237d845
@@@ -746,7 -746,7 +746,7 @@@ static int gfs2_write_buf_to_page(struc
                if (PageUptodate(page))
                        set_buffer_uptodate(bh);
                if (!buffer_uptodate(bh)) {
 -                      ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
 +                      ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                goto unlock_out;
@@@ -1517,25 -1517,6 +1517,6 @@@ static void quotad_check_timeo(struct g
        }
  }
  
- static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
- {
-       struct gfs2_inode *ip;
-       while(1) {
-               ip = NULL;
-               spin_lock(&sdp->sd_trunc_lock);
-               if (!list_empty(&sdp->sd_trunc_list)) {
-                       ip = list_first_entry(&sdp->sd_trunc_list,
-                                       struct gfs2_inode, i_trunc_list);
-                       list_del_init(&ip->i_trunc_list);
-               }
-               spin_unlock(&sdp->sd_trunc_lock);
-               if (ip == NULL)
-                       return;
-               gfs2_glock_finish_truncate(ip);
-       }
- }
  void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
        if (!sdp->sd_statfs_force_sync) {
                sdp->sd_statfs_force_sync = 1;
@@@ -1558,7 -1539,6 +1539,6 @@@ int gfs2_quotad(void *data
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
-       int empty;
  
        while (!kthread_should_stop()) {
  
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);
  
-               /* Check for & recover partially truncated inodes */
-               quotad_check_trunc_list(sdp);
                try_to_freeze();
  
  bypass:
                t = min(quotad_timeo, statfs_timeo);
  
                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
-               spin_lock(&sdp->sd_trunc_lock);
-               empty = list_empty(&sdp->sd_trunc_list);
-               spin_unlock(&sdp->sd_trunc_lock);
-               if (empty && !sdp->sd_statfs_force_sync)
+               if (!sdp->sd_statfs_force_sync)
                        t -= schedule_timeout(t);
                else
                        t = 0;
This page took 0.088938 seconds and 4 git commands to generate.