netfs: Add a function to consolidate beginning a read
author     David Howells <[email protected]>
           Wed, 2 Mar 2022 10:50:22 +0000 (10:50 +0000)
committer  David Howells <[email protected]>
           Fri, 18 Mar 2022 09:29:05 +0000 (09:29 +0000)
Add a function to do the steps needed to begin a read request, allowing
this code to be removed from several other functions and consolidated.

Changes
=======
ver #2)
 - Move before the unstaticking patch so that some functions can be left
   static.
 - Set uninitialised return code in netfs_begin_read()[1][2].
 - Fixed a refleak caused by non-removal of a get from netfs_write_begin()
   when the request submission code got moved to netfs_begin_read().
 - Use INIT_WORK() to (re-)init the request work_struct[3].

Signed-off-by: David Howells <[email protected]>
Reviewed-by: Jeff Layton <[email protected]>
cc: [email protected]

Link: https://lore.kernel.org/r/[email protected]/
Link: https://lore.kernel.org/r/[email protected]/
Link: https://lore.kernel.org/r/[email protected]/
Link: https://lore.kernel.org/r/164623004355.3564931.7275693529042495641.stgit@warthog.procyon.org.uk/
Link: https://lore.kernel.org/r/164678214287.1200972.16734134007649832160.stgit@warthog.procyon.org.uk/
Link: https://lore.kernel.org/r/164692911113.2099075.1060868473229451371.stgit@warthog.procyon.org.uk/
fs/netfs/internal.h
fs/netfs/objects.c
fs/netfs/read_helper.c
include/trace/events/netfs.h
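
For orientation before the diff: a minimal caller-side sketch of the new
consolidated entry point. netfs_begin_read() and its "sync" flag are taken
from this patch; the wrapper functions and the elided request setup are
illustrative assumptions, not part of the change.

    /* netfs_begin_read() is declared in fs/netfs/internal.h by this patch. */

    /* Synchronous use, as in netfs_readpage() and netfs_write_begin() below:
     * netfs_begin_read() submits the read slices, waits for all subrequests
     * to complete and returns rreq->error (or -EIO on a short read).
     */
    static int example_sync_read(struct netfs_io_request *rreq)
    {
            return netfs_begin_read(rreq, true);
    }

    /* Asynchronous use, as in netfs_readahead() below: the slices are
     * submitted and completion is driven from the subrequest completion
     * path; the return value is not checked.
     */
    static void example_async_read(struct netfs_io_request *rreq)
    {
            netfs_begin_read(rreq, false);
    }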

index 54c761bcc8e6a7b71b67ebd4ce910a7f3b532844..54faf0c72297d28096b02c81702bc3332455e052 100644 (file)
@@ -39,7 +39,7 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
  */
 extern unsigned int netfs_debug;
 
-void netfs_rreq_work(struct work_struct *work);
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
 
 /*
  * stats.c
index 657b19e6011857d7b7683477a3e940bcb7f50cf5..e86107b30ba443ba783ac28eb66126ce61488e93 100644 (file)
@@ -35,7 +35,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
        rreq->i_size    = i_size_read(inode);
        rreq->debug_id  = atomic_inc_return(&debug_ids);
        INIT_LIST_HEAD(&rreq->subrequests);
-       INIT_WORK(&rreq->work, netfs_rreq_work);
        refcount_set(&rreq->ref, 1);
        __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        if (rreq->netfs_ops->init_request) {
index c048cd328ce5680947a20445db478c9c7500cbc0..8a4cebf5eb892ae174e2c2ba8c0da9f63f5e2a73 100644 (file)
@@ -443,7 +443,7 @@ again:
        netfs_rreq_completed(rreq, was_async);
 }
 
-void netfs_rreq_work(struct work_struct *work)
+static void netfs_rreq_work(struct work_struct *work)
 {
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);
@@ -688,6 +688,69 @@ subreq_failed:
        return false;
 }
 
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+       unsigned int debug_index = 0;
+       int ret;
+
+       _enter("R=%x %llx-%llx",
+              rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+       if (rreq->len == 0) {
+               pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+               return -EIO;
+       }
+
+       INIT_WORK(&rreq->work, netfs_rreq_work);
+
+       if (sync)
+               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+       /* Chop the read into slices according to what the cache and the netfs
+        * want and submit each one.
+        */
+       atomic_set(&rreq->nr_outstanding, 1);
+       do {
+               if (!netfs_rreq_submit_slice(rreq, &debug_index))
+                       break;
+
+       } while (rreq->submitted < rreq->len);
+
+       if (sync) {
+               /* Keep nr_outstanding incremented so that the ref always belongs to
+                * us, and the service code isn't punted off to a random thread pool to
+                * process.
+                */
+               for (;;) {
+                       wait_var_event(&rreq->nr_outstanding,
+                                      atomic_read(&rreq->nr_outstanding) == 1);
+                       netfs_rreq_assess(rreq, false);
+                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+                               break;
+                       cond_resched();
+               }
+
+               ret = rreq->error;
+               if (ret == 0 && rreq->submitted < rreq->len) {
+                       trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+                       ret = -EIO;
+               }
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+       } else {
+               /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
+                       netfs_rreq_assess(rreq, false);
+               ret = 0;
+       }
+       return ret;
+}
+
 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
                                         loff_t *_start, size_t *_len, loff_t i_size)
 {
@@ -750,7 +813,6 @@ void netfs_readahead(struct readahead_control *ractl)
 {
        struct netfs_io_request *rreq;
        struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
-       unsigned int debug_index = 0;
        int ret;
 
        _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -777,22 +839,13 @@ void netfs_readahead(struct readahead_control *ractl)
 
        netfs_rreq_expand(rreq, ractl);
 
-       atomic_set(&rreq->nr_outstanding, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
        /* Drop the refs on the folios here rather than in the cache or
         * filesystem.  The locks will be dropped in netfs_rreq_unlock().
         */
        while (readahead_folio(ractl))
                ;
 
-       /* If we decrement nr_outstanding to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_outstanding))
-               netfs_rreq_assess(rreq, false);
+       netfs_begin_read(rreq, false);
        return;
 
 cleanup_free:
@@ -821,7 +874,6 @@ int netfs_readpage(struct file *file, struct page *subpage)
        struct address_space *mapping = folio->mapping;
        struct netfs_io_request *rreq;
        struct netfs_i_context *ctx = netfs_i_context(mapping->host);
-       unsigned int debug_index = 0;
        int ret;
 
        _enter("%lx", folio_index(folio));
@@ -836,42 +888,16 @@ int netfs_readpage(struct file *file, struct page *subpage)
 
        if (ctx->ops->begin_cache_operation) {
                ret = ctx->ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-                       folio_unlock(folio);
-                       goto out;
-               }
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto discard;
        }
 
        netfs_stat(&netfs_n_rh_readpage);
        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+       return netfs_begin_read(rreq, true);
 
-       netfs_get_request(rreq, netfs_rreq_trace_get_hold);
-
-       atomic_set(&rreq->nr_outstanding, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_outstanding incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       do {
-               wait_var_event(&rreq->nr_outstanding,
-                              atomic_read(&rreq->nr_outstanding) == 1);
-               netfs_rreq_assess(rreq, false);
-       } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-               ret = -EIO;
-       }
-out:
-       netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
-       return ret;
+discard:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 alloc_error:
        folio_unlock(folio);
        return ret;
@@ -966,7 +992,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
        struct netfs_io_request *rreq;
        struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
        struct folio *folio;
-       unsigned int debug_index = 0, fgp_flags;
+       unsigned int fgp_flags;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;
 
@@ -1029,39 +1055,13 @@ retry:
         */
        ractl._nr_pages = folio_nr_pages(folio);
        netfs_rreq_expand(rreq, &ractl);
-       netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
        /* We hold the folio locks, so we can drop the references */
        folio_get(folio);
        while (readahead_folio(&ractl))
                ;
 
-       atomic_set(&rreq->nr_outstanding, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_outstanding incremented so that the ref always belongs to
-        * us, and the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       for (;;) {
-               wait_var_event(&rreq->nr_outstanding,
-                              atomic_read(&rreq->nr_outstanding) == 1);
-               netfs_rreq_assess(rreq, false);
-               if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                       break;
-               cond_resched();
-       }
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-               ret = -EIO;
-       }
-       netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+       ret = netfs_begin_read(rreq, true);
        if (ret < 0)
                goto error;
 
index f00e3e1821c87a7fc6bcd59132684f8f404b4a48..beec534cbaab25e6a2ce4aacaa09134068f7394a 100644 (file)
        EM(netfs_fail_check_write_begin,        "check-write-begin")    \
        EM(netfs_fail_copy_to_cache,            "copy-to-cache")        \
        EM(netfs_fail_read,                     "read")                 \
-       EM(netfs_fail_short_readpage,           "short-readpage")       \
-       EM(netfs_fail_short_write_begin,        "short-write-begin")    \
+       EM(netfs_fail_short_read,               "short-read")           \
        E_(netfs_fail_prepare_write,            "prep-write")
 
 #define netfs_rreq_ref_traces                                  \
        EM(netfs_rreq_trace_get_hold,           "GET HOLD   ")  \
        EM(netfs_rreq_trace_get_subreq,         "GET SUBREQ ")  \
        EM(netfs_rreq_trace_put_complete,       "PUT COMPLT ")  \
+       EM(netfs_rreq_trace_put_discard,        "PUT DISCARD")  \
        EM(netfs_rreq_trace_put_failed,         "PUT FAILED ")  \
        EM(netfs_rreq_trace_put_hold,           "PUT HOLD   ")  \
        EM(netfs_rreq_trace_put_subreq,         "PUT SUBREQ ")  \
+       EM(netfs_rreq_trace_put_zero_len,       "PUT ZEROLEN")  \
        E_(netfs_rreq_trace_new,                "NEW        ")
 
 #define netfs_sreq_ref_traces                                  \