// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

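/*
 * Mark a subrequest as being retried: note that it is in progress again, take
 * a ref for the resubmission and hand it back to the filesystem to issue the
 * read.
 */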
static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	struct list_head *next;

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&stream->subrequests))
		return;

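	/* Give the filesystem a chance to adjust things before anything is
	 * reissued (passing NULL applies to the request as a whole rather
	 * than a single stream). */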
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

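		/* Extend the span over any immediately following subrequests
		 * that are contiguous and also marked for retry. */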
		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;
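			/* Switch the subrequest over to a server download (a
			 * failed cache read must be retried from the server)
			 * and adjust its range so that the untransferred part
			 * covers the rest of the span. */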
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start = start - subreq->transferred;
			subreq->len = len + subreq->transferred;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;

			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (rsize) */
			stream->sreq_max_len = subreq->len;
			if (rreq->netfs_ops->prepare_read &&
			    rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

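			/* Clip the subrequest to the renegotiated limits and
			 * carve its slice out of the shared source iterator. */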
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			}

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_read(rreq, subreq);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(rreq);
			if (!subreq) {
				subreq = to;
				goto abandon_after;
			}
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start = start;
			subreq->len = len;
			subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
			subreq->stream_nr = stream->stream_nr;
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

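			/* Renegotiate max_len (rsize) for the new subrequest. */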
			stream->sreq_max_len = umin(len, rreq->rsize);
			stream->sreq_max_segs = 0;
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);

			netfs_stat(&netfs_n_rh_download);
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

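			/* Carve the new subrequest's slice out of the remaining
			 * source buffer. */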
			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);

			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_read(rreq, subreq);
		} while (len);

	} while (!list_is_head(next, &stream->subrequests));

	return;

	/* If we hit an error, fail all remaining incomplete subrequests */
abandon_after:
	if (list_is_last(&subreq->rreq_link, &stream->subrequests))
		return;
	subreq = list_next_entry(subreq, rreq_link);
abandon:
	list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
		if (!subreq->error &&
		    !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
		    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
			continue;
		subreq->error = -ENOMEM;
		__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
}

/*
 * Retry any failed reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
		wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);
	}

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
	netfs_retry_read_subrequests(rreq);
}

/*
 * Unlock any of the pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

	for (p = rreq->buffer.tail; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}