// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
}

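/*
 * Completion handler for a read from the cache: hand the result on to the
 * common subrequest termination path.
 */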
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	netfs_stat(&netfs_n_rh_read);
	cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
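	/* Terminating with zero bytes transferred routes this through the
	 * "incomplete" path in netfs_subreq_terminated(), where the
	 * NETFS_SREQ_CLEAR_TAIL flag makes netfs_clear_unread() zero the
	 * whole buffer.
	 */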
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_IO: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);

	if (rreq->origin != NETFS_DIO_READ &&
	    iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
		pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
			rreq->debug_id, subreq->debug_index,
			iov_iter_count(&subreq->io_iter), subreq->len,
			subreq->transferred, subreq->flags);

	rreq->netfs_ops->issue_read(subreq);
}

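/*
 * For example (a sketch, not taken from any particular filesystem): if a
 * subrequest covers 256KiB but the server returns only the first 64KiB, the
 * netfs can call netfs_subreq_terminated(subreq, 65536, false) and the helper
 * will resubmit the subrequest for the remainder - unless NETFS_SREQ_CLEAR_TAIL
 * was set, in which case the trailing 192KiB is simply zeroed.
 */
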
/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);

	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Reset the subrequest iterator prior to resubmission.
 */
static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
				    struct netfs_io_subrequest *subreq)
{
	size_t remaining = subreq->len - subreq->transferred;
	size_t count = iov_iter_count(&subreq->io_iter);

	if (count == remaining)
		return;

	_debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
	       rreq->debug_id, subreq->debug_index,
	       iov_iter_count(&subreq->io_iter), subreq->transferred,
	       subreq->len, rreq->i_size,
	       subreq->io_iter.iter_type);
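	/* E.g. if 8KiB of the subrequest remains untransferred but the
	 * iterator was consumed in full (count == 0), revert it by 8KiB so
	 * that it covers the untransferred tail again before the read is
	 * reissued.
	 */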
	if (count < remaining)
		iov_iter_revert(&subreq->io_iter, remaining - count);
	else
		iov_iter_advance(&subreq->io_iter, count - remaining);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_reset_subreq_iter(rreq, subreq);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

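/*
 * Note on lifetimes: rreq->nr_outstanding is biased by one while subrequests
 * are being submitted or resubmitted, so it can only reach zero once all
 * subrequests have terminated; whichever side decrements it to zero inherits
 * the request ref and must assess the request.
 */
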
/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

/*
 * Determine how much we can admit to having read from a DIO read.
 */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	unsigned int i;
	size_t transferred = 0;

	for (i = 0; i < rreq->direct_bv_count; i++) {
		flush_dcache_page(rreq->direct_bv[i].bv_page);
		// TODO: cifs marks pages in the destination buffer
		// dirty under some circumstances after a read.  Do we
		// need to do that too?
		set_page_dirty(rreq->direct_bv[i].bv_page);
	}

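	/* Count the bytes transferred, stopping at the first failed or short
	 * subrequest: bytes beyond a gap are not contiguous with the start of
	 * the request and cannot be claimed.
	 */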
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error || subreq->transferred == 0)
			break;
		transferred += subreq->transferred;
		if (subreq->transferred < subreq->len)
			break;
	}

	for (i = 0; i < rreq->direct_bv_count; i++)
		flush_dcache_page(rreq->direct_bv[i].bv_page);

	rreq->transferred = transferred;
	task_io_account_read(transferred);

	if (rreq->iocb) {
		rreq->iocb->ki_pos += transferred;
		if (rreq->iocb->ki_complete)
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : transferred);
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	inode_dio_end(rreq->inode);
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	if (rreq->origin != NETFS_DIO_READ)
		netfs_rreq_unlock_folios(rreq);
	else
		netfs_rreq_assess_dio(rreq);

	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a retryable failure to transfer anything, or a negative error code.
 * The helper will look after reissuing I/O operations as appropriate and
 * writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("R=%x[%x]{%llx,%lx},%zd",
	       rreq->debug_id, subreq->debug_index,
	       subreq->start, subreq->flags, transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

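/*
 * A minimal sketch (hypothetical names, not from any particular filesystem)
 * of how a netfs's ->issue_read() might report its result through this
 * function:
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		ssize_t ret = myfs_send_read_rpc(subreq); // hypothetical RPC
 *
 *		// Bytes transferred, 0 (retryable) or -errno.
 *		netfs_subreq_terminated(subreq, ret, false);
 *	}
 */
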
/*
 * Decide how a subrequest should be sourced: defer to the cache if one is
 * attached; otherwise a read wholly beyond EOF is filled with zeroes and
 * anything else is downloaded from the server.
 */
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq,
			struct iov_iter *io_iter)
{
	enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	size_t lsize;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	if (rreq->origin != NETFS_DIO_READ) {
		source = netfs_cache_prepare_read(subreq, rreq->i_size);
		if (source == NETFS_INVALID_READ)
			goto out;
	}

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (rreq->origin != NETFS_DIO_READ) {
			if (subreq->start >= ictx->zero_point) {
				source = NETFS_FILL_WITH_ZEROES;
				goto set;
			}
			if (subreq->len > ictx->zero_point - subreq->start)
				subreq->len = ictx->zero_point - subreq->start;
		}
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;
		if (rreq->rsize && subreq->len > rreq->rsize)
			subreq->len = rreq->rsize;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}

		if (subreq->max_nr_segs) {
			lsize = netfs_limit_iter(io_iter, 0, subreq->len,
						 subreq->max_nr_segs);
			if (subreq->len > lsize) {
				subreq->len = lsize;
				trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
			}
		}
	}

set:
	if (subreq->len > rreq->len)
		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
			rreq->debug_id, subreq->debug_index,
			subreq->len, rreq->len);

	if (WARN_ON(subreq->len == 0)) {
		source = NETFS_INVALID_READ;
		goto out;
	}

	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	subreq->io_iter = *io_iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(io_iter, subreq->len);

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    struct iov_iter *io_iter)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->start	= rreq->start + rreq->submitted;
	subreq->len	= io_iter->count;

	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then when we hand the subset, it can choose to take a subset of that
	 * (the starts must coincide), in which case, we go around the loop
	 * again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	struct iov_iter io_iter;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}

	if (rreq->origin == NETFS_DIO_READ)
		inode_dio_begin(rreq->inode);

	// TODO: Use bounce buffer if requested
	rreq->io_iter = rreq->iter;

	INIT_WORK(&rreq->work, netfs_rreq_work);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
	atomic_set(&rreq->nr_outstanding, 1);
	io_iter = rreq->io_iter;
	do {
		_debug("submit %llx + %llx >= %llx",
		       rreq->start, rreq->submitted, rreq->i_size);
		if (rreq->origin == NETFS_DIO_READ &&
		    rreq->start + rreq->submitted >= rreq->i_size)
			break;
		if (!netfs_rreq_submit_slice(rreq, &io_iter))
			break;
		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
			break;
	} while (rreq->submitted < rreq->len);

	if (!rreq->submitted) {
		netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
		if (rreq->origin == NETFS_DIO_READ)
			inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always
		 * belongs to us, and the service code isn't punted off to a
		 * random thread pool to process.  Note that this might start
		 * further work, such as writing to the cache.
		 */
		wait_var_event(&rreq->nr_outstanding,
			       atomic_read(&rreq->nr_outstanding) == 1);
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);

		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len &&
		    rreq->origin != NETFS_DIO_READ) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = -EIOCBQUEUED;
	}

out:
	return ret;
}
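
/*
 * Rough usage sketch (illustrative only; see the buffered and direct read
 * entry points for the real callers): set up a netfs_io_request covering
 * [start, start + len), point rreq->iter at the destination buffer, then:
 *
 *	ret = netfs_begin_read(rreq, true);	// sync: waits, returns 0 or -errno
 *	ret = netfs_begin_read(rreq, false);	// async: returns -EIOCBQUEUED; a
 *						// DIO read with an iocb completes
 *						// via ->ki_complete()
 */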