// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
        struct iov_iter iter;

        iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len - subreq->transferred);
        iov_iter_zero(iov_iter_count(&iter), &iter);
}

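/*
 * Handle completion of a read from the cache: pass the result, and whether
 * the completion happened asynchronously, on to the common subrequest
 * termination handler.
 */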
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
                                        bool was_async)
{
        struct netfs_io_subrequest *subreq = priv;

        netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
                                  struct netfs_io_subrequest *subreq,
                                  enum netfs_read_from_hole read_hole)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct iov_iter iter;

        netfs_stat(&netfs_n_rh_read);
        iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len - subreq->transferred);

        cres->ops->read(cres, subreq->start, &iter, read_hole,
                        netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
                                   struct netfs_io_subrequest *subreq)
{
        netfs_stat(&netfs_n_rh_zero);
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
        netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->pos + subreq->transferred to
 * subreq->pos + subreq->len - 1. It may not backtrack and write data into the
 * buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
                                   struct netfs_io_subrequest *subreq)
{
        netfs_stat(&netfs_n_rh_download);
        rreq->netfs_ops->issue_read(subreq);
}

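/*
 * Purely illustrative (not part of this file): a network filesystem's
 * ->issue_read() implementation of the above contract might look roughly
 * like the sketch below for a synchronous transport. myfs_fetch() is a
 * hypothetical helper; only netfs_subreq_terminated() is real netfs API.
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		ssize_t ret;
 *
 *		ret = myfs_fetch(subreq->rreq,
 *				 subreq->start + subreq->transferred,
 *				 subreq->len - subreq->transferred);
 *		netfs_subreq_terminated(subreq, ret, false);
 *	}
 */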
/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_done);
        netfs_clear_subrequests(rreq, was_async);
        netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

/*
 * Deal with the completion of writing the data to the cache. We have to clear
 * the PG_fscache bits on the folios involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                                          bool was_async)
{
        struct netfs_io_subrequest *subreq;
        struct folio *folio;
        pgoff_t unlocked = 0;
        bool have_unlocked = false;

        rcu_read_lock();

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

                xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
                        /* We might have multiple writes from the same huge
                         * folio, but we mustn't unlock a folio more than once.
                         */
                        if (have_unlocked && folio_index(folio) <= unlocked)
                                continue;
                        unlocked = folio_index(folio);
                        folio_end_fscache(folio);
                        have_unlocked = true;
                }
        }

        rcu_read_unlock();
        netfs_rreq_completed(rreq, was_async);
}

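/*
 * Handle termination of a write to the cache. If this was the last
 * outstanding copy operation, clear the folio marks; either way, drop the
 * ref that the copy operation held on the subrequest.
 */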
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
                                       bool was_async)
{
        struct netfs_io_subrequest *subreq = priv;
        struct netfs_io_request *rreq = subreq->rreq;

        if (IS_ERR_VALUE(transferred_or_error)) {
                netfs_stat(&netfs_n_rh_write_failed);
                trace_netfs_failure(rreq, subreq, transferred_or_error,
                                    netfs_fail_copy_to_cache);
        } else {
                netfs_stat(&netfs_n_rh_write_done);
        }

        trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

        /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_copy_ops))
                netfs_rreq_unmark_after_write(rreq, was_async);

        netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}

/*
 * Perform any outstanding writes to the cache. We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct netfs_io_subrequest *subreq, *next, *p;
        struct iov_iter iter;
        int ret;

        trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

        /* We don't want terminating writes trying to wake us up whilst we're
         * still going through the list.
         */
        atomic_inc(&rreq->nr_copy_ops);

        list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
                if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
                        list_del_init(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, false,
                                             netfs_sreq_trace_put_no_copy);
                }
        }

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                /* Amalgamate adjacent writes */
                while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                        next = list_next_entry(subreq, rreq_link);
                        if (next->start != subreq->start + subreq->len)
                                break;
                        subreq->len += next->len;
                        list_del_init(&next->rreq_link);
                        netfs_put_subrequest(next, false,
                                             netfs_sreq_trace_put_merged);
                }

                ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
                                               rreq->i_size, true);
                if (ret < 0) {
                        trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
                        continue;
                }

                iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
                                subreq->start, subreq->len);

                atomic_inc(&rreq->nr_copy_ops);
                netfs_stat(&netfs_n_rh_write);
                netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
                trace_netfs_sreq(subreq, netfs_sreq_trace_write);
                cres->ops->write(cres, subreq->start, &iter,
                                 netfs_rreq_copy_terminated, subreq);
        }

        /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_copy_ops))
                netfs_rreq_unmark_after_write(rreq, false);
}

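/*
 * Work item by which the writes to the cache are performed in process
 * context.
 */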
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);

        netfs_rreq_do_write_to_cache(rreq);
}

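/*
 * Schedule the writing of downloaded data to the cache. This is punted to a
 * workqueue as the caller may be running in a context in which it cannot
 * sleep (see netfs_rreq_assess()).
 */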
static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
        rreq->work.func = netfs_rreq_write_to_cache_work;
        if (!queue_work(system_unbound_wq, &rreq->work))
                BUG();
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
                                  struct netfs_io_subrequest *subreq)
{
        __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
        __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

        netfs_stat(&netfs_n_rh_short_read);
        trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

        netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
        atomic_inc(&rreq->nr_outstanding);
        if (subreq->source == NETFS_READ_FROM_CACHE)
                netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
        else
                netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations. Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;

        WARN_ON(in_interrupt());

        trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

        /* We don't want terminating submissions trying to wake us up whilst
         * we're still going through the list.
         */
        atomic_inc(&rreq->nr_outstanding);

        __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->error) {
                        if (subreq->source != NETFS_READ_FROM_CACHE)
                                break;
                        subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
                        subreq->error = 0;
                        netfs_stat(&netfs_n_rh_download_instead);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
                        netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
                        atomic_inc(&rreq->nr_outstanding);
                        netfs_read_from_server(rreq, subreq);
                } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
                        netfs_rreq_short_read(rreq, subreq);
                }
        }

        /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_outstanding))
                return true;

        wake_up_var(&rreq->nr_outstanding);
        return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;

        if (!rreq->netfs_ops->is_still_valid ||
            rreq->netfs_ops->is_still_valid(rreq))
                return;

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->source == NETFS_READ_FROM_CACHE) {
                        subreq->error = -ESTALE;
                        __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
                }
        }
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point. We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
        netfs_rreq_is_still_valid(rreq);

        if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
            test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
                if (netfs_rreq_perform_resubmissions(rreq))
                        goto again;
                return;
        }

        netfs_rreq_unlock_folios(rreq);

        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

        if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
                return netfs_rreq_write_to_cache(rreq);

        netfs_rreq_completed(rreq, was_async);
}

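/*
 * Work item by which a read request is assessed in process context when that
 * can't be done at the point the last I/O completed.
 */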
static void netfs_rreq_work(struct work_struct *work)
{
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);
        netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
                                  bool was_async)
{
        if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
            was_async) {
                if (!queue_work(system_unbound_wq, &rreq->work))
                        BUG();
        } else {
                netfs_rreq_assess(rreq, was_async);
        }
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code. The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
                             ssize_t transferred_or_error,
                             bool was_async)
{
        struct netfs_io_request *rreq = subreq->rreq;
        int u;

        _enter("[%u]{%llx,%lx},%zd",
               subreq->debug_index, subreq->start, subreq->flags,
               transferred_or_error);

        switch (subreq->source) {
        case NETFS_READ_FROM_CACHE:
                netfs_stat(&netfs_n_rh_read_done);
                break;
        case NETFS_DOWNLOAD_FROM_SERVER:
                netfs_stat(&netfs_n_rh_download_done);
                break;
        default:
                break;
        }

        if (IS_ERR_VALUE(transferred_or_error)) {
                subreq->error = transferred_or_error;
                trace_netfs_failure(rreq, subreq, transferred_or_error,
                                    netfs_fail_read);
                goto failed;
        }

        if (WARN(transferred_or_error > subreq->len - subreq->transferred,
                 "Subreq overread: R%x[%x] %zd > %zu - %zu",
                 rreq->debug_id, subreq->debug_index,
                 transferred_or_error, subreq->len, subreq->transferred))
                transferred_or_error = subreq->len - subreq->transferred;

        subreq->error = 0;
        subreq->transferred += transferred_or_error;
        if (subreq->transferred < subreq->len)
                goto incomplete;

complete:
        __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
        if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
                set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
        trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

        /* If we decrement nr_outstanding to 0, the ref belongs to us. */
        u = atomic_dec_return(&rreq->nr_outstanding);
        if (u == 0)
                netfs_rreq_terminated(rreq, was_async);
        else if (u == 1)
                wake_up_var(&rreq->nr_outstanding);

        netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
        return;

incomplete:
        if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
                netfs_clear_unread(subreq);
                subreq->transferred = subreq->len;
                goto complete;
        }

        if (transferred_or_error == 0) {
                if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
                        subreq->error = -ENODATA;
                        goto failed;
                }
        } else {
                __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
        }

        __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
        set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        goto out;

failed:
        if (subreq->source == NETFS_READ_FROM_CACHE) {
                netfs_stat(&netfs_n_rh_read_failed);
                set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        } else {
                netfs_stat(&netfs_n_rh_download_failed);
                set_bit(NETFS_RREQ_FAILED, &rreq->flags);
                rreq->error = subreq->error;
        }
        goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

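/*
 * Purely illustrative: an asynchronous transport completion handler would
 * feed its outcome straight into netfs_subreq_terminated(), e.g. with a
 * hypothetical per-RPC structure "call" kept by the netfs:
 *
 *	netfs_subreq_terminated(call->subreq,
 *				call->error < 0 ? call->error : call->nr_read,
 *				true);
 */

/*
 * Ask the cache (if any) how a subrequest should be sourced. Without a
 * cache, download from the server, or just fill the buffer with zeroes if the
 * subrequest starts at or beyond the known file size.
 */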
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
                                                     loff_t i_size)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct netfs_cache_resources *cres = &rreq->cache_resources;

        if (cres->ops)
                return cres->ops->prepare_read(subreq, i_size);
        if (subreq->start >= rreq->i_size)
                return NETFS_FILL_WITH_ZEROES;
        return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
                        struct netfs_io_subrequest *subreq)
{
        enum netfs_io_source source;

        _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

        source = netfs_cache_prepare_read(subreq, rreq->i_size);
        if (source == NETFS_INVALID_READ)
                goto out;

        if (source == NETFS_DOWNLOAD_FROM_SERVER) {
                /* Call out to the netfs to let it shrink the request to fit
                 * its own I/O sizes and boundaries. If it shrinks it here, it
                 * will be called again to make simultaneous calls; if it wants
                 * to make serial calls, it can indicate a short read and then
                 * we will call it again.
                 */
                if (subreq->len > rreq->i_size - subreq->start)
                        subreq->len = rreq->i_size - subreq->start;

                if (rreq->netfs_ops->clamp_length &&
                    !rreq->netfs_ops->clamp_length(subreq)) {
                        source = NETFS_INVALID_READ;
                        goto out;
                }
        }

        if (WARN_ON(subreq->len == 0))
                source = NETFS_INVALID_READ;

out:
        subreq->source = source;
        trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
        return source;
}

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
                                    unsigned int *_debug_index)
{
        struct netfs_io_subrequest *subreq;
        enum netfs_io_source source;

        subreq = netfs_alloc_subrequest(rreq);
        if (!subreq)
                return false;

        subreq->debug_index = (*_debug_index)++;
        subreq->start = rreq->start + rreq->submitted;
        subreq->len = rreq->len - rreq->submitted;

        _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
        list_add_tail(&subreq->rreq_link, &rreq->subrequests);

        /* Call out to the cache to find out what it can do with the remaining
         * subset. It tells us in subreq->flags what it decided should be done
         * and adjusts subreq->len down if the subset crosses a cache boundary.
         *
         * Then when we hand the subset, it can choose to take a subset of that
         * (the starts must coincide), in which case, we go around the loop
         * again and ask it to download the next piece.
         */
        source = netfs_rreq_prepare_read(rreq, subreq);
        if (source == NETFS_INVALID_READ)
                goto subreq_failed;

        atomic_inc(&rreq->nr_outstanding);

        rreq->submitted += subreq->len;

        trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
        switch (source) {
        case NETFS_FILL_WITH_ZEROES:
                netfs_fill_with_zeroes(rreq, subreq);
                break;
        case NETFS_DOWNLOAD_FROM_SERVER:
                netfs_read_from_server(rreq, subreq);
                break;
        case NETFS_READ_FROM_CACHE:
                netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
                break;
        default:
                BUG();
        }

        return true;

subreq_failed:
        rreq->error = subreq->error;
        netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
        return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
        unsigned int debug_index = 0;
        int ret;

        _enter("R=%x %llx-%llx",
               rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

        if (rreq->len == 0) {
                pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
                netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
                return -EIO;
        }

        INIT_WORK(&rreq->work, netfs_rreq_work);

        if (sync)
                netfs_get_request(rreq, netfs_rreq_trace_get_hold);

        /* Chop the read into slices according to what the cache and the netfs
         * want and submit each one.
         */
        atomic_set(&rreq->nr_outstanding, 1);
        do {
                if (!netfs_rreq_submit_slice(rreq, &debug_index))
                        break;

        } while (rreq->submitted < rreq->len);

        if (sync) {
                /* Keep nr_outstanding incremented so that the ref always belongs to
                 * us, and the service code isn't punted off to a random thread pool to
                 * process.
                 */
                for (;;) {
                        wait_var_event(&rreq->nr_outstanding,
                                       atomic_read(&rreq->nr_outstanding) == 1);
                        netfs_rreq_assess(rreq, false);
                        if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
                                break;
                        cond_resched();
                }

                ret = rreq->error;
                if (ret == 0 && rreq->submitted < rreq->len) {
                        trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
                        ret = -EIO;
                }
                netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
        } else {
                /* If we decrement nr_outstanding to 0, the ref belongs to us. */
                if (atomic_dec_and_test(&rreq->nr_outstanding))
                        netfs_rreq_assess(rreq, false);
                ret = 0;
        }
        return ret;
}