[linux.git] fs/netfs/read_collect.c (Linux 6.14-rc3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Network filesystem read subrequest result collection, assessment and
3  * retrying.
4  *
5  * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
6  * Written by David Howells ([email protected])
7  */
8
9 #include <linux/export.h>
10 #include <linux/fs.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/slab.h>
14 #include <linux/task_io_accounting_ops.h>
15 #include "internal.h"
16
17 /* Notes made in the collector */
18 #define HIT_PENDING     0x01    /* A front op was still pending */
19 #define MADE_PROGRESS   0x04    /* Made progress cleaning up a stream or the folio set */
20 #define BUFFERED        0x08    /* The pagecache needs cleaning up */
21 #define NEED_RETRY      0x10    /* A front op requests retrying */
22 #define COPY_TO_CACHE   0x40    /* Need to copy subrequest to cache */
23 #define ABANDON_SREQ    0x80    /* Need to abandon untransferred part of subrequest */
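/* These note bits are accumulated in a local bitmask during a single pass of
 * netfs_collect_read_results() and are not stored on the request itself.
 */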
24
25 /*
26  * Clear the unread part of an I/O request.
27  */
28 static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
29 {
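        /* After resetting, the iterator should cover exactly the untransferred
         * tail of the subrequest buffer; zero that region and note if we have
         * ended up at or beyond the end of the file.
         */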
30         netfs_reset_iter(subreq);
31         WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
32         iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
33         if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
34                 __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
35 }
36
37 /*
38  * Flush, mark and unlock a folio that's now completely read.  If we want to
39  * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
40  * dirty and let writeback handle it.
41  */
42 static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
43                                     struct folio_queue *folioq,
44                                     int slot)
45 {
46         struct netfs_folio *finfo;
47         struct folio *folio = folioq_folio(folioq, slot);
48
49         if (unlikely(folio_pos(folio) < rreq->abandon_to)) {
50                 trace_netfs_folio(folio, netfs_folio_trace_abandon);
51                 goto just_unlock;
52         }
53
54         flush_dcache_folio(folio);
55         folio_mark_uptodate(folio);
56
57         if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
58                 finfo = netfs_folio_info(folio);
59                 if (finfo) {
60                         trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
61                         if (finfo->netfs_group)
62                                 folio_change_private(folio, finfo->netfs_group);
63                         else
64                                 folio_detach_private(folio);
65                         kfree(finfo);
66                 }
67
68                 if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) {
69                         if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
70                                 trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
71                                 folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
72                                 folio_mark_dirty(folio);
73                         }
74                 } else {
75                         trace_netfs_folio(folio, netfs_folio_trace_read_done);
76                 }
77
78                 folioq_clear(folioq, slot);
79         } else {
80                 // TODO: Use of PG_private_2 is deprecated.
81                 if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags))
82                         netfs_pgpriv2_copy_to_cache(rreq, folio);
83         }
84
85 just_unlock:
86         if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
87                 if (folio->index == rreq->no_unlock_folio &&
88                     test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
89                         _debug("no unlock");
90                 } else {
91                         trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
92                         folio_unlock(folio);
93                 }
94         }
95
96         folioq_clear(folioq, slot);
97 }
98
99 /*
100  * Unlock any folios we've finished with.
101  */
102 static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
103                                      unsigned int *notes)
104 {
105         struct folio_queue *folioq = rreq->buffer.tail;
106         unsigned long long collected_to = rreq->collected_to;
107         unsigned int slot = rreq->buffer.first_tail_slot;
108
109         if (rreq->cleaned_to >= rreq->collected_to)
110                 return;
111
112         // TODO: Begin decryption
113
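        /* The first tail slot may point just past the end of a folioq whose
         * slots have all been consumed; release the spent folioq before
         * starting the walk.
         */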
114         if (slot >= folioq_nr_slots(folioq)) {
115                 folioq = rolling_buffer_delete_spent(&rreq->buffer);
116                 if (!folioq) {
117                         rreq->front_folio_order = 0;
118                         return;
119                 }
120                 slot = 0;
121         }
122
123         for (;;) {
124                 struct folio *folio;
125                 unsigned long long fpos, fend;
126                 unsigned int order;
127                 size_t fsize;
128
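                /* Propagate the collector's COPY_TO_CACHE note into a
                 * per-request flag so that netfs_unlock_read_folio() knows
                 * whether this folio needs copying to the cache; the flag is
                 * cleared again after each folio as adjacent subrequests may
                 * differ.
                 */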
129                 if (*notes & COPY_TO_CACHE)
130                         set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
131
132                 folio = folioq_folio(folioq, slot);
133                 if (WARN_ONCE(!folio_test_locked(folio),
134                               "R=%08x: folio %lx is not locked\n",
135                               rreq->debug_id, folio->index))
136                         trace_netfs_folio(folio, netfs_folio_trace_not_locked);
137
138                 order = folioq_folio_order(folioq, slot);
139                 rreq->front_folio_order = order;
140                 fsize = PAGE_SIZE << order;
141                 fpos = folio_pos(folio);
142                 fend = umin(fpos + fsize, rreq->i_size);
143
144                 trace_netfs_collect_folio(rreq, folio, fend, collected_to);
145
146                 /* Unlock any folio we've transferred all of. */
147                 if (collected_to < fend)
148                         break;
149
150                 netfs_unlock_read_folio(rreq, folioq, slot);
151                 WRITE_ONCE(rreq->cleaned_to, fpos + fsize);
152                 *notes |= MADE_PROGRESS;
153
154                 clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
155
156                 /* Clean up the head folioq.  If we clear an entire folioq, then
157                  * we can get rid of it provided it's not also the tail folioq
158                  * being filled by the issuer.
159                  */
160                 folioq_clear(folioq, slot);
161                 slot++;
162                 if (slot >= folioq_nr_slots(folioq)) {
163                         folioq = rolling_buffer_delete_spent(&rreq->buffer);
164                         if (!folioq)
165                                 goto done;
166                         slot = 0;
167                         trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
168                 }
169
170                 if (fpos + fsize >= collected_to)
171                         break;
172         }
173
174         rreq->buffer.tail = folioq;
175 done:
176         rreq->buffer.first_tail_slot = slot;
177 }
178
179 /*
180  * Collect and assess the results of various read subrequests.  We may need to
181  * retry some of the results.
182  *
183  * Note that we have a sequence of subrequests, which may be drawing on
184  * different sources, may differ in size and starting position, and may not
185  * even correspond in boundary alignment.
186  */
187 static void netfs_collect_read_results(struct netfs_io_request *rreq)
188 {
189         struct netfs_io_subrequest *front, *remove;
190         struct netfs_io_stream *stream = &rreq->io_streams[0];
191         unsigned int notes;
192
193         _enter("%llx-%llx", rreq->start, rreq->start + rreq->len);
194         trace_netfs_rreq(rreq, netfs_rreq_trace_collect);
195         trace_netfs_collect(rreq);
196
197 reassess:
198         if (rreq->origin == NETFS_READAHEAD ||
199             rreq->origin == NETFS_READPAGE ||
200             rreq->origin == NETFS_READ_FOR_WRITE)
201                 notes = BUFFERED;
202         else
203                 notes = 0;
204
205         /* Remove completed subrequests from the front of the stream and
206          * advance the completion point.  We stop when we hit something that's
207          * in progress.  The issuer thread may be adding stuff to the tail
208          * whilst we're doing this.
209          */
210         front = READ_ONCE(stream->front);
211         while (front) {
212                 size_t transferred;
213
214                 trace_netfs_collect_sreq(rreq, front);
215                 _debug("sreq [%x] %llx %zx/%zx",
216                        front->debug_index, front->start, front->transferred, front->len);
217
218                 if (stream->collected_to < front->start) {
219                         trace_netfs_collect_gap(rreq, stream, front->start, 'F');
220                         stream->collected_to = front->start;
221                 }
222
223                 if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags))
224                         notes |= HIT_PENDING;
225                 smp_rmb(); /* Read counters after IN_PROGRESS flag. */
226                 transferred = READ_ONCE(front->transferred);
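                /* Note that even if the front subrequest is still in progress,
                 * we use however much it has transferred so far to advance the
                 * collection point and, for buffered I/O, unlock any folios
                 * that are already complete.
                 */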
227
228                 /* If we can now collect the next folio, do so.  We don't want
229                  * to defer this as we have to decide whether we need to copy
230                  * to the cache or not, and that may differ between adjacent
231                  * subreqs.
232                  */
233                 if (notes & BUFFERED) {
234                         size_t fsize = PAGE_SIZE << rreq->front_folio_order;
235
236                         /* Clear the tail of a short read. */
237                         if (!(notes & HIT_PENDING) &&
238                             front->error == 0 &&
239                             transferred < front->len &&
240                             (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) ||
241                              test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) {
242                                 netfs_clear_unread(front);
243                                 transferred = front->transferred = front->len;
244                                 trace_netfs_sreq(front, netfs_sreq_trace_clear);
245                         }
246
247                         stream->collected_to = front->start + transferred;
248                         rreq->collected_to = stream->collected_to;
249
250                         if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags))
251                                 notes |= COPY_TO_CACHE;
252
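                        /* A failed subrequest is accounted as fully transferred
                         * so that the folio walk can get past it; the folios it
                         * covers are abandoned rather than marked uptodate.
                         */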
253                         if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
254                                 rreq->abandon_to = front->start + front->len;
255                                 front->transferred = front->len;
256                                 transferred = front->len;
257                                 trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon);
258                         }
259                         if (front->start + transferred >= rreq->cleaned_to + fsize ||
260                             test_bit(NETFS_SREQ_HIT_EOF, &front->flags))
261                                 netfs_read_unlock_folios(rreq, &notes);
262                 } else {
263                         stream->collected_to = front->start + transferred;
264                         rreq->collected_to = stream->collected_to;
265                 }
266
267                 /* Stall if the front is still undergoing I/O. */
268                 if (notes & HIT_PENDING)
269                         break;
270
271                 if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
272                         if (!stream->failed) {
273                                 stream->error = front->error;
274                                 rreq->error = front->error;
275                                 set_bit(NETFS_RREQ_FAILED, &rreq->flags);
276                                 stream->failed = true;
277                         }
278                         notes |= MADE_PROGRESS | ABANDON_SREQ;
279                 } else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) {
280                         stream->need_retry = true;
281                         notes |= NEED_RETRY | MADE_PROGRESS;
282                         break;
283                 } else {
284                         if (!stream->failed)
285                                 stream->transferred = stream->collected_to - rreq->start;
286                         notes |= MADE_PROGRESS;
287                 }
288
289                 /* Remove if completely consumed. */
290                 stream->source = front->source;
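                /* Removal is done under the request lock as the issuer thread
                 * may be appending new subrequests to the tail of the list at
                 * the same time.
                 */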
291                 spin_lock(&rreq->lock);
292
293                 remove = front;
294                 trace_netfs_sreq(front, netfs_sreq_trace_discard);
295                 list_del_init(&front->rreq_link);
296                 front = list_first_entry_or_null(&stream->subrequests,
297                                                  struct netfs_io_subrequest, rreq_link);
298                 stream->front = front;
299                 spin_unlock(&rreq->lock);
300                 netfs_put_subrequest(remove, false,
301                                      notes & ABANDON_SREQ ?
302                                      netfs_sreq_trace_put_abandon :
303                                      netfs_sreq_trace_put_done);
304         }
305
306         trace_netfs_collect_stream(rreq, stream);
307         trace_netfs_collect_state(rreq, rreq->collected_to, notes);
308
309         if (!(notes & BUFFERED))
310                 rreq->cleaned_to = rreq->collected_to;
311
312         if (notes & NEED_RETRY)
313                 goto need_retry;
314         if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) {
315                 trace_netfs_rreq(rreq, netfs_rreq_trace_unpause);
316                 clear_bit_unlock(NETFS_RREQ_PAUSE, &rreq->flags);
317                 smp_mb__after_atomic(); /* Clear PAUSE before task state */
318                 wake_up(&rreq->waitq);
319         }
320
321         if (notes & MADE_PROGRESS) {
322                 //cond_resched();
323                 goto reassess;
324         }
325
326 out:
327         _leave(" = %x", notes);
328         return;
329
330 need_retry:
331         /* Okay...  We're going to have to retry parts of the stream.  Note
332          * that any partially completed op will have had any wholly transferred
333          * folios removed from it.
334          */
335         _debug("retry");
336         netfs_retry_reads(rreq);
337         goto out;
338 }
339
340 /*
341  * Do page flushing and suchlike after DIO.
342  */
343 static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
344 {
345         struct netfs_io_subrequest *subreq;
346         struct netfs_io_stream *stream = &rreq->io_streams[0];
347         unsigned int i;
348
349         /* Collect unbuffered reads and direct reads, adding up the transfer
350          * sizes until we find the first short or failed subrequest.
351          */
352         list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
353                 rreq->transferred += subreq->transferred;
354
355                 if (subreq->transferred < subreq->len ||
356                     test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
357                         rreq->error = subreq->error;
358                         break;
359                 }
360         }
361
362         if (rreq->origin == NETFS_DIO_READ) {
363                 for (i = 0; i < rreq->direct_bv_count; i++) {
364                         flush_dcache_page(rreq->direct_bv[i].bv_page);
365                         // TODO: cifs marks pages in the destination buffer
366                         // dirty under some circumstances after a read.  Do we
367                         // need to do that too?
368                         set_page_dirty(rreq->direct_bv[i].bv_page);
369                 }
370         }
371
372         if (rreq->iocb) {
373                 rreq->iocb->ki_pos += rreq->transferred;
374                 if (rreq->iocb->ki_complete)
375                         rreq->iocb->ki_complete(
376                                 rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
377         }
378         if (rreq->netfs_ops->done)
379                 rreq->netfs_ops->done(rreq);
380         if (rreq->origin == NETFS_DIO_READ)
381                 inode_dio_end(rreq->inode);
382 }
383
384 /*
385  * Do processing after reading a monolithic single object.
386  */
387 static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
388 {
389         struct netfs_io_stream *stream = &rreq->io_streams[0];
390
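        /* If the object was downloaded from the server and valid cache
         * resources are attached, mark the inode dirty so that the content can
         * later be written out to the cache.
         */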
391         if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER &&
392             fscache_resources_valid(&rreq->cache_resources)) {
393                 trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
394                 netfs_single_mark_inode_dirty(rreq->inode);
395         }
396
397         if (rreq->iocb) {
398                 rreq->iocb->ki_pos += rreq->transferred;
399                 if (rreq->iocb->ki_complete)
400                         rreq->iocb->ki_complete(
401                                 rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
402         }
403         if (rreq->netfs_ops->done)
404                 rreq->netfs_ops->done(rreq);
405 }
406
407 /*
408  * Perform the collection of subrequests and folios.
409  *
410  * Note that we're in normal kernel thread context at this point, possibly
411  * running on a workqueue.
412  */
413 static void netfs_read_collection(struct netfs_io_request *rreq)
414 {
415         struct netfs_io_stream *stream = &rreq->io_streams[0];
416
417         netfs_collect_read_results(rreq);
418
419         /* We're done when the app thread has finished posting subreqs and the
420          * queue is empty.
421          */
422         if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
423                 return;
424         smp_rmb(); /* Read ALL_QUEUED before subreq lists. */
425
426         if (!list_empty(&stream->subrequests))
427                 return;
428
429         /* Okay, declare that all I/O is complete. */
430         rreq->transferred = stream->transferred;
431         trace_netfs_rreq(rreq, netfs_rreq_trace_complete);
432
433         //netfs_rreq_is_still_valid(rreq);
434
435         switch (rreq->origin) {
436         case NETFS_DIO_READ:
437         case NETFS_READ_GAPS:
438                 netfs_rreq_assess_dio(rreq);
439                 break;
440         case NETFS_READ_SINGLE:
441                 netfs_rreq_assess_single(rreq);
442                 break;
443         default:
444                 break;
445         }
446         task_io_account_read(rreq->transferred);
447
448         trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
449         clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
450
451         trace_netfs_rreq(rreq, netfs_rreq_trace_done);
452         netfs_clear_subrequests(rreq, false);
453         netfs_unlock_abandoned_read_pages(rreq);
454         if (unlikely(rreq->copy_to_cache))
455                 netfs_pgpriv2_end_copy_to_cache(rreq);
456 }
457
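/*
 * Workqueue entry point for offloaded collection.  Collection is only
 * attempted whilst the request is still marked in progress; the ref taken
 * when the work item was queued is dropped on the way out.
 */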
458 void netfs_read_collection_worker(struct work_struct *work)
459 {
460         struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
461
462         netfs_see_request(rreq, netfs_rreq_trace_see_work);
463         if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
464                 netfs_read_collection(rreq);
465         netfs_put_request(rreq, false, netfs_rreq_trace_put_work);
466 }
467
468 /*
469  * Wake the collection work item.
470  */
471 void netfs_wake_read_collector(struct netfs_io_request *rreq)
472 {
473         if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
474                 if (!work_pending(&rreq->work)) {
475                         netfs_get_request(rreq, netfs_rreq_trace_get_work);
476                         if (!queue_work(system_unbound_wq, &rreq->work))
477                                 netfs_put_request(rreq, true, netfs_rreq_trace_put_work_nq);
478                 }
479         } else {
480                 trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
481                 wake_up(&rreq->waitq);
482         }
483 }
484
485 /**
486  * netfs_read_subreq_progress - Note progress of a read operation.
487  * @subreq: The read subrequest that has made progress.
488  *
489  * This tells the read side of netfs lib that a contributory I/O operation has
490  * made some progress and that it may be possible to unlock some folios.
491  *
492  * Before calling, the filesystem should update subreq->transferred to track
493  * the amount of data copied into the output buffer.
494  */
495 void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
496 {
497         struct netfs_io_request *rreq = subreq->rreq;
498         struct netfs_io_stream *stream = &rreq->io_streams[0];
499         size_t fsize = PAGE_SIZE << rreq->front_folio_order;
500
501         trace_netfs_sreq(subreq, netfs_sreq_trace_progress);
502
503         /* If we are at the head of the queue, wake up the collector; when
504          * collection is offloaded, queuing the work takes a ref on the request.
505          */
506         if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize &&
507             (rreq->origin == NETFS_READAHEAD ||
508              rreq->origin == NETFS_READPAGE ||
509              rreq->origin == NETFS_READ_FOR_WRITE) &&
510             list_is_first(&subreq->rreq_link, &stream->subrequests)
511             ) {
512                 __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
513                 netfs_wake_read_collector(rreq);
514         }
515 }
516 EXPORT_SYMBOL(netfs_read_subreq_progress);
517
518 /**
519  * netfs_read_subreq_terminated - Note the termination of an I/O operation.
520  * @subreq: The I/O subrequest that has terminated.
521  *
522  * This tells the read helper that a contributory I/O operation has terminated,
523  * one way or another, and that it should integrate the results.
524  *
525  * The caller indicates the outcome of the operation through @subreq->error,
526  * supplying 0 to indicate a successful or retryable transfer (if
527  * NETFS_SREQ_NEED_RETRY is set) or a negative error code.  The helper will
528  * look after reissuing I/O operations as appropriate and writing downloaded
529  * data to the cache.
530  *
531  * Before calling, the filesystem should update subreq->transferred to track
532  * the amount of data copied into the output buffer.
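 *
 * As a rough sketch only (the my_fs_read_done name and the result parameter
 * are hypothetical, not taken from any real filesystem), a transport
 * completion handler might do something like:
 *
 *	static void my_fs_read_done(struct netfs_io_subrequest *subreq,
 *				    ssize_t result)
 *	{
 *		if (result < 0) {
 *			subreq->error = result;
 *		} else {
 *			subreq->error = 0;
 *			subreq->transferred += result;
 *			if (result > 0)
 *				__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 *		}
 *		netfs_read_subreq_terminated(subreq);
 *	}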
533  */
534 void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
535 {
536         struct netfs_io_request *rreq = subreq->rreq;
537         struct netfs_io_stream *stream = &rreq->io_streams[0];
538
539         switch (subreq->source) {
540         case NETFS_READ_FROM_CACHE:
541                 netfs_stat(&netfs_n_rh_read_done);
542                 break;
543         case NETFS_DOWNLOAD_FROM_SERVER:
544                 netfs_stat(&netfs_n_rh_download_done);
545                 break;
546         default:
547                 break;
548         }
549
550         /* Deal with retry requests, short reads and errors.  If we retry
551          * but don't make progress, we abandon the attempt.
552          */
553         if (!subreq->error && subreq->transferred < subreq->len) {
554                 if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
555                         trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
556                 } else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
557                         trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear);
558                 } else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
559                         trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry);
560                 } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
561                         __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
562                         trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read);
563                 } else {
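                        /* A short read that made no progress and carries no
                         * EOF or clear-tail indication is treated as a hard
                         * failure.
                         */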
564                         __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
565                         subreq->error = -ENODATA;
566                         trace_netfs_sreq(subreq, netfs_sreq_trace_short);
567                 }
568         }
569
570         if (unlikely(subreq->error < 0)) {
571                 trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
572                 if (subreq->source == NETFS_READ_FROM_CACHE) {
573                         netfs_stat(&netfs_n_rh_read_failed);
574                         __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
575                 } else {
576                         netfs_stat(&netfs_n_rh_download_failed);
577                         __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
578                 }
579                 trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
580                 set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
581         }
582
583         trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
584
585         clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
586         smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
587
588         /* If we are at the head of the queue, wake up the collector. */
589         if (list_is_first(&subreq->rreq_link, &stream->subrequests))
590                 netfs_wake_read_collector(rreq);
591
592         netfs_put_subrequest(subreq, true, netfs_sreq_trace_put_terminated);
593 }
594 EXPORT_SYMBOL(netfs_read_subreq_terminated);
595
596 /*
597  * Handle termination of a read from the cache.
598  */
599 void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
600 {
601         struct netfs_io_subrequest *subreq = priv;
602
603         if (transferred_or_error < 0) {
604                 subreq->error = transferred_or_error;
605         } else {
606                 subreq->error = 0;
607                 if (transferred_or_error > 0) {
608                         subreq->transferred += transferred_or_error;
609                         __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
610                 }
611         }
612         netfs_read_subreq_terminated(subreq);
613 }
614
615 /*
616  * Wait for the read operation to complete, successfully or otherwise.
617  */
618 ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
619 {
620         struct netfs_io_subrequest *subreq;
621         struct netfs_io_stream *stream = &rreq->io_streams[0];
622         DEFINE_WAIT(myself);
623         ssize_t ret;
624
625         for (;;) {
626                 trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
627                 prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
628
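                /* If the front subrequest has completed or has at least made
                 * some progress, run the collection on this thread before
                 * deciding whether to sleep.
                 */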
629                 subreq = list_first_entry_or_null(&stream->subrequests,
630                                                   struct netfs_io_subrequest, rreq_link);
631                 if (subreq &&
632                     (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
633                      test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
634                         __set_current_state(TASK_RUNNING);
635                         netfs_read_collection(rreq);
636                         continue;
637                 }
638
639                 if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
640                         break;
641
642                 schedule();
643                 trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
644         }
645
646         finish_wait(&rreq->waitq, &myself);
647
648         ret = rreq->error;
649         if (ret == 0) {
650                 ret = rreq->transferred;
651                 switch (rreq->origin) {
652                 case NETFS_DIO_READ:
653                 case NETFS_READ_SINGLE:
654                         ret = rreq->transferred;
655                         break;
656                 default:
657                         if (rreq->submitted < rreq->len) {
658                                 trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
659                                 ret = -EIO;
660                         }
661                         break;
662                 }
663         }
664
665         return ret;
666 }
667
668 /*
669  * Wait for a paused read operation to unpause or complete in some manner.
670  */
671 void netfs_wait_for_pause(struct netfs_io_request *rreq)
672 {
673         struct netfs_io_subrequest *subreq;
674         struct netfs_io_stream *stream = &rreq->io_streams[0];
675         DEFINE_WAIT(myself);
676
677         trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
678
679         for (;;) {
680                 trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
681                 prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
682
683                 subreq = list_first_entry_or_null(&stream->subrequests,
684                                                   struct netfs_io_subrequest, rreq_link);
685                 if (subreq &&
686                     (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
687                      test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
688                         __set_current_state(TASK_RUNNING);
689                         netfs_read_collection(rreq);
690                         continue;
691                 }
692
693                 if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
694                     !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
695                         break;
696
697                 schedule();
698                 trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
699         }
700
701         finish_wait(&rreq->waitq, &myself);
702 }