// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

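/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page
 * @from: Offset of the first byte of the range within the page
 * @len: Length of the range in bytes
 *
 * Marks each buffer that overlaps the byte range [@from, @from + @len)
 * as uptodate and adds it to the running transaction as journaled data.
 */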
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Index of the last page processed, updated as we go
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

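	/*
	 * Begin a transaction large enough for every block backed by this
	 * pagevec.  The transaction has to be started before any page locks
	 * are taken; see gfs2_write_cache_jdata() for why this ordering
	 * matters.
	 */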
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 *
 * Returns: errno
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or errno on failure
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

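/*
 * Example (hypothetical caller, for illustration only): read the first
 * 64 bytes of an internal file and advance the file position:
 *
 *	char buf[64];
 *	loff_t pos = 0;
 *	int ret = gfs2_internal_read(ip, buf, &pos, sizeof(buf));
 *
 * On success, ret is 64 and pos has advanced by 64 bytes.
 */
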
/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

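/**
 * jdata_dirty_folio - Mark a jdata folio as dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio being dirtied
 *
 * If this is called from within a transaction, the folio is marked as
 * checked so that writeback knows the data must still be journaled;
 * the folio is then dirtied in the usual buffer-based way.
 */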
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

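/*
 * gfs2_discard - Detach a buffer from the journal and reset it
 *
 * Removes the buffer's bufdata from whichever list it is queued on (or,
 * if the buffer is pinned, takes it out of the journal via
 * gfs2_remove_from_journal), then clears the buffer's dirty, mapped,
 * req and new state so that the underlying block can be released.
 */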
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

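/**
 * gfs2_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Offset of the first byte being invalidated
 * @length: Length of the invalidated range
 *
 * Discards from the journal every buffer that lies entirely within the
 * invalidated byte range.  If the entire folio is being invalidated,
 * its checked flag is cleared and its buffers are released too.
 */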
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

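/*
 * Address space operations used for ordered and writeback mode files;
 * these are iomap based.  Journaled data (jdata) files use the
 * buffer-based gfs2_jdata_aops below instead.
 */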
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

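/**
 * gfs2_set_aops - Choose the address space operations for an inode
 * @inode: The inode
 *
 * Journaled data inodes use gfs2_jdata_aops; all other inodes use the
 * iomap based gfs2_aops.
 */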
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}