linux.git/blobdiff: fs/btrfs/file.c
Merge tag 'clk-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 55f68422061d1aac01d38a5c459f69c5d13d1fc6..ee34497500e169415efdb47817c5af65260aebfd 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -28,6 +28,7 @@
 #include "compression.h"
 #include "delalloc-space.h"
 #include "reflink.h"
+#include "subpage.h"
 
 static struct kmem_cache *btrfs_inode_defrag_cachep;
 /*
@@ -398,7 +399,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                /*
                 * Copy data from userspace to the current page
                 */
-               copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
+               copied = copy_page_from_iter_atomic(page, offset, count, i);
 
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
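
A note on the helper swap in the hunk above: iov_iter_copy_from_user_atomic()
left the iov_iter position untouched, so the caller had to advance it by hand
after a successful copy, while copy_page_from_iter_atomic() (note the changed
argument order) advances the iterator itself by however many bytes it managed
to copy. A minimal sketch of the caller-side difference; need_retry is a
hypothetical stand-in for the uptodate check handled in the next hunk:

	/* Old API: the iterator stays put, so advance it explicitly. */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
	iov_iter_advance(i, copied);

	/* New API: the iterator is advanced internally, so a copy that
	 * must be retried has to be rewound with iov_iter_revert()
	 * instead of simply not advancing.  need_retry is a hypothetical
	 * placeholder for the real uptodate check. */
	copied = copy_page_from_iter_atomic(page, offset, count, i);
	if (need_retry)
		iov_iter_revert(i, copied);
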
@@ -412,20 +413,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                 * The rest of the btrfs_file_write code will fall
                 * back to page-at-a-time copies after we return 0.
                 */
-               if (!PageUptodate(page) && copied < count)
-                       copied = 0;
+               if (unlikely(copied < count)) {
+                       if (!PageUptodate(page)) {
+                               iov_iter_revert(i, copied);
+                               copied = 0;
+                       }
+                       if (!copied)
+                               break;
+               }
 
-               iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;
-
-               /* Return to btrfs_file_write_iter to fault page */
-               if (unlikely(copied == 0))
-                       break;
-
-               if (copied < PAGE_SIZE - offset) {
-                       offset += copied;
-               } else {
+               offset += copied;
+               if (offset == PAGE_SIZE) {
                        pg++;
                        offset = 0;
                }
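
The rewritten short-copy handling above encodes a subtle rule: a short copy
into a page that is already uptodate may be kept, because the bytes the copy
did not reach still hold valid data, while a short copy into a non-uptodate
page must be discarded, since committing it would mix the new bytes with stale
page contents. A worked pass through the new logic, with hypothetical numbers
and 4K pages:

	/* Suppose offset = 3072 and count = 1024, but the user buffer
	 * faults after 512 bytes, so copied = 512 (< count):
	 *
	 *   PageUptodate(page):  keep the 512 bytes; offset advances to
	 *                        3584 and the loop continues.
	 *   !PageUptodate(page): iov_iter_revert(i, 512), copied = 0,
	 *                        break, and the caller faults the user
	 *                        pages in before retrying.
	 */
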
@@ -482,6 +482,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
        start_pos = round_down(pos, fs_info->sectorsize);
        num_bytes = round_up(write_bytes + pos - start_pos,
                             fs_info->sectorsize);
+       ASSERT(num_bytes <= U32_MAX);
 
        end_of_last_block = start_pos + num_bytes - 1;
 
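
The new assertion documents that the subpage helpers used below take the
length as a u32; a single buffered-write batch covers far less than 4GiB, so
num_bytes always fits. For reference, the rounding above works out like this
with a hypothetical sectorsize = 4096, pos = 5000, write_bytes = 3000:

	start_pos         = round_down(5000, 4096)              = 4096
	num_bytes         = round_up(3000 + 5000 - 4096, 4096)  = 4096
	end_of_last_block = 4096 + 4096 - 1                     = 8191
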
@@ -500,9 +501,10 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 
        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
-               SetPageUptodate(p);
+
+               btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
                ClearPageChecked(p);
-               set_page_dirty(p);
+               btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
        }
 
        /*
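
With subpage support, sectorsize can be smaller than PAGE_SIZE, so uptodate
and dirty become per-sector state tracked in the page's private data rather
than single page-wide flags. The clamp variants restrict the supplied range to
the part that overlaps the given page before setting that state, and on
regular sectorsize == PAGE_SIZE filesystems they reduce to the plain page
flags. A hypothetical sketch of the clamping idea, not the actual subpage.c
implementation:

	/* Illustrative only: clamp [start_pos, start_pos + num_bytes)
	 * to the portion inside this page before marking sectors. */
	u64 page_start = page_offset(p);
	u64 clamp_start = max(start_pos, page_start);
	u64 clamp_end = min(start_pos + num_bytes, page_start + PAGE_SIZE);
	/* ...then set the bits for sectors in [clamp_start, clamp_end). */
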
@@ -2483,6 +2485,17 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
                                       const u64 lockend,
                                       struct extent_state **cached_state)
 {
+       /*
+        * For the subpage case, if the range is not page aligned, we could
+        * have pages at the leading/trailing parts of the range.
+        * This could lead to an infinite loop, since filemap_range_has_page()
+        * would always return true.
+        * So we need to do extra page alignment before calling
+        * filemap_range_has_page().
+        */
+       const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
+       const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+
        while (1) {
                struct btrfs_ordered_extent *ordered;
                int ret;
@@ -2503,7 +2516,7 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
                    (ordered->file_offset + ordered->num_bytes <= lockstart ||
                     ordered->file_offset > lockend)) &&
                     !filemap_range_has_page(inode->i_mapping,
-                                            lockstart, lockend)) {
+                                            page_lockstart, page_lockend)) {
                        if (ordered)
                                btrfs_put_ordered_extent(ordered);
                        break;
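
A worked example of the extra rounding, assuming a 64K PAGE_SIZE and a hole
locked over [4096, 131071]:

	page_lockstart = round_up(4096, 65536)             = 65536
	page_lockend   = round_down(131071 + 1, 65536) - 1 = 131071

The head page at [0, 64K) is only partially covered by the hole, so its
sectors outside the hole legitimately keep it in the page cache; checking it
would make filemap_range_has_page() return true on every pass. With the
rounding, only pages that lie fully inside the range are checked.
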
@@ -3034,22 +3047,20 @@ struct falloc_range {
  */
 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
 {
-       struct falloc_range *prev = NULL;
        struct falloc_range *range = NULL;
 
-       if (list_empty(head))
-               goto insert;
-
-       /*
-        * As fallocate iterate by bytenr order, we only need to check
-        * the last range.
-        */
-       prev = list_entry(head->prev, struct falloc_range, list);
-       if (prev->start + prev->len == start) {
-               prev->len += len;
-               return 0;
+       if (!list_empty(head)) {
+               /*
+                * As fallocate iterates by bytenr order, we only need to check
+                * the last range.
+                */
+               range = list_last_entry(head, struct falloc_range, list);
+               if (range->start + range->len == start) {
+                       range->len += len;
+                       return 0;
+               }
        }
-insert:
+
        range = kmalloc(sizeof(*range), GFP_KERNEL);
        if (!range)
                return -ENOMEM;
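
The refactor above drops the insert label and goto in favor of
list_last_entry() without changing behavior: because fallocate walks the file
in increasing bytenr order, a new range can only ever be contiguous with the
most recently added entry. A sketch of the resulting merge behavior, with
illustrative offsets:

	add_falloc_range(head, 0, 4096);     /* new entry  [0, 4096)      */
	add_falloc_range(head, 4096, 4096);  /* merges  -> [0, 8192)      */
	add_falloc_range(head, 16384, 4096); /* gap -> new [16384, 20480) */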