xa_unlock_irq(&mapping->i_pages);
break;
} else if (IS_ERR(entry)) {
+ xa_unlock_irq(&mapping->i_pages);
WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
continue;
}
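For context on this first hunk: the lookup that reports -EAGAIN comes back with the i_pages lock still held (otherwise the added unlock would be unbalanced), while the retry path loops around to a point that takes the lock again, so continuing without dropping it would deadlock on the spinlock. A minimal sketch of that retry discipline follows; lock_entry_sketch() is an illustrative name of mine, and the lookup call stands in for the wait-callback variant that can actually return ERR_PTR(-EAGAIN), not the exact upstream function.

/*
 * Illustrative sketch only: why the -EAGAIN path must drop i_pages
 * before retrying. Not the upstream function.
 */
static bool lock_entry_sketch(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	for (;;) {
		xa_lock_irq(&mapping->i_pages);
		entry = get_unlocked_mapping_entry(mapping, index, NULL);
		if (!entry) {
			/* nothing to lock: drop i_pages and give up */
			xa_unlock_irq(&mapping->i_pages);
			return false;
		}
		if (IS_ERR(entry)) {
			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
			/*
			 * The error is reported with i_pages held; unlock
			 * before continuing, or the xa_lock_irq() at the
			 * top of the loop deadlocks against itself.
			 */
			xa_unlock_irq(&mapping->i_pages);
			continue;
		}
		/* found an unlocked entry: lock it, then release i_pages */
		xa_unlock_irq(&mapping->i_pages);
		return true;
	}
}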
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
indices)) {
+ pgoff_t nr_pages = 1;
+
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *pvec_ent = pvec.pages[i];
void *entry;
xa_lock_irq(&mapping->i_pages);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
- if (entry)
+ if (entry) {
page = dax_busy_page(entry);
+ /*
+ * Account for multi-order entries at
+ * the end of the pagevec.
+ */
+ if (i + 1 >= pagevec_count(&pvec))
+ nr_pages = 1UL << dax_radix_order(entry);
+ }
put_unlocked_mapping_entry(mapping, index, entry);
xa_unlock_irq(&mapping->i_pages);
if (page)
break;
}

pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
- index++;
+ index += nr_pages;
if (page)
break;
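The changed increment is the point of this hunk: a multi-order DAX entry (for example a 2MiB PMD entry) spans many consecutive index slots, so stepping the scan by one lands inside the same entry again and the lookup keeps returning the same busy page. Advancing by 1UL << dax_radix_order(entry) skips past the whole entry. A rough illustration of the stride, reusing dax_radix_order() from the hunk above; next_scan_index() is an illustrative name of mine, and the order of 9 assumes 2MiB PMD entries with 4KiB pages.

/*
 * Illustrative only: how far the scan advances past one entry. A PTE
 * entry has order 0, so the stride stays 1; a 2MiB PMD entry has order
 * 9 with 4KiB pages, so the stride is 512 slots and the next lookup
 * starts beyond the huge entry instead of re-probing it.
 */
static pgoff_t next_scan_index(pgoff_t index, void *entry)
{
	pgoff_t nr_pages = 1UL << dax_radix_order(entry);

	return index + nr_pages;
}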
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
		struct vm_fault *vmf)
{
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- struct page *zero_page;
- pfn_t pfn;
-
- zero_page = ZERO_PAGE(0);
- if (unlikely(!zero_page)) {
- ret = VM_FAULT_OOM;
- goto out;
- }
+ pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+ vm_fault_t ret;
- pfn = page_to_pfn_t(zero_page);
dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
false);
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-out:
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
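The branch removed from dax_load_hole() was dead code: ZERO_PAGE(0) resolves to the statically allocated shared zero page, so the NULL check, VM_FAULT_OOM, and the out: label could never be reached. my_zero_pfn(vaddr) yields the pfn of that same zero page, and on architectures with cache-colored zero pages it selects the one matching the faulting address. A minimal sketch of the equivalence, assuming a single uncolored zero page; zero_pfn_equivalent() is an illustrative name of mine and pfn_t_to_pfn() is the standard pfn_t accessor.

/*
 * Illustrative check, assuming an architecture with one shared zero
 * page: the removed and the added expressions resolve to the same pfn,
 * so the hole fault still maps the zero page, just without the
 * unreachable error path.
 */
static bool zero_pfn_equivalent(unsigned long vaddr)
{
	pfn_t old_way = page_to_pfn_t(ZERO_PAGE(0));
	pfn_t new_way = pfn_to_pfn_t(my_zero_pfn(vaddr));

	return pfn_t_to_pfn(old_way) == pfn_t_to_pfn(new_way);
}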