#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
-#include "filemap.h"
/*
 * We use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
}
EXPORT_SYMBOL_GPL(xip_file_read);
-ssize_t
-xip_file_sendfile(struct file *in_file, loff_t *ppos,
- size_t count, read_actor_t actor, void *target)
-{
- read_descriptor_t desc;
-
- if (!count)
- return 0;
-
- desc.written = 0;
- desc.count = count;
- desc.arg.data = target;
- desc.error = 0;
-
- do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
- ppos, &desc, actor);
- if (desc.written)
- return desc.written;
- return desc.error;
-}
-EXPORT_SYMBOL_GPL(xip_file_sendfile);
-
/*
* __xip_unmap is invoked from xip_unmap and
* xip_write
}
/*
- * xip_nopage() is invoked via the vma operations vector for a
+ * xip_fault() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
- * This function is derived from filemap_nopage, but used for execute in place
+ * This function is derived from filemap_fault, but used for execute in place
*/
-static struct page *
-xip_file_nopage(struct vm_area_struct * area,
- unsigned long address,
- int *type)
+static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
struct file *file = area->vm_file;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct page *page;
- unsigned long size, pgoff, endoff;
+ pgoff_t size;
- pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
- + area->vm_pgoff;
- endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
- + area->vm_pgoff;
+ /* XXX: are VM_FAULT_ codes OK? */
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (pgoff >= size)
- return NOPAGE_SIGBUS;
+ if (vmf->pgoff >= size)
+ return VM_FAULT_SIGBUS;
- page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
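+ /* get_xip_page() takes a 512-byte sector index, hence PAGE_SIZE/512 */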
+ page = mapping->a_ops->get_xip_page(mapping,
+ vmf->pgoff*(PAGE_SIZE/512), 0);
if (!IS_ERR(page))
goto out;
if (PTR_ERR(page) != -ENODATA)
- return NOPAGE_SIGBUS;
+ return VM_FAULT_OOM;
/* sparse block */
if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
(area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
(!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
/* maybe shared writable, allocate new block */
- page = mapping->a_ops->get_xip_page (mapping,
- pgoff*(PAGE_SIZE/512), 1);
+ page = mapping->a_ops->get_xip_page(mapping,
+ vmf->pgoff*(PAGE_SIZE/512), 1);
if (IS_ERR(page))
- return NOPAGE_SIGBUS;
+ return VM_FAULT_SIGBUS;
/* unmap page at pgoff from all other vmas */
- __xip_unmap(mapping, pgoff);
+ __xip_unmap(mapping, vmf->pgoff);
} else {
/* not shared and writable, use xip_sparse_page() */
page = xip_sparse_page();
if (!page)
- return NOPAGE_OOM;
+ return VM_FAULT_OOM;
}
out:
page_cache_get(page);
- return page;
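+ /* hand the referenced page back to the fault handler via vmf */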
+ vmf->page = page;
+ return 0;
}
static struct vm_operations_struct xip_file_vm_ops = {
- .nopage = xip_file_nopage,
+ .fault = xip_file_fault,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
file_accessed(file);
vma->vm_ops = &xip_file_vm_ops;
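+ /* ->fault handlers can service nonlinear mappings via vmf->pgoff */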
+ vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
unsigned long index;
unsigned long offset;
size_t copied;
+ char *kaddr;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
index = pos >> PAGE_CACHE_SHIFT;
if (bytes > count)
bytes = count;
- /*
- * Bring in the user page that we will copy from _first_.
- * Otherwise there's a nasty deadlock on copying from the
- * same page as we're writing to, without it being marked
- * up-to-date.
- */
- fault_in_pages_readable(buf, bytes);
-
page = a_ops->get_xip_page(mapping,
index*(PAGE_SIZE/512), 0);
if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
break;
}
- copied = filemap_copy_from_user(page, offset, buf, bytes);
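+ /*
+ * Fault in the source page first: the inatomic copy below cannot
+ * handle a page fault, and this also avoids a deadlock when
+ * copying from the same page we are writing to.
+ */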
+ fault_in_pages_readable(buf, bytes);
+ kaddr = kmap_atomic(page, KM_USER0);
+ copied = bytes -
+ __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+ kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
+
if (likely(copied > 0)) {
status = copied;