/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
static struct page *__xip_sparse_page;
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
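/*
 * Sketch of the call pattern (as used by the fault path further down):
 * allocation is serialized by xip_sparse_mutex, which is what makes the
 * unlocked check of __xip_sparse_page above safe.
 *
 *	mutex_lock(&xip_sparse_mutex);
 *	page = xip_sparse_page();
 *	mutex_unlock(&xip_sparse_mutex);
 */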
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}
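/*
 * Sketch of a get_xip_mem() implementation (illustrative only; the
 * myfs_* helpers below are hypothetical, not real kernel API). Judging
 * from the calls in this file, the aop must translate a page offset into
 * a kernel virtual address plus a pfn for the backing memory, allocate
 * the block when create != 0, and return -ENODATA for a hole:
 */
static int myfs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
			    int create, void **kmem, unsigned long *pfn)
{
	sector_t block;
	int error;

	/* map the page offset to a device block, allocating if asked to */
	error = myfs_map_block(mapping->host, pgoff, create, &block);
	if (error)
		return error;	/* -ENODATA when !create and pgoff is a hole */

	/* hand back a kernel address and pfn for that block's memory */
	return myfs_direct_access(mapping->host->i_sb, block, kmem, pfn);
}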
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
/*
 * __xip_unmap is invoked from xip_unmap and xip_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void __xip_unmap(struct address_space * mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		pte_t *pte, pteval;
		spinlock_t *ptl;
		struct mm_struct *mm = vma->vm_mm;
		unsigned long address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			/* must invalidate_page _before_ freeing the page */
			mmu_notifier_invalidate_page(mm, address);
			page_cache_release(page);
		}
	}
	i_mmap_unlock_read(mapping);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}
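/*
 * The seqcount dance above pairs with the write side in xip_file_fault():
 * the fault path bumps xip_sparse_seq under xip_sparse_mutex while it
 * inserts __xip_sparse_page into a vma. If the lockless walk raced with
 * such an insertion, read_seqcount_retry() fires and the walk is redone
 * with the mutex held, so no further insertions can slip past it.
 */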
/*
 * xip_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}
static const struct vm_operations_struct xip_file_vm_ops = {
	.fault		= xip_file_fault,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
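/*
 * Sketch of how a filesystem would wire these helpers up (illustrative;
 * myfs_xip_file_operations is a hypothetical name). The exported
 * xip_file_read/xip_file_write/xip_file_mmap match the read, write and
 * mmap slots of struct file_operations directly:
 */
static const struct file_operations myfs_xip_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= xip_file_read,
	.write		= xip_file_write,
	.mmap		= xip_file_mmap,
};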
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* we allocate a new page, unmap it */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses
 * get_xip_mem to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
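/*
 * Sketch of a caller (illustrative; myfs_setsize is a hypothetical
 * helper): when shrinking a file, a filesystem zeroes the tail of the
 * last remaining block first, so stale data cannot reappear if the file
 * is later extended over the same block:
 */
static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	/* zero from newsize to the end of its block */
	error = xip_truncate_page(inode->i_mapping, newsize);
	if (error)
		return error;
	truncate_setsize(inode, newsize);	/* update i_size, unmap tail */
	return 0;
}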